#!/usr/bin/env python3
"""Build a next-character dataset: each row holds the indices of the 10
previous characters followed by the index of the current (target) character."""

from typing import List, Dict

import numpy as np

INPUT_FILE: str = "./data/all_cleaned_words_shuffled.txt"
OUTPUT_FILE: str = "./data.npy"

alphabet: List[str] = list("abcdefghijklmnopqrstuvwxyz")

char_to_index: Dict[str, int] = {ch: idx for idx, ch in enumerate(alphabet)}
default_index: int = len(alphabet)  # index 26 -> unknown character (27th class)


def encode_letter(c: str) -> int:
    """Return the 0-25 index for a-z, or default_index for anything else."""
    return char_to_index.get(c, default_index)
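
# For illustration: encode_letter("a") == 0, encode_letter("z") == 25, and any
# character outside a-z (e.g. "'") falls back to default_index (26).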


def build_dataset(input_path: str) -> np.ndarray:
    all_features: List[List[int]] = []

    with open(input_path, 'r') as input_file:
        for line in input_file:
            word: str = line.strip().lower()
            # Window of the 10 previous characters; "" pads the start of a
            # word and encodes to default_index like any unknown character.
            prev_chars: List[str] = [""] * 10

            for curr_char in word:
                features: List[int] = []

                # Use indices instead of one-hot for previous 10 characters
                for prev in prev_chars:
                    features.append(encode_letter(prev))

                # Append current char index (target for classification)
                features.append(encode_letter(curr_char))

                # features.extend([prev_type])
                all_features.append(features)

                # Slide the context window forward by one character
                prev_chars = prev_chars[1:] + [curr_char]

    return np.array(all_features, dtype=np.int32)


if __name__ == "__main__":
    dataset: np.ndarray = build_dataset(INPUT_FILE)
    np.save(OUTPUT_FILE, dataset)
    print(f"Saved dataset shape: {dataset.shape} → {OUTPUT_FILE}")
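
# Downstream usage (a sketch, not part of this script): each saved row has 11
# columns, the 10 context-character indices followed by the target index, so a
# consumer could split features from labels with:
#   data = np.load("./data.npy")
#   X, y = data[:, :-1], data[:, -1]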