
Merge pull request #57 from ryouze/patch-1

Improve readability of huge numbers
commit c1c20a0311
Andrej 2023-01-19 15:08:35 -08:00, committed by GitHub


@@ -17,13 +17,13 @@ if not os.path.exists('input.txt'):

 with open('input.txt', 'r') as f:
     data = f.read()
-print("length of dataset in characters: ", len(data))
+print(f"length of dataset in characters: {len(data):,}")

 # get all the unique characters that occur in this text
 chars = sorted(list(set(data)))
 vocab_size = len(chars)
 print("all the unique characters:", ''.join(chars))
-print("vocab size:", vocab_size)
+print(f"vocab size: {vocab_size:,}")

 # create a mapping from characters to integers
 stoi = { ch:i for i,ch in enumerate(chars) }
@@ -41,8 +41,8 @@ val_data = data[int(n*0.9):]
 # encode both to integers
 train_ids = encode(train_data)
 val_ids = encode(val_data)
-print(f"train has {len(train_ids)} tokens")
-print(f"val has {len(val_ids)} tokens")
+print(f"train has {len(train_ids):,} tokens")
+print(f"val has {len(val_ids):,} tokens")

 # export to bin files
 train_ids = np.array(train_ids, dtype=np.uint16)
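
For reference, a minimal sketch (not part of the diff above) of what Python's "," format specifier does: it inserts thousands separators when a number is formatted, which is the readability improvement this PR applies to the dataset-size print statements. The variable name and value below are illustrative only, not taken from the repository.

# Illustrative only: how the "," format specifier groups digits.
n_tokens = 1003854  # example value
print(f"train has {n_tokens} tokens")    # -> train has 1003854 tokens
print(f"train has {n_tokens:,} tokens")  # -> train has 1,003,854 tokens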