mirror of https://github.com/osmarks/nanogpt-experiments.git synced 2024-11-14 05:44:51 +00:00

Merge pull request #10 from LaihoE/master

batch file write
Andrej 2023-04-13 00:39:41 -07:00 committed by GitHub
commit d9f4735f5e


@@ -54,12 +54,16 @@ for split, dset in tokenized.items():
     filename = os.path.join(os.path.dirname(__file__), f'{split}.bin')
     dtype = np.uint16 # (can do since enc.max_token_value == 50256 is < 2**16)
     arr = np.memmap(filename, dtype=dtype, mode='w+', shape=(arr_len,))
-    print(f"writing {filename}...")
+    total_batches = 1024
     idx = 0
-    for example in tqdm(dset):
-        arr[idx : idx + example['len']] = example['ids']
-        idx += example['len']
+    for batch_idx in tqdm(range(total_batches), desc=f'writing {filename}'):
+        # Batch together samples for faster write
+        batch = dset.shard(num_shards=total_batches, index=batch_idx, contiguous=True).with_format('numpy')
+        arr_batch = np.concatenate(batch['ids'])
+        # Write into mmap
+        arr[idx : idx + len(arr_batch)] = arr_batch
+        idx += len(arr_batch)
     arr.flush()

 # train.bin is ~17GB, val.bin ~8.5MB
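
For context, the change swaps one small memmap assignment per example for one write of a concatenated array per shard, so the ~17GB train.bin is filled by 1024 large contiguous writes rather than millions of tiny ones. Below is a minimal, self-contained sketch of that batched-write pattern, assuming the Hugging Face datasets API as used in the diff; the toy dataset, the toy.bin output path, and the small total_batches value are illustrative placeholders, not taken from the actual script.

# Minimal sketch of the batched memmap write pattern above (illustrative only).
import numpy as np
from datasets import Dataset  # Hugging Face datasets
from tqdm import tqdm

# Toy stand-in for a tokenized split: each row holds token ids and their count.
dset = Dataset.from_dict({
    'ids': [[1, 2, 3], [4, 5], [6, 7, 8, 9]],
    'len': [3, 2, 4],
})

arr_len = int(np.sum(dset['len']))
filename = 'toy.bin'   # hypothetical output path
dtype = np.uint16      # token ids fit in 16 bits
arr = np.memmap(filename, dtype=dtype, mode='w+', shape=(arr_len,))

total_batches = 2      # the real script uses 1024; must not exceed len(dset)
idx = 0
for batch_idx in tqdm(range(total_batches), desc=f'writing {filename}'):
    # shard() yields a contiguous slice of rows; concatenating their ids lets us
    # issue one large write into the memmap instead of one tiny write per example
    batch = dset.shard(num_shards=total_batches, index=batch_idx, contiguous=True).with_format('numpy')
    arr_batch = np.concatenate(batch['ids'])
    arr[idx : idx + len(arr_batch)] = arr_batch
    idx += len(arr_batch)
arr.flush()

Running this writes the nine toy tokens into toy.bin; the real script applies the same loop to each tokenized split to produce train.bin and val.bin.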