From 0a2ea9533844be1ec7e289ac392553e9f050be36 Mon Sep 17 00:00:00 2001
From: Laiho
Date: Mon, 2 Jan 2023 17:49:21 +0200
Subject: [PATCH] batch file write

---
 data/openwebtext/prepare.py | 12 ++++++++----
 1 file changed, 8 insertions(+), 4 deletions(-)

diff --git a/data/openwebtext/prepare.py b/data/openwebtext/prepare.py
index 0aadbb8..4f31017 100644
--- a/data/openwebtext/prepare.py
+++ b/data/openwebtext/prepare.py
@@ -53,12 +53,16 @@ for split, dset in tokenized.items():
     filename = f'{split}.bin'
     dtype = np.uint16 # (can do since enc.max_token_value == 50256 is < 2**16)
     arr = np.memmap(filename, dtype=dtype, mode='w+', shape=(arr_len,))
+    total_batches = 1024
 
-    print(f"writing {filename}...")
     idx = 0
-    for example in tqdm(dset):
-        arr[idx : idx + example['len']] = example['ids']
-        idx += example['len']
+    for batch_idx in tqdm(range(total_batches), desc=f'writing {filename}'):
+        # Batch together samples for faster write
+        batch = dset.shard(num_shards=total_batches, index=batch_idx, contiguous=True).with_format('numpy')
+        arr_batch = np.concatenate(batch['ids'])
+        # Write into mmap
+        arr[idx : idx + len(arr_batch)] = arr_batch
+        idx += len(arr_batch)
     arr.flush()
 
 # train.bin is ~17GB, val.bin ~8.5MB
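
The change replaces one tiny memmap assignment per example with 1024 large contiguous writes, each covering one shard of the tokenized dataset. For reference, a minimal read-back sketch, not part of the patch: it assumes train.bin was produced by the loop above in the current directory, and only checks the total token count and the first few GPT-2 BPE token ids.

    import numpy as np

    # Hypothetical sanity check: memory-map the written file read-only and inspect it.
    tokens = np.memmap('train.bin', dtype=np.uint16, mode='r')
    print(len(tokens))   # total number of uint16 tokens written (arr_len)
    print(tokens[:10])   # first few GPT-2 BPE token ids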