mirror of https://github.com/osmarks/nanogpt-experiments.git
synced 2025-10-31 15:23:01 +00:00

commit be571fff2c
Before:

    length of dataset in characters: 1115394
    all the unique characters: !$&',-.3:;?ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz
    vocab size: 65
    train has 1003854 tokens
    val has 111540 tokens

After:

    length of dataset in characters: 1,115,394
    all the unique characters: !$&',-.3:;?ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz
    vocab size: 65
    train has 1,003,854 tokens
    val has 111,540 tokens
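The whole change is adding the ',' thousands-separator option to the f-string format specifiers used in the script's print statements. A minimal standalone illustration (the value is taken from the output above):

# the ',' option in a Python format spec groups digits with commas
n = 1115394
print(f"length of dataset in characters: {n}")    # length of dataset in characters: 1115394
print(f"length of dataset in characters: {n:,}")  # length of dataset in characters: 1,115,394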
		
			
				
	
	
		
68 lines · 2.1 KiB · Python
| """
 | |
| Prepare the Shakespeare dataset for character-level language modeling.
 | |
| So instead of encoding with GPT-2 BPE tokens, we just map characters to ints.
 | |
| Will save train.bin, val.bin containing the ids, and meta.pkl containing the
 | |
| encoder and decoder and some other related info.
 | |
| """
 | |
| import os
 | |
| import pickle
 | |
| import requests
 | |
| import numpy as np
 | |
| 
 | |
| # download the tiny shakespeare dataset
 | |
| if not os.path.exists('input.txt'):
 | |
|     data_url = 'https://raw.githubusercontent.com/karpathy/char-rnn/master/data/tinyshakespeare/input.txt'
 | |
|     with open('input.txt', 'w') as f:
 | |
|         f.write(requests.get(data_url).text)
 | |
| 
 | |
| with open('input.txt', 'r') as f:
 | |
|     data = f.read()
 | |
| print(f"length of dataset in characters: {len(data):,}")
 | |
| 
 | |
| # get all the unique characters that occur in this text
 | |
| chars = sorted(list(set(data)))
 | |
| vocab_size = len(chars)
 | |
| print("all the unique characters:", ''.join(chars))
 | |
| print(f"vocab size: {vocab_size:,}")
 | |
| 
 | |
| # create a mapping from characters to integers
 | |
| stoi = { ch:i for i,ch in enumerate(chars) }
 | |
| itos = { i:ch for i,ch in enumerate(chars) }
 | |
| def encode(s):
 | |
|     return [stoi[c] for c in s] # encoder: take a string, output a list of integers
 | |
| def decode(l):
 | |
|     ''.join([itos[i] for i in l]) # decoder: take a list of integers, output a string
 | |
| 
 | |
| # create the train and test splits
 | |
| n = len(data)
 | |
| train_data = data[:int(n*0.9)]
 | |
| val_data = data[int(n*0.9):]
 | |
| 
 | |
| # encode both to integers
 | |
| train_ids = encode(train_data)
 | |
| val_ids = encode(val_data)
 | |
| print(f"train has {len(train_ids):,} tokens")
 | |
| print(f"val has {len(val_ids):,} tokens")
 | |
| 
 | |
| # export to bin files
 | |
| train_ids = np.array(train_ids, dtype=np.uint16)
 | |
| val_ids = np.array(val_ids, dtype=np.uint16)
 | |
| train_ids.tofile('train.bin')
 | |
| val_ids.tofile('val.bin')
 | |
| 
 | |
| # save the meta information as well, to help us encode/decode later
 | |
| meta = {
 | |
|     'vocab_size': vocab_size,
 | |
|     'itos': itos,
 | |
|     'stoi': stoi,
 | |
| }
 | |
| with open('meta.pkl', 'wb') as f:
 | |
|     pickle.dump(meta, f)
 | |
| 
 | |
| # length of dataset in characters:  1115394
 | |
| # all the unique characters:
 | |
| #  !$&',-.3:;?ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz
 | |
| # vocab size: 65
 | |
| # train has 1003854 tokens
 | |
| # val has 111540 tokens
 |
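For reference, the files written by this script can be read back for training or sampling. The following is a minimal sketch, assuming the output names used above (train.bin, meta.pkl) and uint16 ids; the batch-loading details of the actual training code are not shown here:

import pickle
import numpy as np

# load the metadata saved by prepare.py and rebuild the decoder mapping
with open('meta.pkl', 'rb') as f:
    meta = pickle.load(f)
itos = meta['itos']

# memory-map the token ids written with tofile(); dtype must match (uint16)
train_ids = np.memmap('train.bin', dtype=np.uint16, mode='r')
print(f"train has {len(train_ids):,} tokens")

# decode the first few ids back into text to sanity-check the round trip
print(''.join(itos[int(i)] for i in train_ids[:100]))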