"""
Sample from a trained model
"""
import os
import pickle
from contextlib import nullcontext
import torch
import tiktoken
from model import GPTConfig, GPT

# -----------------------------------------------------------------------------
init_from = 'resume' # either 'resume' (from an out_dir) or a gpt2 variant (e.g. 'gpt2-xl')
out_dir = 'out' # ignored if init_from is not 'resume'
start = "\n" # or "<|endoftext|>" etc. Can also specify a file: "FILE:prompt.txt"
num_samples = 10 # number of samples to draw
max_new_tokens = 500 # number of tokens generated in each sample
temperature = 0.8 # 1.0 = no change, < 1.0 = less random, > 1.0 = more random in predictions
top_k = 200 # retain only the top_k most likely tokens, clamp others to have 0 probability
seed = 1337
device = 'cuda' # examples: 'cpu', 'cuda', 'cuda:0', 'cuda:1', etc.
dtype = 'bfloat16' if torch.cuda.is_available() and torch.cuda.is_bf16_supported() else 'float16' # 'float32' or 'bfloat16' or 'float16'
compile = False # use PyTorch 2.0 to compile the model to be faster
exec(open('configurator.py').read()) # overrides from command line or config file
# -----------------------------------------------------------------------------
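# any setting above can be overridden at run time via configurator.py, e.g. with --key=value flags
# (hypothetical invocation): python sample.py --temperature=0.7 --num_samples=3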
torch.manual_seed(seed)
torch.cuda.manual_seed(seed)
torch.backends.cuda.matmul.allow_tf32 = True # allow tf32 on matmul
torch.backends.cudnn.allow_tf32 = True # allow tf32 on cudnn
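# tf32 trades a small amount of matmul precision for speed on Ampere-class (and newer) GPUs;
# these flags have no effect on older hardware or on CPU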
device_type = 'cuda' if 'cuda' in device else 'cpu' # for later use in torch.autocast
ptdtype = {'float32': torch.float32, 'bfloat16': torch.bfloat16, 'float16': torch.float16}[dtype]
ctx = nullcontext() if device_type == 'cpu' else torch.amp.autocast(device_type=device_type, dtype=ptdtype)
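# on GPU, ctx runs the forward pass under mixed precision (torch.amp.autocast) in the chosen dtype;
# on CPU it is a plain nullcontext, i.e. everything stays in float32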

# model
if init_from == 'resume':
    # init from a model saved in a specific directory
    ckpt_path = os.path.join(out_dir, 'ckpt.pt')
    checkpoint = torch.load(ckpt_path, map_location=device)
    gptconf = GPTConfig(**checkpoint['model_args'])
    model = GPT(gptconf)
    state_dict = checkpoint['model']
    unwanted_prefix = '_orig_mod.'
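    # checkpoints saved from a torch.compile'd model store parameters under an '_orig_mod.' prefix
    # (the compiled wrapper holds the real module there); strip it so the keys match the plain GPT module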
    for k,v in list(state_dict.items()):
        if k.startswith(unwanted_prefix):
            state_dict[k[len(unwanted_prefix):]] = state_dict.pop(k)
    model.load_state_dict(state_dict)
elif init_from.startswith('gpt2'):
    # init from a given GPT-2 model
    model = GPT.from_pretrained(init_from, dict(dropout=0.0))
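    # dropout is forced to 0.0 in the config since this script only runs inference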

model.eval()
model.to(device)
if compile:
    model = torch.compile(model) # requires PyTorch 2.0 (optional)

# look for the meta pickle in case it is available in the dataset folder
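# meta.pkl is produced by the character-level data prep scripts (e.g. data/shakespeare_char/prepare.py)
# and stores the stoi/itos vocabulary mappings used below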
load_meta = False
if init_from == 'resume' and 'config' in checkpoint and 'dataset' in checkpoint['config']: # older checkpoints might not have these...
    meta_path = os.path.join('data', checkpoint['config']['dataset'], 'meta.pkl')
    load_meta = os.path.exists(meta_path)
if load_meta:
    print(f"Loading meta from {meta_path}...")
    with open(meta_path, 'rb') as f:
        meta = pickle.load(f)
    # TODO want to make this more general to arbitrary encoder/decoder schemes
    stoi, itos = meta['stoi'], meta['itos']
    encode = lambda s: [stoi[c] for c in s]
    decode = lambda l: ''.join([itos[i] for i in l])
else:
    # ok let's assume gpt-2 encodings by default
    print("No meta.pkl found, assuming GPT-2 encodings...")
    enc = tiktoken.get_encoding("gpt2")
    encode = lambda s: enc.encode(s, allowed_special={"<|endoftext|>"})
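    # allowed_special lets a literal "<|endoftext|>" in the prompt map to its special token id
    # instead of raising an error inside tiktoken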
    decode = lambda l: enc.decode(l)

# encode the beginning of the prompt
if start.startswith('FILE:'):
    with open(start[5:], 'r', encoding='utf-8') as f:
        start = f.read()
start_ids = encode(start)
x = (torch.tensor(start_ids, dtype=torch.long, device=device)[None, ...])
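# the [None, ...] indexing adds a batch dimension, so x has shape (1, len(start_ids)) as expected by model.generate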

# run generation
with torch.no_grad():
    with ctx:
        for k in range(num_samples):
            y = model.generate(x, max_new_tokens, temperature=temperature, top_k=top_k)
            print(decode(y[0].tolist()))
            print('---------------')
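
# example usage (assuming a checkpoint exists in out-shakespeare-char/, or internet access to fetch GPT-2 weights):
#   python sample.py --out_dir=out-shakespeare-char
#   python sample.py --init_from=gpt2-xl --start="FILE:prompt.txt" --num_samples=2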