mirror of
https://github.com/osmarks/nanogpt-experiments.git
synced 2024-11-11 04:19:57 +00:00
allow the prompt to come from a file
This commit is contained in:
parent
21675d7755
commit
e0c689cf38
@ -11,7 +11,7 @@ from model import GPTConfig, GPT
|
|||||||
# -----------------------------------------------------------------------------
# Sampling configuration — plain module-level defaults, overridable externally.
init_from = 'resume' # either 'resume' (from an out_dir) or a gpt2 variant (e.g. 'gpt2-xl')
out_dir = 'out' # ignored if init_from is not 'resume'
start = "\n" # or "<|endoftext|>" or etc. Can also specify a file, use as: "FILE:prompt.txt"
num_samples = 10 # number of samples to draw
max_new_tokens = 500 # number of tokens generated in each sample
temperature = 0.8 # 1.0 = no change, < 1.0 = less random, > 1.0 = more random, in predictions
||||||
@ -74,6 +74,9 @@ else:
|
|||||||
decode = lambda l: enc.decode(l)
|
decode = lambda l: enc.decode(l)
|
||||||
|
|
||||||
# Resolve the prompt text: a start string of the form "FILE:<path>" means the
# actual prompt lives in that file; otherwise `start` is used literally.
if start.startswith('FILE:'):
    prompt_path = start[len('FILE:'):]
    with open(prompt_path, 'r', encoding='utf-8') as prompt_file:
        start = prompt_file.read()

# Tokenize the prompt and add a leading batch dimension for the model.
start_ids = encode(start)
x = torch.tensor(start_ids, dtype=torch.long, device=device)[None, ...]
||||||
|
|
||||||
|
Loading…
Reference in New Issue
Block a user