Mirror of https://github.com/osmarks/nanogpt-experiments.git (synced 2024-12-18 14:10:28 +00:00)
Stay true to the README file and set grad accum to 5, so the default batch size is about 0.5M tokens and reproduces GPT-2.
parent 79dbe0086d
commit 001c1e7be7
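The ~0.5M figure in the commit message can be checked directly from the defaults in this diff. A minimal sketch of the arithmetic, assuming the 8-GPU training setup described in the nanoGPT README (the GPU count is an assumption; it does not appear in this diff):

# Tokens consumed per optimizer step under the new defaults.
gradient_accumulation_steps = 5   # set by this commit
batch_size = 12                   # micro-batch size per GPU
block_size = 1024                 # sequence length in tokens
n_gpus = 8                        # assumption: the README's 8-GPU setup

tokens_per_iter = gradient_accumulation_steps * batch_size * block_size * n_gpus
print(tokens_per_iter)  # 491520, i.e. roughly 0.5M tokens, matching GPT-2's batch size

With gradient_accumulation_steps = 1 the same product is only ~98K tokens per iteration, which is why the old default did not match the README.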
train.py: 2 lines changed (1 addition, 1 deletion)
@@ -45,7 +45,7 @@ wandb_project = 'owt'
 wandb_run_name = 'gpt2' # 'run' + str(time.time())
 # data
 dataset = 'openwebtext'
-gradient_accumulation_steps = 1 # used to simulate larger batch sizes
+gradient_accumulation_steps = 5 # used to simulate larger batch sizes
 batch_size = 12 # if gradient_accumulation_steps > 1, this is the micro-batch size
 block_size = 1024
 # model
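For context on the "used to simulate larger batch sizes" comment: gradient accumulation runs several micro-batches through backward() and sums their scaled gradients before taking a single optimizer step, so the effective batch is gradient_accumulation_steps * batch_size per GPU. A minimal PyTorch sketch of the pattern, not the actual loop in train.py; model, optimizer, and get_batch are hypothetical stand-ins:

import torch
import torch.nn.functional as F

def train_step(model, optimizer, get_batch, gradient_accumulation_steps=5):
    # One optimizer step over an effective batch of
    # gradient_accumulation_steps micro-batches.
    optimizer.zero_grad(set_to_none=True)
    for _ in range(gradient_accumulation_steps):
        x, y = get_batch()  # hypothetical loader: one micro-batch of (inputs, targets)
        logits = model(x)
        loss = F.cross_entropy(logits.view(-1, logits.size(-1)), y.view(-1))
        # Divide so the accumulated gradient equals the mean over the whole
        # effective batch, not the sum of per-micro-batch means.
        (loss / gradient_accumulation_steps).backward()
    optimizer.step()

Memory use stays at the micro-batch level, so this trades wall-clock time for the gradient statistics of a batch five times larger.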