mirror of https://github.com/osmarks/nanogpt-experiments.git synced 2025-09-01 18:37:58 +00:00

Fix for gradient_accumulation_steps training slow

This commit is contained in:
Otavio Good
2023-03-25 00:04:45 -07:00
parent a82b33b525
commit 978d4fe538
3 changed files with 5 additions and 3 deletions


@@ -10,7 +10,7 @@ wandb_run_name='gpt2-124M'
 # 12 batch size * 1024 block size * 5 gradaccum * 8 GPUs = 491,520
 batch_size = 12
 block_size = 1024
-gradient_accumulation_steps = 5
+gradient_accumulation_steps = 5 * 8
 # this makes total number of tokens be 300B
 max_iters = 600000
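
For context, a minimal sketch (not part of this commit) of the batch-size arithmetic behind the hunk above, written as plain Python. The num_gpus name and the assumption that the new global gradient_accumulation_steps value is divided by the DDP world size on each process are illustrative, not taken from this diff.

batch_size = 12                       # micro-batch size per GPU
block_size = 1024                     # sequence length in tokens
gradient_accumulation_steps = 5 * 8   # now a global (all-GPU) count, as in the hunk above
num_gpus = 8                          # assumed DDP world size

# 12 * 1024 * 40 = 491,520 tokens consumed per optimizer step across all GPUs
tokens_per_iter = batch_size * block_size * gradient_accumulation_steps
print(tokens_per_iter)

# over 600,000 iterations this is ~295B tokens, i.e. roughly the 300B quoted in the config
max_iters = 600000
print(tokens_per_iter * max_iters)

# assumed per-process view: each of the 8 GPUs would run 40 // 8 = 5 micro-steps
# per iteration, matching the old per-GPU value of 5
print(gradient_accumulation_steps // num_gpus)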


@@ -14,6 +14,7 @@ wandb_project = 'shakespeare-char'
 wandb_run_name = 'mini-gpt'
 dataset = 'shakespeare_char'
+gradient_accumulation_steps = 1
 batch_size = 64
 block_size = 256 # context of up to 256 previous characters