Mirror of https://github.com/osmarks/nanogpt-experiments.git (synced 2024-12-18 14:10:28 +00:00)

Merge pull request #71 from cchan/patch-1

Zero-grad more aggressively to save memory
This commit is contained in:
commit 3611338959

train.py: 2 changed lines (1 addition, 1 deletion)
@@ -259,7 +259,6 @@ while True:
         break
 
     # forward backward update, with optional gradient accumulation to simulate larger batch size
-    optimizer.zero_grad(set_to_none=True)
     for micro_step in range(gradient_accumulation_steps):
         X, Y = get_batch('train')
         if ddp:
@@ -272,6 +271,7 @@ while True:
             logits, loss = model(X, Y)
         loss.backward()
     optimizer.step()
+    optimizer.zero_grad(set_to_none=True)
 
     # timing and logging
     t1 = time.time()
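The change moves optimizer.zero_grad(set_to_none=True) from before the gradient-accumulation loop to immediately after optimizer.step(). With set_to_none=True the gradient tensors are deallocated rather than just zeroed, so clearing them right after the step frees that memory for the next forward pass instead of holding it until the following backward. Below is a minimal sketch of the resulting loop shape; the toy model, optimizer, get_batch helper, and hyperparameters are illustrative stand-ins for nanoGPT's, not the actual train.py code.

import torch
import torch.nn as nn

model = nn.Linear(16, 1)                      # stand-in for the GPT model
optimizer = torch.optim.AdamW(model.parameters(), lr=1e-3)
gradient_accumulation_steps = 4

def get_batch():
    # stand-in for nanoGPT's get_batch('train')
    X = torch.randn(8, 16)
    Y = torch.randn(8, 1)
    return X, Y

for iter_num in range(10):
    # forward backward update, with gradient accumulation to simulate a larger batch
    for micro_step in range(gradient_accumulation_steps):
        X, Y = get_batch()
        loss = nn.functional.mse_loss(model(X), Y)
        loss.backward()                       # grads accumulate across micro-steps
    optimizer.step()
    # flush gradients as soon as the step is done: set_to_none=True drops the
    # gradient tensors entirely, so their memory is free during the next
    # forward pass rather than being held until just before backward()
    optimizer.zero_grad(set_to_none=True)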