Mirror of https://github.com/osmarks/nanogpt-experiments.git, synced 2024-11-10 20:09:58 +00:00
Zero-grad more aggressively to save memory
This commit is contained in:
parent 2c7806db6e
commit 67166079c9
train.py: 2 changed lines (1 addition, 1 deletion)
@@ -259,7 +259,6 @@ while True:
         break

     # forward backward update, with optional gradient accumulation to simulate larger batch size
-    optimizer.zero_grad(set_to_none=True)
     for micro_step in range(gradient_accumulation_steps):
         X, Y = get_batch('train')
         if ddp:
@@ -272,6 +271,7 @@ while True:
             logits, loss = model(X, Y)
         loss.backward()
     optimizer.step()
+    optimizer.zero_grad(set_to_none=True)

     # timing and logging
     t1 = time.time()
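The change moves optimizer.zero_grad(set_to_none=True) from the top of the accumulation loop to immediately after optimizer.step(). With set_to_none=True, PyTorch replaces each parameter's .grad with None rather than a zero-filled tensor, so zeroing right after the step releases the gradient memory for the rest of the iteration (timing, logging, evaluation) instead of holding it until the next forward/backward pass begins.

A minimal sketch of the resulting loop structure (standalone Python; the model, optimizer, and get_batch here are stand-ins, and the DDP/autocast details from train.py are omitted):

import torch

model = torch.nn.Linear(16, 16)                             # stand-in for the GPT model
optimizer = torch.optim.AdamW(model.parameters(), lr=1e-3)
gradient_accumulation_steps = 4

def get_batch(split):
    # stand-in for the real data loader
    x = torch.randn(8, 16)
    return x, x

for step in range(10):
    # forward backward update, with gradient accumulation to simulate a larger batch
    for micro_step in range(gradient_accumulation_steps):
        X, Y = get_batch('train')
        logits = model(X)
        loss = torch.nn.functional.mse_loss(logits, Y)
        loss.backward()                                      # grads accumulate across micro-steps
    optimizer.step()
    # zero the grads as soon as the update is done; set_to_none=True frees the
    # gradient tensors instead of keeping zeroed buffers alive until the next step
    optimizer.zero_grad(set_to_none=True)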