mirror of https://github.com/osmarks/nanogpt-experiments.git synced 2024-09-21 03:39:44 +00:00

Pin memory only when training on GPU

Author: Nan Yang
Date:   2023-02-04 11:16:26 -08:00
Parent: 77e7e04c26
Commit: b8286f343e

train.py

@@ -113,7 +113,12 @@ def get_batch(split):
     x = torch.stack([torch.from_numpy((data[i:i+block_size]).astype(np.int64)) for i in ix])
     y = torch.stack([torch.from_numpy((data[i+1:i+1+block_size]).astype(np.int64)) for i in ix])
     # pin arrays x,y, which allows us to move them to GPU asynchronously (non_blocking=True)
-    x, y = x.pin_memory().to(device, non_blocking=True), y.pin_memory().to(device, non_blocking=True)
+    if "cuda" in device:
+        # GPU training
+        x, y = x.pin_memory().to(device, non_blocking=True), y.pin_memory().to(device, non_blocking=True)
+    else:
+        # CPU or MPS training
+        x, y = x.to(device), y.to(device)
     return x, y

 # init these up here, can override if init_from='resume' (i.e. from a checkpoint)
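For context on why the check matters: pin_memory() copies a host tensor into page-locked RAM, which is what lets the following .to(device, non_blocking=True) run as a truly asynchronous copy on a CUDA stream. On CPU there is no device transfer at all, and on MPS pinning brings no benefit, so the unconditional pin_memory() call was wasted work (or an error) off-GPU. Below is a minimal self-contained sketch of the same pattern; the random token array, batch_size, and block_size are illustrative stand-ins for the memory-mapped train.bin data, not part of the commit.

import numpy as np
import torch

device = "cuda" if torch.cuda.is_available() else "cpu"  # illustrative device selection
batch_size, block_size = 4, 8  # illustrative sizes, not from the commit

# random tokens standing in for the memory-mapped training data
data = np.random.randint(0, 100, size=1000).astype(np.uint16)

def get_batch():
    ix = torch.randint(len(data) - block_size, (batch_size,))
    x = torch.stack([torch.from_numpy((data[i:i+block_size]).astype(np.int64)) for i in ix])
    y = torch.stack([torch.from_numpy((data[i+1:i+1+block_size]).astype(np.int64)) for i in ix])
    if "cuda" in device:
        # page-locked host memory makes the non_blocking copies truly asynchronous
        x = x.pin_memory().to(device, non_blocking=True)
        y = y.pin_memory().to(device, non_blocking=True)
    else:
        # CPU or MPS training: plain synchronous move, nothing to pin
        x, y = x.to(device), y.to(device)
    return x, y

x, y = get_batch()
print(x.shape, y.shape, x.device)

Note that the substring test `"cuda" in device` also matches explicit device strings such as "cuda:0", so the fast path is taken for any CUDA target.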