From e0e94a109458d5c700f7d9fed8246c893b8709ab Mon Sep 17 00:00:00 2001
From: johnwildauer
Date: Tue, 24 Jan 2023 15:53:31 -0700
Subject: [PATCH] use GradScaler in model only if dtype is float16

---
 train.py | 20 +++++++++++++++-----
 1 file changed, 15 insertions(+), 5 deletions(-)

diff --git a/train.py b/train.py
index 796c9ce..1fa9805 100644
--- a/train.py
+++ b/train.py
@@ -68,7 +68,7 @@ min_lr = 6e-5 # minimum learning rate, should be ~= learning_rate/10 per Chinchi
 backend = 'nccl' # 'nccl', 'gloo', etc.
 # system
 device = 'cuda' # examples: 'cpu', 'cuda', 'cuda:0', 'cuda:1' etc., or try 'mps' on macbooks
-dtype = 'bfloat16' # 'float32' or 'bfloat16'
+dtype = 'bfloat16' # 'float32', 'bfloat16', or 'float16', the latter will auto implement a GradScaler
 compile = True # use PyTorch 2.0 to compile the model to be faster
 # -----------------------------------------------------------------------------
 config_keys = [k for k,v in globals().items() if not k.startswith('_') and isinstance(v, (int, float, bool, str))]
@@ -96,8 +96,8 @@ torch.manual_seed(1337 + seed_offset)
 torch.backends.cuda.matmul.allow_tf32 = True # allow tf32 on matmul
 torch.backends.cudnn.allow_tf32 = True # allow tf32 on cudnn
 device_type = 'cuda' if 'cuda' in device else 'cpu' # for later use in torch.autocast
-# note: float16 would require us to change the code to use a GradScaler
-ptdtype = {'float32': torch.float32, 'bfloat16': torch.bfloat16}[dtype]
+# note: float16 data type will automatically use a GradScaler
+ptdtype = {'float32': torch.float32, 'bfloat16': torch.bfloat16, 'float16': torch.float16}[dtype]
 ctx = nullcontext() if device_type == 'cpu' else torch.amp.autocast(device_type=device_type, dtype=ptdtype)
 
 # poor man's data loader, TODO evaluate need for actual DataLoader
@@ -169,6 +169,11 @@ if block_size < model.config.block_size:
     model.crop_block_size(block_size)
 model.to(device)
 
+# initialize a GradScaler if data type is float16
+if dtype == 'float16':
+    print(f'Initializing Gradient Scaler to account for dtype: {dtype}')
+    scaler = torch.cuda.amp.GradScaler()
+
 # optimizer
 optimizer = model.configure_optimizers(weight_decay, learning_rate, (beta1, beta2))
 if init_from == 'resume':
@@ -259,6 +264,7 @@ while True:
         break
 
     # forward backward update, with optional gradient accumulation to simulate larger batch size
+    # and using the GradScaler if data type is float16
    for micro_step in range(gradient_accumulation_steps):
         X, Y = get_batch('train')
         if ddp:
@@ -269,8 +275,12 @@ while True:
             model.require_backward_grad_sync = (micro_step == gradient_accumulation_steps - 1)
         with ctx:
             logits, loss = model(X, Y)
-        loss.backward()
-    optimizer.step()
+        scaler.scale(loss).backward() if dtype == 'float16' else loss.backward()
+    if dtype == 'float16':
+        scaler.step(optimizer)
+        scaler.update()
+    else:
+        optimizer.step()
     optimizer.zero_grad(set_to_none=True)
 
     # timing and logging
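
Editor's note: the patch branches on dtype at both the backward and the optimizer step. A common simplification, following the pattern in PyTorch's AMP examples, is to always construct the scaler and pass enabled=(dtype == 'float16'); when disabled, scaler.scale(), scaler.step(), and scaler.update() act as pass-throughs, so the training loop needs no dtype checks. The sketch below is not part of the patch: the tiny linear model, random data, and hyperparameters are illustrative stand-ins for nanoGPT's model, get_batch, and config, chosen only to keep the example self-contained.

import torch

# dtype selects the autocast/scaler behavior, mirroring the patch:
# 'float16' enables gradient scaling, 'bfloat16'/'float32' do not need it.
dtype = 'float16'  # 'float32', 'bfloat16', or 'float16'
device = 'cuda' if torch.cuda.is_available() else 'cpu'
ptdtype = {'float32': torch.float32, 'bfloat16': torch.bfloat16, 'float16': torch.float16}[dtype]

# illustrative stand-ins for the real model/optimizer/config in train.py
model = torch.nn.Linear(16, 1).to(device)
optimizer = torch.optim.AdamW(model.parameters(), lr=1e-3)
gradient_accumulation_steps = 4

# always create the scaler; it is a no-op unless dtype is float16
scaler = torch.cuda.amp.GradScaler(enabled=(dtype == 'float16'))

for micro_step in range(gradient_accumulation_steps):
    # random batch as a placeholder for get_batch('train')
    X = torch.randn(8, 16, device=device)
    Y = torch.randn(8, 1, device=device)
    with torch.amp.autocast(device_type=device, dtype=ptdtype, enabled=(device == 'cuda')):
        loss = torch.nn.functional.mse_loss(model(X), Y)
    # scales the loss before backward when float16; plain backward otherwise
    scaler.scale(loss).backward()
# unscales gradients, skips the step if any inf/NaN is found (float16 only), then updates the scale factor
scaler.step(optimizer)
scaler.update()
optimizer.zero_grad(set_to_none=True)

The scaler is only needed for float16 because its 5-bit exponent makes small gradients underflow to zero; bfloat16 keeps float32's exponent range, so it trains without loss scaling, which is why the pre-patch code supported bfloat16 with no scaler at all.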