Merge pull request #2 from NVIDIA/single-gpu-and-0.4

train.py single gpu and 0.4 update

Branch: master
Rafael Valle authored 6 years ago; committed by GitHub
Commit: d750fcf395
2 changed files with 20 additions and 7 deletions:

1. README.md: +1, -1
2. train.py: +19, -6

README.md (+1, -1)

@@ -31,7 +31,7 @@ Distributed and FP16 support relies on work by Christian Sarofeen and NVIDIA's
 2. (OPTIONAL) `tensorboard --logdir=outdir/logdir`
 ## Multi-GPU (distributed) and FP16 Training
-1. `python -m multiproc train.py --output_directory=/outdir --log_directory=/logdir --hparams=distributed_run=True --fp16_run=True`
+1. `python -m multiproc train.py --output_directory=outdir --log_directory=logdir --hparams=distributed_run=True,fp16_run=True`
 ## Inference
 1. `jupyter notebook --ip=127.0.0.1 --port=31337`
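The README fix folds both overrides into one comma-separated `--hparams` string. A minimal sketch of why the comma matters, assuming the flag is fed to `tf.contrib.training.HParams` (TensorFlow 1.x), which Tacotron-style repos of this era commonly used; `create_hparams` here is a hypothetical stand-in for the repo's own factory:

```python
# Sketch under the assumption that tf.contrib.training.HParams (TF 1.x)
# backs the --hparams flag; create_hparams is a hypothetical stand-in.
import tensorflow as tf

def create_hparams(hparams_string=None):
    hparams = tf.contrib.training.HParams(
        distributed_run=False,
        fp16_run=False,
    )
    if hparams_string:
        # HParams.parse takes ONE string of comma-separated name=value
        # pairs, so "distributed_run=True,fp16_run=True" sets both, while
        # a separate --fp16_run=True token would never reach the parser.
        hparams.parse(hparams_string)
    return hparams

hparams = create_hparams("distributed_run=True,fp16_run=True")
assert hparams.distributed_run and hparams.fp16_run
```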

train.py (+19, -6)

@@ -78,15 +78,19 @@ def prepare_directories_and_logger(output_directory, log_directory, rank):

 def load_model(hparams):
     model = Tacotron2(hparams).cuda()
     model = batchnorm_to_float(model.half()) if hparams.fp16_run else model
-    model = DistributedDataParallel(model) \
-        if hparams.distributed_run else DataParallel(model)
+
+    if hparams.distributed_run:
+        model = DistributedDataParallel(model)
+    elif torch.cuda.device_count() > 1:
+        model = DataParallel(model)
+
     return model


 def warm_start_model(checkpoint_path, model):
     assert os.path.isfile(checkpoint_path)
     print("Warm starting model from checkpoint '{}'".format(checkpoint_path))
-    checkpoint_dict = torch.load(checkpoint_path)
+    checkpoint_dict = torch.load(checkpoint_path, map_location='cpu')
     model.load_state_dict(checkpoint_dict['state_dict'])
     return model
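Two things change in this hunk: the model is only wrapped when the run actually needs it, and checkpoints are deserialized to host memory first. A self-contained sketch of both, with a stand-in module rather than the repo's `Tacotron2`:

```python
# Self-contained sketch of this hunk's logic; nn.Linear stands in for
# the repo's Tacotron2, and both function names are illustrative.
import torch
from torch import nn

def load_model_sketch(model, distributed_run=False):
    # Wrap only when it buys something: DistributedDataParallel for
    # multi-process runs, DataParallel only when this process actually
    # sees more than one GPU. A plain single-GPU run now stays
    # unwrapped, which the old unconditional wrapping did not allow.
    if distributed_run:
        model = nn.parallel.DistributedDataParallel(model)
    elif torch.cuda.device_count() > 1:
        model = nn.DataParallel(model)
    return model

def warm_start_sketch(checkpoint_path, model):
    # map_location='cpu' lands the tensors in host memory no matter
    # which GPU they were saved from, avoiding CUDA device-ordinal
    # errors (and a device-0 memory spike) when restoring on a
    # differently configured machine.
    checkpoint_dict = torch.load(checkpoint_path, map_location='cpu')
    model.load_state_dict(checkpoint_dict['state_dict'])
    return model

model = load_model_sketch(nn.Linear(4, 2))  # stays unwrapped on a 0/1-GPU box
```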
@@ -124,8 +128,13 @@ def validate(model, criterion, valset, iteration, batch_size, n_gpus,
                             pin_memory=False, collate_fn=collate_fn)

     val_loss = 0.0
+    if distributed_run or torch.cuda.device_count() > 1:
+        batch_parser = model.module.parse_batch
+    else:
+        batch_parser = model.parse_batch
+
     for i, batch in enumerate(val_loader):
-        x, y = model.module.parse_batch(batch)
+        x, y = batch_parser(batch)
         y_pred = model(x)
         loss = criterion(y_pred, y)
         reduced_val_loss = reduce_tensor(loss.data, n_gpus)[0] \
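The `batch_parser` indirection exists because `DataParallel` and `DistributedDataParallel` only proxy `forward()`; custom helpers such as `parse_batch` stay on the inner module and must be reached through `.module`. A toy demonstration (the same pattern is applied again in `train()` in the hunks below):

```python
# Toy module (not the repo's Tacotron2) showing why parse_batch must be
# looked up on model.module once the model is wrapped.
import torch
from torch import nn

class Toy(nn.Module):
    def forward(self, x):
        return x * 2

    def parse_batch(self, batch):  # custom helper, unknown to nn.Module
        return batch.float()

model = Toy()
model.parse_batch(torch.ones(2))           # works on the bare module

wrapped = nn.DataParallel(model)
# wrapped.parse_batch(...) would raise AttributeError: the wrapper only
# forwards __call__/forward, so the helper is reached via .module.
wrapped.module.parse_batch(torch.ones(2))
```

Resolving the bound method once, before the loop, also keeps the per-batch path free of the wrapped-or-not branch.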
@@ -184,6 +193,10 @@ def train(output_directory, log_directory, checkpoint_path, warm_start, n_gpus,
     epoch_offset = max(0, int(iteration / len(train_loader)))

     model.train()
+    if hparams.distributed_run or torch.cuda.device_count() > 1:
+        batch_parser = model.module.parse_batch
+    else:
+        batch_parser = model.parse_batch
     # ================ MAIN TRAINING LOOP! ===================
     for epoch in range(epoch_offset, hparams.epochs):
         print("Epoch: {}".format(epoch))
@@ -193,7 +206,7 @@ def train(output_directory, log_directory, checkpoint_path, warm_start, n_gpus,
             param_group['lr'] = learning_rate

         model.zero_grad()
-        x, y = model.module.parse_batch(batch)
+        x, y = batch_parser(batch)
         y_pred = model(x)
         loss = criterion(y_pred, y)
         reduced_loss = reduce_tensor(loss.data, n_gpus)[0] \
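The surrounding context averages the loss across ranks through the repo's `reduce_tensor` helper before logging. Its implementation is not shown in this diff; a common PyTorch 0.4-era shape for such a helper, offered here only as an assumption, is:

```python
# Assumed shape of a reduce_tensor-style helper (the repo's own code may
# differ): sum a tensor across all ranks, then divide by the world size.
import torch.distributed as dist

def reduce_tensor(tensor, n_gpus):
    rt = tensor.clone()
    # dist.reduce_op is the 0.4-era spelling; modern PyTorch uses
    # dist.ReduceOp.SUM.
    dist.all_reduce(rt, op=dist.reduce_op.SUM)
    rt /= n_gpus
    return rt
```

Averaging rather than summing makes the logged value the loss over the combined batch, so single-GPU and distributed runs report on the same scale.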
@@ -205,7 +218,7 @@ def train(output_directory, log_directory, checkpoint_path, warm_start, n_gpus,
             else:
                 loss.backward()
             grad_norm = torch.nn.utils.clip_grad_norm(
-                model.module.parameters(), hparams.grad_clip_thresh)
+                model.parameters(), hparams.grad_clip_thresh)

             optimizer.step()
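The final hunk clips against `model.parameters()` instead of `model.module.parameters()`, which works in every configuration: `nn.Module.parameters()` recurses into submodules, and both wrappers register the inner network as their `.module` child, so the two calls yield the same parameters when the model is wrapped, while the unwrapped single-GPU model has no `.module` at all. A quick check:

```python
# parameters() on a DataParallel wrapper yields the inner module's
# parameters, so clipping through the wrapper clips the real weights.
import torch
from torch import nn

net = nn.Linear(4, 2)
wrapped = nn.DataParallel(net)
assert len(list(wrapped.parameters())) == len(list(net.parameters()))

wrapped(torch.randn(3, 4)).sum().backward()
# Note: PyTorch 0.4 deprecated clip_grad_norm in favor of the in-place
# clip_grad_norm_; the diff still uses the old (then-working) name.
torch.nn.utils.clip_grad_norm_(wrapped.parameters(), max_norm=1.0)
```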
