@@ -185,46 +185,47 @@ class Trainer():
 
     def train(self):
         """The training process control by epoch."""
-        from_scratch = self.resume_or_scratch()
-        if from_scratch:
-            # save init model, i.e. 0 epoch
-            self.save(tag='init', infos=None)
-        self.lr_scheduler.step(self.epoch)
-        if self.parallel and hasattr(self.train_loader, "batch_sampler"):
-            self.train_loader.batch_sampler.set_epoch(self.epoch)
+        with Timer("Load/Init Model: {}"):
+            from_scratch = self.resume_or_scratch()
+            if from_scratch:
+                # save init model, i.e. 0 epoch
+                self.save(tag='init', infos=None)
+            self.lr_scheduler.step(self.epoch)
+            if self.parallel and hasattr(self.train_loader, "batch_sampler"):
+                self.train_loader.batch_sampler.set_epoch(self.epoch)
 
         logger.info(f"Train Total Examples: {len(self.train_loader.dataset)}")
         while self.epoch < self.config.training.n_epoch:
-            self.model.train()
-            try:
-                data_start_time = time.time()
-                for batch_index, batch in enumerate(self.train_loader):
-                    dataload_time = time.time() - data_start_time
-                    msg = "Train: Rank: {}, ".format(dist.get_rank())
-                    msg += "epoch: {}, ".format(self.epoch)
-                    msg += "step: {}, ".format(self.iteration)
-                    msg += "batch : {}/{}, ".format(batch_index + 1,
-                                                    len(self.train_loader))
-                    msg += "lr: {:>.8f}, ".format(self.lr_scheduler())
-                    msg += "data time: {:>.3f}s, ".format(dataload_time)
-                    self.train_batch(batch_index, batch, msg)
-            except Exception as e:
-                logger.error(e)
-                raise e
+            with Timer("Epoch-Train Time Cost: {}"):
+                self.model.train()
+                try:
+                    data_start_time = time.time()
+                    for batch_index, batch in enumerate(self.train_loader):
+                        dataload_time = time.time() - data_start_time
+                        msg = "Train: Rank: {}, ".format(dist.get_rank())
+                        msg += "epoch: {}, ".format(self.epoch)
+                        msg += "step: {}, ".format(self.iteration)
+                        msg += "batch : {}/{}, ".format(batch_index + 1,
+                                                        len(self.train_loader))
+                        msg += "lr: {:>.8f}, ".format(self.lr_scheduler())
+                        msg += "data time: {:>.3f}s, ".format(dataload_time)
+                        self.train_batch(batch_index, batch, msg)
+                        data_start_time = time.time()
+                except Exception as e:
+                    logger.error(e)
+                    raise e
 
-            total_loss, num_seen_utts = self.valid()
-            if dist.get_world_size() > 1:
-                num_seen_utts = paddle.to_tensor(num_seen_utts)
-                # the default operator in all_reduce function is sum.
-                dist.all_reduce(num_seen_utts)
-                total_loss = paddle.to_tensor(total_loss)
-                dist.all_reduce(total_loss)
-                cv_loss = total_loss / num_seen_utts
-                cv_loss = float(cv_loss)
-            else:
-                cv_loss = total_loss / num_seen_utts
+            with Timer("Eval Time Cost: {}"):
+                total_loss, num_seen_utts = self.valid()
+                if dist.get_world_size() > 1:
+                    num_seen_utts = paddle.to_tensor(num_seen_utts)
+                    # the default operator in all_reduce function is sum.
+                    dist.all_reduce(num_seen_utts)
+                    total_loss = paddle.to_tensor(total_loss)
+                    dist.all_reduce(total_loss)
+                    cv_loss = total_loss / num_seen_utts
+                    cv_loss = float(cv_loss)
+                else:
+                    cv_loss = total_loss / num_seen_utts
 
             logger.info(
                 'Epoch {} Val info val_loss {}'.format(self.epoch, cv_loss))
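One subtlety in the eval hunk worth calling out: `dist.all_reduce` sums by default, and the code reduces the raw loss total and the utterance count separately, dividing only once at the end. That makes `cv_loss` the per-utterance loss over the whole validation set, rather than an average of per-rank means, which would be skewed whenever ranks see different numbers of utterances. A toy two-rank illustration in plain Python (no distributed setup; the numbers are made up):

```python
# Rank 0 saw 3 utterances with total loss 6.0; rank 1 saw 1 with total loss 10.0.
rank_totals = [6.0, 10.0]
rank_counts = [3, 1]

# What the code computes: all_reduce(sum) both quantities, then divide once.
global_mean = sum(rank_totals) / sum(rank_counts)  # 16.0 / 4 = 4.0

# The tempting alternative, averaging per-rank means, over-weights rank 1.
mean_of_means = (6.0 / 3 + 10.0 / 1) / 2           # (2.0 + 10.0) / 2 = 6.0

assert global_mean != mean_of_means
```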