add timer info

Branch: pull/819/head
Author: Hui Zhang, 3 years ago
Parent: b8601c756f
Commit: 65e666378d

@@ -48,9 +48,8 @@ class ClipGradByGlobalNormWithLog(paddle.nn.ClipGradByGlobalNorm):
             sum_square_list.append(sum_square)

             # debug log
-            if i < 10:
-                logger.debug(
-                    f"Grad Before Clip: {p.name}: {float(sum_square.sqrt()) }")
+            logger.debug(
+                f"Grad Before Clip: {p.name}: {float(sum_square.sqrt()) }")

         # all parameters have been filterd out
         if len(sum_square_list) == 0:
@@ -77,9 +76,8 @@ class ClipGradByGlobalNormWithLog(paddle.nn.ClipGradByGlobalNorm):
             params_and_grads.append((p, new_grad))

             # debug log
-            if i < 10:
-                logger.debug(
-                    f"Grad After Clip: {p.name}: {float(new_grad.square().sum().sqrt())}"
-                )
+            logger.debug(
+                f"Grad After Clip: {p.name}: {float(new_grad.square().sum().sqrt())}"
+            )

         return params_and_grads
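
These two hunks drop the `if i < 10:` guard, so the pre- and post-clip gradient norm is now logged at debug level for every parameter instead of only the first ten. A minimal usage sketch follows, assuming `ClipGradByGlobalNormWithLog` keeps the constructor of its parent `paddle.nn.ClipGradByGlobalNorm`; the model and learning rate are illustrative only:

    import paddle

    # hypothetical model and settings, for illustration only
    model = paddle.nn.Linear(10, 10)
    clip = ClipGradByGlobalNormWithLog(clip_norm=5.0)
    optimizer = paddle.optimizer.Adam(
        learning_rate=1e-3,
        parameters=model.parameters(),
        grad_clip=clip)
    # when the clipper runs (during optimizer.step() in dygraph mode), it emits
    # "Grad Before Clip: ..." / "Grad After Clip: ..." debug logs per parameter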

@@ -27,7 +27,7 @@ class Timer():
             do some thing
     """

-    def __init__(self, message):
+    def __init__(self, message=None):
         self.message = message

     def duration(self) -> str:
@@ -40,7 +40,8 @@ class Timer():
         return self

     def __exit__(self, type, value, traceback):
-        logger.info(self.message.format(self.duration()))
+        if self.message:
+            logger.info(self.message.format(self.duration()))

     def __call__(self) -> float:
         return time.time() - self.start
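
With `message` now defaulting to None and `__exit__` guarding on it, Timer can be used either with a log format string or silently as a plain stopwatch. A small usage sketch, assuming the rest of the class (an `__enter__` that records `self.start`) as implied by the hunk; `run_one_epoch` is a hypothetical placeholder:

    import time

    def run_one_epoch():      # hypothetical placeholder for the real work
        time.sleep(0.1)

    with Timer("Epoch-Train Time Cost: {}"):
        run_one_epoch()       # duration is logged on __exit__

    timer = Timer()           # no message: __exit__ now logs nothing
    with timer:
        run_one_epoch()
    elapsed = timer()         # __call__ still returns elapsed seconds as a float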

@@ -185,46 +185,47 @@ class Trainer():
     def train(self):
         """The training process control by epoch."""
-        with Timer("Load/Init Model: {}"):
-            from_scratch = self.resume_or_scratch()
-            if from_scratch:
-                # save init model, i.e. 0 epoch
-                self.save(tag='init', infos=None)
-
-            self.lr_scheduler.step(self.epoch)
-            if self.parallel and hasattr(self.train_loader, "batch_sampler"):
-                self.train_loader.batch_sampler.set_epoch(self.epoch)
+        from_scratch = self.resume_or_scratch()
+        if from_scratch:
+            # save init model, i.e. 0 epoch
+            self.save(tag='init', infos=None)
+
+        self.lr_scheduler.step(self.epoch)
+        if self.parallel and hasattr(self.train_loader, "batch_sampler"):
+            self.train_loader.batch_sampler.set_epoch(self.epoch)

         logger.info(f"Train Total Examples: {len(self.train_loader.dataset)}")
         while self.epoch < self.config.training.n_epoch:
-            self.model.train()
-            try:
-                data_start_time = time.time()
-                for batch_index, batch in enumerate(self.train_loader):
-                    dataload_time = time.time() - data_start_time
-                    msg = "Train: Rank: {}, ".format(dist.get_rank())
-                    msg += "epoch: {}, ".format(self.epoch)
-                    msg += "step: {}, ".format(self.iteration)
-                    msg += "batch : {}/{}, ".format(batch_index + 1,
-                                                    len(self.train_loader))
-                    msg += "lr: {:>.8f}, ".format(self.lr_scheduler())
-                    msg += "data time: {:>.3f}s, ".format(dataload_time)
-                    self.train_batch(batch_index, batch, msg)
-                    data_start_time = time.time()
-            except Exception as e:
-                logger.error(e)
-                raise e
-
-            total_loss, num_seen_utts = self.valid()
-            if dist.get_world_size() > 1:
-                num_seen_utts = paddle.to_tensor(num_seen_utts)
-                # the default operator in all_reduce function is sum.
-                dist.all_reduce(num_seen_utts)
-                total_loss = paddle.to_tensor(total_loss)
-                dist.all_reduce(total_loss)
-                cv_loss = total_loss / num_seen_utts
-                cv_loss = float(cv_loss)
-            else:
-                cv_loss = total_loss / num_seen_utts
+            with Timer("Epoch-Train Time Cost: {}"):
+                self.model.train()
+                try:
+                    data_start_time = time.time()
+                    for batch_index, batch in enumerate(self.train_loader):
+                        dataload_time = time.time() - data_start_time
+                        msg = "Train: Rank: {}, ".format(dist.get_rank())
+                        msg += "epoch: {}, ".format(self.epoch)
+                        msg += "step: {}, ".format(self.iteration)
+                        msg += "batch : {}/{}, ".format(batch_index + 1,
+                                                        len(self.train_loader))
+                        msg += "lr: {:>.8f}, ".format(self.lr_scheduler())
+                        msg += "data time: {:>.3f}s, ".format(dataload_time)
+                        self.train_batch(batch_index, batch, msg)
+                        data_start_time = time.time()
+                except Exception as e:
+                    logger.error(e)
+                    raise e
+
+            with Timer("Eval Time Cost: {}"):
+                total_loss, num_seen_utts = self.valid()
+                if dist.get_world_size() > 1:
+                    num_seen_utts = paddle.to_tensor(num_seen_utts)
+                    # the default operator in all_reduce function is sum.
+                    dist.all_reduce(num_seen_utts)
+                    total_loss = paddle.to_tensor(total_loss)
+                    dist.all_reduce(total_loss)
+                    cv_loss = total_loss / num_seen_utts
+                    cv_loss = float(cv_loss)
+                else:
+                    cv_loss = total_loss / num_seen_utts

             logger.info(
                 'Epoch {} Val info val_loss {}'.format(self.epoch, cv_loss))
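
The net effect in `Trainer.train()`: the old `Load/Init Model` timer around resume/initialization is removed, and instead each epoch's training loop and each validation pass are wrapped in their own Timer, so every epoch logs an `Epoch-Train Time Cost` and an `Eval Time Cost` line. Distilled, the pattern is roughly the sketch below (placeholder functions and values, not the actual Trainer internals):

    n_epoch, epoch = 5, 0         # illustrative values

    def train_one_epoch():        # hypothetical stand-in for the batch loop above
        pass

    def validate():               # hypothetical stand-in for self.valid()
        return 0.0

    while epoch < n_epoch:
        with Timer("Epoch-Train Time Cost: {}"):
            train_one_epoch()
        with Timer("Eval Time Cost: {}"):
            cv_loss = validate()
        logger.info('Epoch {} Val info val_loss {}'.format(epoch, cv_loss))
        epoch += 1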
