change autoLog

pull/720/head
Jackwaterveg 3 years ago
parent 205c56412e
commit 48e877375d

@@ -35,10 +35,8 @@ from deepspeech.utils import error_rate
 from deepspeech.utils import layer_tools
 from deepspeech.utils import mp_tools
 from deepspeech.utils.log import Log
+from deepspeech.utils.log import Autolog
-import auto_log
-import os
-from paddle import inference
 logger = Log(__name__).getlog()
@@ -228,28 +226,8 @@ class DeepSpeech2Tester(DeepSpeech2Trainer):
     def __init__(self, config, args):
         super().__init__(config, args)
-        pid = os.getpid()
-        gpu_id = int(os.environ['CUDA_VISIBLE_DEVICES'].split(',')[0])
-        infer_config = inference.Config()
-        infer_config.enable_use_gpu(100, gpu_id)
-        autolog = auto_log.AutoLogger(
-            model_name="deepspeech2",
-            model_precision="fp32",
-            batch_size=config.decoding.batch_size,
-            data_shape="dynamic",
-            save_path="./output/auto_log.lpg",
-            inference_config=infer_config,
-            pids=pid,
-            process_name=None,
-            gpu_ids=gpu_id,
-            time_keys=[
-                'preprocess_time', 'inference_time', 'postprocess_time'
-            ],
-            warmup=0)
-        self.autolog = autolog
-        logger = autolog.logger
-        logger.info("gpu_id:{}".format(gpu_id))
+        self.autolog = Autolog(batch_size=config.decoding.batch_size, model_name="deepspeech2", model_precision="fp32").getlog()

     def ordid2token(self, texts, texts_len):
         """ ord() id to chr() chr """
         trans = []
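The object stored on self.autolog is the underlying auto_log.AutoLogger, so the tester is expected to drive it around each decode step. A minimal sketch of that usage, assuming auto_log's usual times.start()/times.stamp()/times.end()/report() interface; preprocess/run_model/postprocess are hypothetical stand-ins and none of this is part of the diff above:

    # inside DeepSpeech2Tester (sketch only, not part of this commit)
    def test(self, test_loader):
        for audio, audio_len in test_loader:
            self.autolog.times.start()           # begin timing this batch
            feats = preprocess(audio)            # hypothetical feature extraction
            self.autolog.times.stamp()           # -> preprocess_time
            probs = run_model(feats, audio_len)  # hypothetical forward pass / decoding
            self.autolog.times.stamp()           # -> inference_time
            texts = postprocess(probs)           # hypothetical CTC-to-text conversion
            self.autolog.times.end(stamp=True)   # -> postprocess_time
        self.autolog.report()                    # aggregate and save the timing statistics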

@@ -17,6 +17,12 @@ import os
 import socket
 import sys
+import auto_log
+import os
+from paddle import inference
 FORMAT_STR = '[%(levelname)s %(asctime)s %(filename)s:%(lineno)d] %(message)s'
 DATE_FMT_STR = '%Y/%m/%d %H:%M:%S'
@@ -146,3 +152,29 @@ class Log():
     def getlog(self):
         return self.logger
+
+
+class Autolog:
+    def __init__(self, batch_size, model_name="DeepSpeech", model_precision="fp32"):
+        pid = os.getpid()
+        gpu_id = int(os.environ['CUDA_VISIBLE_DEVICES'].split(',')[0])
+        infer_config = inference.Config()
+        infer_config.enable_use_gpu(100, gpu_id)
+        autolog = auto_log.AutoLogger(
+            model_name=model_name,
+            model_precision=model_precision,
+            batch_size=batch_size,
+            data_shape="dynamic",
+            save_path="./output/auto_log.lpg",
+            inference_config=infer_config,
+            pids=pid,
+            process_name=None,
+            gpu_ids=gpu_id,
+            time_keys=[
+                'preprocess_time', 'inference_time', 'postprocess_time'
+            ],
+            warmup=0)
+        self.autolog = autolog
+
+    def getlog(self):
+        return self.autolog
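For reference, the new helper only wraps auto_log.AutoLogger construction. A minimal usage sketch, assuming the auto_log package and a GPU build of Paddle are installed; note that the constructor reads CUDA_VISIBLE_DEVICES, so it must be set before the object is created:

    import os
    os.environ.setdefault("CUDA_VISIBLE_DEVICES", "0")  # Autolog parses this variable and fails if it is unset

    from deepspeech.utils.log import Autolog

    # getlog() hands back the wrapped auto_log.AutoLogger instance
    benchmark_logger = Autolog(batch_size=8, model_name="deepspeech2", model_precision="fp32").getlog()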

@@ -2,9 +2,9 @@
 set -e
 source path.sh
-gpus=2
+gpus=0,1,2,3
 stage=0
-stop_stage=4
+stop_stage=100
 conf_path=conf/deepspeech2.yaml
 avg_num=1
@@ -31,10 +31,10 @@ fi
 if [ ${stage} -le 3 ] && [ ${stop_stage} -ge 3 ]; then
     # test ckpt avg_n
-    CUDA_VISIBLE_DEVICES=${gpus} ./local/test.sh ${conf_path} exp/${ckpt}/checkpoints/${avg_ckpt} || exit -1
+    CUDA_VISIBLE_DEVICES=0 ./local/test.sh ${conf_path} exp/${ckpt}/checkpoints/${avg_ckpt} || exit -1
 fi
 if [ ${stage} -le 4 ] && [ ${stop_stage} -ge 4 ]; then
     # export ckpt avg_n
-    CUDA_VISIBLE_DEVICES=${gpus} ./local/export.sh ${conf_path} exp/${ckpt}/checkpoints/${avg_ckpt} exp/${ckpt}/checkpoints/${avg_ckpt}.jit
+    CUDA_VISIBLE_DEVICES=0 ./local/export.sh ${conf_path} exp/${ckpt}/checkpoints/${avg_ckpt} exp/${ckpt}/checkpoints/${avg_ckpt}.jit
 fi
