diff --git a/paddlespeech/t2s/exps/syn_utils.py b/paddlespeech/t2s/exps/syn_utils.py
index 75b98f1e0..d29dd8110 100644
--- a/paddlespeech/t2s/exps/syn_utils.py
+++ b/paddlespeech/t2s/exps/syn_utils.py
@@ -591,7 +591,7 @@ def get_predictor(
     config = inference.Config(
         str(Path(model_dir) / model_file),
         str(Path(model_dir) / params_file))
-    if device != "npu":
+    if paddle.__version__ <= "2.5.2" and paddle.__version__ != "0.0.0":
         config.enable_memory_optim()
         config.switch_ir_optim(True)
     if device == "gpu":
diff --git a/paddlespeech/t2s/exps/synthesize.py b/paddlespeech/t2s/exps/synthesize.py
index 5e403e7e7..fa3ac1569 100644
--- a/paddlespeech/t2s/exps/synthesize.py
+++ b/paddlespeech/t2s/exps/synthesize.py
@@ -219,18 +219,18 @@ def parse_args():
     )
     # other
     parser.add_argument(
-        "--ngpu", type=int, default=1, help="if ngpu == 0, use cpu or xpu or npu.")
+        "--ngpu", type=int, default=1, help="if you wish to use gpu, set ngpu > 0; otherwise xpu, npu or cpu is used.")
     parser.add_argument(
         "--nxpu",
         type=int,
         default=0,
-        help="if wish to use xpu, set ngpu == 0 and nxpu > 0, and if ngpu == 0 and nxpu == 0 and nnpu == 0, use cpu."
+        help="if you wish to use xpu, set ngpu == 0 and nxpu > 0; otherwise gpu, npu or cpu is used."
     )
     parser.add_argument(
         "--nnpu",
         type=int,
         default=0,
-        help="if wish to use npu, set ngpu == 0 and nxpu == 0 and nnpu > 0, and if ngpu == 0 and nxpu == 0 and nnpu == 0, use cpu."
+        help="if you wish to use npu, set ngpu == 0, nxpu == 0 and nnpu > 0; otherwise gpu, xpu or cpu is used."
     )
     parser.add_argument("--test_metadata", type=str, help="test metadata.")
     parser.add_argument("--output_dir", type=str, help="output dir.")
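Note on the flag semantics referenced in the help strings above: device selection gives `--ngpu` priority over `--nxpu`, which in turn has priority over `--nnpu`, with cpu as the fallback. The sketch below is a minimal illustration of that ordering, inferred from the old and new help text in this diff rather than copied from `synthesize.py`; the helper name `choose_device` is hypothetical.

```python
import paddle


def choose_device(ngpu: int, nxpu: int, nnpu: int) -> str:
    """Hypothetical sketch of the selection order implied by the help strings."""
    if ngpu > 0:
        device = "gpu"  # --ngpu wins whenever it is non-zero
    elif nxpu > 0:
        device = "xpu"  # only reached when ngpu == 0
    elif nnpu > 0:
        device = "npu"  # only reached when ngpu == 0 and nxpu == 0
    else:
        device = "cpu"  # fallback when all three counters are 0
    paddle.set_device(device)
    return device
```

Under this ordering, `--nnpu` only takes effect when both `--ngpu` and `--nxpu` are 0, which is why the npu help string keeps the `nxpu == 0` condition.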