From 6cbb8878b4582902a09a856f8f91365b6b642b5a Mon Sep 17 00:00:00 2001
From: zxcd <228587199@qq.com>
Date: Tue, 3 Dec 2024 11:39:27 +0000
Subject: [PATCH] pir infer

---
 paddlespeech/t2s/exps/speedyspeech/inference.py | 7 ++-----
 1 file changed, 2 insertions(+), 5 deletions(-)

diff --git a/paddlespeech/t2s/exps/speedyspeech/inference.py b/paddlespeech/t2s/exps/speedyspeech/inference.py
index d4958bc49..f2688dbd0 100644
--- a/paddlespeech/t2s/exps/speedyspeech/inference.py
+++ b/paddlespeech/t2s/exps/speedyspeech/inference.py
@@ -49,15 +49,12 @@ def main():
     print("frontend done!")
 
     speedyspeech_config = inference.Config(
-        str(Path(args.inference_dir) / "speedyspeech.pdmodel"),
-        str(Path(args.inference_dir) / "speedyspeech.pdiparams"))
+        str(Path(args.inference_dir)), "speedyspeech")
     speedyspeech_config.enable_use_gpu(100, 0)
     speedyspeech_config.enable_memory_optim()
     speedyspeech_predictor = inference.create_predictor(speedyspeech_config)
 
-    pwg_config = inference.Config(
-        str(Path(args.inference_dir) / "pwg.pdmodel"),
-        str(Path(args.inference_dir) / "pwg.pdiparams"))
+    pwg_config = inference.Config(str(Path(args.inference_dir)), "pwg")
    pwg_config.enable_use_gpu(100, 0)
     pwg_config.enable_memory_optim()
     pwg_predictor = inference.create_predictor(pwg_config)
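
The patch switches both predictors from the two-file `inference.Config(model_file, params_file)` constructor to the directory-plus-prefix form used in the diff, so the predictor resolves the exported program and parameter files from `args.inference_dir` itself instead of hard-coding the `.pdmodel`/`.pdiparams` paths. Below is a minimal sketch of the new usage, assuming a PIR-style export directory; the directory path is hypothetical and only the calls that appear in the patch (`inference.Config`, `enable_use_gpu`, `enable_memory_optim`, `inference.create_predictor`) are taken as given.

```python
# Minimal sketch of the directory-plus-prefix Config usage from the patch.
# The export directory below is illustrative, not part of the patch.
from pathlib import Path

from paddle import inference

inference_dir = Path("exp/default/inference")  # hypothetical export directory

# Old style (removed by the patch): point at the two model files explicitly.
# config = inference.Config(
#     str(inference_dir / "speedyspeech.pdmodel"),
#     str(inference_dir / "speedyspeech.pdiparams"))

# New style (added by the patch): pass the directory and the model prefix;
# the predictor locates the matching program/params files in that directory.
config = inference.Config(str(inference_dir), "speedyspeech")
config.enable_use_gpu(100, 0)  # 100 MB initial GPU memory pool on device 0
config.enable_memory_optim()
predictor = inference.create_predictor(config)
```

The same pattern is applied to the Parallel WaveGAN vocoder with the `"pwg"` prefix, so one exported directory can hold both models side by side.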