From edb852e33ae534c5d185c3ea44e916247961511b Mon Sep 17 00:00:00 2001
From: zxcd <228587199@qq.com>
Date: Tue, 3 Dec 2024 11:49:29 +0000
Subject: [PATCH] add version control

---
 .../t2s/exps/speedyspeech/inference.py | 20 ++++++++++++++++---
 1 file changed, 17 insertions(+), 3 deletions(-)

diff --git a/paddlespeech/t2s/exps/speedyspeech/inference.py b/paddlespeech/t2s/exps/speedyspeech/inference.py
index f2688dbd0..5ae41abbb 100644
--- a/paddlespeech/t2s/exps/speedyspeech/inference.py
+++ b/paddlespeech/t2s/exps/speedyspeech/inference.py
@@ -48,13 +48,27 @@ def main():
         phone_vocab_path=args.phones_dict,
         tone_vocab_path=args.tones_dict)
     print("frontend done!")
-    speedyspeech_config = inference.Config(
-        str(Path(args.inference_dir)), "speedyspeech")
+    # after paddle 3.0, support new inference interface
+    if paddle.__version__ >= '3.0.0' or paddle.__version__ == '0.0.0':
+        speedyspeech_config = inference.Config(
+            str(Path(args.inference_dir)), "speedyspeech")
+    else:
+        speedyspeech_config = inference.Config(
+            str(Path(args.inference_dir) / "speedyspeech.pdmodel"),
+            str(Path(args.inference_dir) / "speedyspeech.pdiparams"))
+
     speedyspeech_config.enable_use_gpu(100, 0)
     speedyspeech_config.enable_memory_optim()
     speedyspeech_predictor = inference.create_predictor(speedyspeech_config)
 
-    pwg_config = inference.Config(str(Path(args.inference_dir)), "pwg")
+    # after paddle 3.0, support new inference interface
+    if paddle.__version__ >= '3.0.0' or paddle.__version__ == '0.0.0':
+        pwg_config = inference.Config(str(Path(args.inference_dir)), "pwg")
+    else:
+        pwg_config = inference.Config(
+            str(Path(args.inference_dir) / "pwg.pdmodel"),
+            str(Path(args.inference_dir) / "pwg.pdiparams"))
+
     pwg_config.enable_use_gpu(100, 0)
     pwg_config.enable_memory_optim()
     pwg_predictor = inference.create_predictor(pwg_config)
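
Note (not part of the patch): the change gates how inference.Config is built on the installed Paddle version. On Paddle >= 3.0 (or '0.0.0', which the patch treats as a development build) the Config takes the export directory plus a model-name prefix; on older versions it takes explicit "<prefix>.pdmodel" and "<prefix>.pdiparams" paths. The sketch below shows the same branching factored into a helper; the build_config function, the args object, and the use of packaging.version (instead of the patch's lexicographic string comparison) are illustrative assumptions, not code from this PR.

# Minimal sketch of the version-gated Config construction, assuming the
# exported files follow the "<prefix>.pdmodel" / "<prefix>.pdiparams" naming
# used in the diff and that the `packaging` package is available.
from pathlib import Path

import paddle
from paddle import inference
from packaging.version import parse as parse_version


def build_config(inference_dir: str, prefix: str) -> inference.Config:
    """Create an inference.Config for either the new (>=3.0) or old Paddle API."""
    version = paddle.__version__
    # '0.0.0' is reported by development builds; the patch treats it as the new API.
    is_new_api = version == '0.0.0' or parse_version(version) >= parse_version('3.0.0')
    if is_new_api:
        # New interface: pass the export directory and the model name prefix.
        return inference.Config(str(Path(inference_dir)), prefix)
    # Old interface: pass explicit model and parameter file paths.
    return inference.Config(
        str(Path(inference_dir) / f"{prefix}.pdmodel"),
        str(Path(inference_dir) / f"{prefix}.pdiparams"))


# Usage mirroring the two call sites in the patch (hypothetical `args`):
# speedyspeech_config = build_config(args.inference_dir, "speedyspeech")
# pwg_config = build_config(args.inference_dir, "pwg")

Using packaging.version avoids the pitfall of comparing version strings lexicographically (e.g. '10.0.0' < '3.0.0' as strings), though for the versions Paddle currently ships the patch's string comparison behaves the same.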