diff --git a/paddlespeech/t2s/exps/speedyspeech/inference.py b/paddlespeech/t2s/exps/speedyspeech/inference.py
index d4958bc4..7a6a4340 100644
--- a/paddlespeech/t2s/exps/speedyspeech/inference.py
+++ b/paddlespeech/t2s/exps/speedyspeech/inference.py
@@ -18,6 +18,7 @@ from pathlib import Path
 
 import soundfile as sf
 from paddle import inference
+import paddlespeech.utils
 
 from paddlespeech.t2s.frontend.zh_frontend import Frontend
 
@@ -48,16 +49,27 @@ def main():
         phone_vocab_path=args.phones_dict,
         tone_vocab_path=args.tones_dict)
     print("frontend done!")
-    speedyspeech_config = inference.Config(
-        str(Path(args.inference_dir) / "speedyspeech.pdmodel"),
-        str(Path(args.inference_dir) / "speedyspeech.pdiparams"))
+    # Paddle >= 3.0 supports the new inference.Config(model_dir, model_prefix) interface
+    if paddlespeech.utils.satisfy_paddle_version('3.0.0-beta'):
+        speedyspeech_config = inference.Config(
+            str(Path(args.inference_dir)), "speedyspeech")
+    else:
+        speedyspeech_config = inference.Config(
+            str(Path(args.inference_dir) / "speedyspeech.pdmodel"),
+            str(Path(args.inference_dir) / "speedyspeech.pdiparams"))
+
     speedyspeech_config.enable_use_gpu(100, 0)
     speedyspeech_config.enable_memory_optim()
     speedyspeech_predictor = inference.create_predictor(speedyspeech_config)
 
-    pwg_config = inference.Config(
-        str(Path(args.inference_dir) / "pwg.pdmodel"),
-        str(Path(args.inference_dir) / "pwg.pdiparams"))
+    # Paddle >= 3.0 supports the new inference.Config(model_dir, model_prefix) interface
+    if paddlespeech.utils.satisfy_paddle_version('3.0.0-beta'):
+        pwg_config = inference.Config(str(Path(args.inference_dir)), "pwg")
+    else:
+        pwg_config = inference.Config(
+            str(Path(args.inference_dir) / "pwg.pdmodel"),
+            str(Path(args.inference_dir) / "pwg.pdiparams"))
+
     pwg_config.enable_use_gpu(100, 0)
     pwg_config.enable_memory_optim()
     pwg_predictor = inference.create_predictor(pwg_config)