diff --git a/paddlespeech/cls/exps/panns/deploy/predict.py b/paddlespeech/cls/exps/panns/deploy/predict.py
index 1dd0fb531..a6b735335 100644
--- a/paddlespeech/cls/exps/panns/deploy/predict.py
+++ b/paddlespeech/cls/exps/panns/deploy/predict.py
@@ -15,12 +15,15 @@ import argparse
 import os
 
 import numpy as np
+import paddle
 from paddle import inference
 from paddle.audio.datasets import ESC50
 from paddle.audio.features import LogMelSpectrogram
 from paddleaudio.backends import soundfile_load as load_audio
 from scipy.special import softmax
 
+import paddlespeech.utils
+
 # yapf: disable
 parser = argparse.ArgumentParser()
 parser.add_argument("--model_dir", type=str, required=True, default="./export", help="The directory to static model.")
@@ -73,13 +76,18 @@ class Predictor(object):
                  enable_mkldnn=False):
         self.batch_size = batch_size
 
-        model_file = os.path.join(model_dir, "inference.pdmodel")
-        params_file = os.path.join(model_dir, "inference.pdiparams")
+        if paddlespeech.utils.satisfy_paddle_version('3.0.0-beta'):
+            config = inference.Config(model_dir, 'inference')
+            config.disable_mkldnn()
+        else:
+            model_file = os.path.join(model_dir, 'inference.pdmodel')
+            params_file = os.path.join(model_dir, "inference.pdiparams")
+
+            assert os.path.isfile(model_file) and os.path.isfile(
+                params_file), 'Please check model and parameter files.'
 
-        assert os.path.isfile(model_file) and os.path.isfile(
-            params_file), 'Please check model and parameter files.'
+            config = inference.Config(model_file, params_file)
 
-        config = inference.Config(model_file, params_file)
         if device == "gpu":
             # set GPU configs accordingly
             # such as intialize the gpu memory, enable tensorrt
diff --git a/paddlespeech/cls/exps/panns/export_model.py b/paddlespeech/cls/exps/panns/export_model.py
index 63b22981a..e860b54aa 100644
--- a/paddlespeech/cls/exps/panns/export_model.py
+++ b/paddlespeech/cls/exps/panns/export_model.py
@@ -39,7 +39,8 @@ if __name__ == '__main__':
         input_spec=[
             paddle.static.InputSpec(
                 shape=[None, None, 64], dtype=paddle.float32)
-        ])
+        ],
+        full_graph=True)
 
     # Save in static graph model.
     paddle.jit.save(model, os.path.join(args.output_dir, "inference"))
diff --git a/paddlespeech/utils/__init__.py b/paddlespeech/utils/__init__.py
index 185a92b8d..66c492779 100644
--- a/paddlespeech/utils/__init__.py
+++ b/paddlespeech/utils/__init__.py
@@ -11,3 +11,19 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
+from packaging.version import Version
+
+
+def satisfy_version(source: str, target: str, dev_allowed: bool=True) -> bool:
+    if dev_allowed and source.startswith('0.0.0'):
+        target_version = Version('0.0.0')
+    else:
+        target_version = Version(target)
+
+    source_version = Version(source)
+    return source_version >= target_version
+
+
+def satisfy_paddle_version(target: str, dev_allowed: bool=True) -> bool:
+    import paddle
+    return satisfy_version(paddle.__version__, target, dev_allowed)