From 12ea02fc488df6d58ff8ded9f4c47c43c6367e5b Mon Sep 17 00:00:00 2001
From: Hui Zhang
Date: Sun, 24 Oct 2021 13:38:26 +0000
Subject: [PATCH] fix no pos

---
 deepspeech/modules/embedding.py |  6 +++---
 deepspeech/modules/encoder.py   | 12 ++++++------
 2 files changed, 9 insertions(+), 9 deletions(-)

diff --git a/deepspeech/modules/embedding.py b/deepspeech/modules/embedding.py
index 52a64739..7e8a2a85 100644
--- a/deepspeech/modules/embedding.py
+++ b/deepspeech/modules/embedding.py
@@ -22,9 +22,9 @@ from deepspeech.utils.log import Log
 
 logger = Log(__name__).getlog()
 
-__all__ = ["NonePositionalEncoding", "PositionalEncoding", "RelPositionalEncoding"]
+__all__ = ["NoPositionalEncoding", "PositionalEncoding", "RelPositionalEncoding"]
 
-class NonePositionalEncoding(nn.Layer):
+class NoPositionalEncoding(nn.Layer):
     def __init__(self,
                  d_model: int,
                  dropout_rate: float,
@@ -38,7 +38,7 @@ class NonePositionalEncoding(nn.Layer):
 
     def position_encoding(self, offset: int, size: int) -> paddle.Tensor:
         return None
-    
+
 
 class PositionalEncoding(nn.Layer):
     def __init__(self,
diff --git a/deepspeech/modules/encoder.py b/deepspeech/modules/encoder.py
index 9e9e4849..bbe822af 100644
--- a/deepspeech/modules/encoder.py
+++ b/deepspeech/modules/encoder.py
@@ -26,7 +26,7 @@ from deepspeech.modules.attention import RelPositionMultiHeadedAttention
 from deepspeech.modules.conformer_convolution import ConvolutionModule
 from deepspeech.modules.embedding import PositionalEncoding
 from deepspeech.modules.embedding import RelPositionalEncoding
-from deepspeech.modules.embedding import NonePositionalEncoding
+from deepspeech.modules.embedding import NoPositionalEncoding
 from deepspeech.modules.encoder_layer import ConformerEncoderLayer
 from deepspeech.modules.encoder_layer import TransformerEncoderLayer
 from deepspeech.modules.mask import add_optional_chunk_mask
@@ -56,7 +56,7 @@ class BaseEncoder(nn.Layer):
                  positional_dropout_rate: float=0.1,
                  attention_dropout_rate: float=0.0,
                  input_layer: str="conv2d",
-                 pos_enc_layer_type: Optional[str, None]="abs_pos",
+                 pos_enc_layer_type: str="abs_pos",
                  normalize_before: bool=True,
                  concat_after: bool=False,
                  static_chunk_size: int=0,
@@ -77,8 +77,8 @@ class BaseEncoder(nn.Layer):
                 positional encoding
             input_layer (str): input layer type.
                 optional [linear, conv2d, conv2d6, conv2d8]
-            pos_enc_layer_type (str, or None): Encoder positional encoding layer type.
-                opitonal [abs_pos, scaled_abs_pos, rel_pos, None]
+            pos_enc_layer_type (str): Encoder positional encoding layer type.
+                optional [abs_pos, scaled_abs_pos, rel_pos, no_pos]
             normalize_before (bool):
                 True: use layer_norm before each sub-block of a layer.
                 False: use layer_norm after each sub-block of a layer.
@@ -103,8 +103,8 @@ class BaseEncoder(nn.Layer):
             pos_enc_class = PositionalEncoding
         elif pos_enc_layer_type == "rel_pos":
             pos_enc_class = RelPositionalEncoding
-        elif pos_enc_layer_type is None:
-            pos_enc_class = NonePositionalEncoding
+        elif pos_enc_layer_type == "no_pos":
+            pos_enc_class = NoPositionalEncoding
 
         else:
             raise ValueError("unknown pos_enc_layer: " + pos_enc_layer_type)
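
Note (editor's sketch, not part of the commit): the diff shows only the
constructor signature and position_encoding of the renamed class. Below is a
minimal, self-contained sketch of what a "no positional encoding" layer looks
like under Paddle's embedding interface; the forward body and the zero
pos_emb it returns are assumptions for illustration, since the patch does not
show that method.

    import paddle
    from paddle import nn

    class NoPositionalEncoding(nn.Layer):
        """Interface-compatible stub that injects no position information."""

        def __init__(self, d_model: int, dropout_rate: float):
            super().__init__()
            self.dropout = nn.Dropout(p=dropout_rate)

        def forward(self, x: paddle.Tensor, offset: int=0):
            # Assumed behavior: pass the input through dropout and return a
            # zero positional embedding, so callers can treat this layer the
            # same as PositionalEncoding / RelPositionalEncoding.
            pos_emb = paddle.zeros([1, x.shape[1], x.shape[2]], dtype=x.dtype)
            return self.dropout(x), pos_emb

        def position_encoding(self, offset: int, size: int) -> paddle.Tensor:
            # As in the diff: there is no cached encoding to return.
            return None

With the string key in place, callers select this stub by passing
pos_enc_layer_type="no_pos" rather than None, which is why the parameter
annotation tightens from Optional[str, None] (invalid typing in any case;
Optional takes a single argument) to a plain str. The dispatch fix also
replaces "is" with "==": identity comparison against a string literal is
unreliable and only works by accident of interning.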