From 294b7b00bd419309651859825ecc2d05de951e55 Mon Sep 17 00:00:00 2001
From: 0x45f
Date: Mon, 18 Jul 2022 08:36:45 +0000
Subject: [PATCH 1/3] Support dy2st for conformer

---
 paddlespeech/s2t/__init__.py        | 21 ++++++++++++++-------
 paddlespeech/s2t/modules/encoder.py |  4 ++--
 2 files changed, 16 insertions(+), 9 deletions(-)

diff --git a/paddlespeech/s2t/__init__.py b/paddlespeech/s2t/__init__.py
index 2da68435..99b8bcbe 100644
--- a/paddlespeech/s2t/__init__.py
+++ b/paddlespeech/s2t/__init__.py
@@ -159,9 +159,7 @@ if not hasattr(paddle.Tensor, 'new_full'):
 def eq(xs: paddle.Tensor, ys: Union[paddle.Tensor, float]) -> paddle.Tensor:
     if convert_dtype_to_string(xs.dtype) == paddle.bool:
         xs = xs.astype(paddle.int)
-    return xs.equal(
-        paddle.to_tensor(
-            ys, dtype=convert_dtype_to_string(xs.dtype), place=xs.place))
+    return xs.equal(ys)
 
 
 if not hasattr(paddle.Tensor, 'eq'):
@@ -219,13 +217,22 @@ def is_broadcastable(shp1, shp2):
     return True
 
 
+def broadcast_shape(shp1, shp2):
+    result = []
+    for a, b in zip(shp1[::-1], shp2[::-1]):
+        result.append(max(a, b))
+    return result[::-1]
+
+
 def masked_fill(xs: paddle.Tensor,
                 mask: paddle.Tensor,
                 value: Union[float, int]):
-    assert is_broadcastable(xs.shape, mask.shape) is True, (xs.shape,
-                                                            mask.shape)
-    bshape = paddle.broadcast_shape(xs.shape, mask.shape)
-    mask = mask.broadcast_to(bshape)
+    bshape = broadcast_shape(xs.shape, mask.shape)
+    mask.stop_gradient = True
+    tmp = paddle.ones(shape=[len(bshape)], dtype='int32')
+    for index in range(len(bshape)):
+        tmp[index] = bshape[index]
+    mask = mask.broadcast_to(tmp)
     trues = paddle.ones_like(xs) * value
     xs = paddle.where(mask, trues, xs)
     return xs
diff --git a/paddlespeech/s2t/modules/encoder.py b/paddlespeech/s2t/modules/encoder.py
index 72300579..ad73f5e9 100644
--- a/paddlespeech/s2t/modules/encoder.py
+++ b/paddlespeech/s2t/modules/encoder.py
@@ -253,8 +253,8 @@ class BaseEncoder(nn.Layer):
             # cnn_cache[i] = (B=1, hidden-dim, cache_t2)
             xs, _, new_att_cache, new_cnn_cache = layer(
                 xs, att_mask, pos_emb,
-                att_cache=att_cache[i:i+1] if elayers > 0 else att_cache,
-                cnn_cache=cnn_cache[i] if paddle.shape(cnn_cache)[0] > 0 else cnn_cache,
+                att_cache=att_cache if elayers == 0 else att_cache[i:i+1],
+                cnn_cache=cnn_cache if paddle.shape(cnn_cache)[0] == 0 else cnn_cache[i],
             )
             # new_att_cache = (1, head, attention_key_size, d_k*2)
             # new_cnn_cache = (B=1, hidden-dim, cache_t2)

From e6ac8881f16969fefd5b151074b2e7f710ef1545 Mon Sep 17 00:00:00 2001
From: 0x45f
Date: Tue, 19 Jul 2022 03:43:50 +0000
Subject: [PATCH 2/3] Fix comments

---
 paddlespeech/s2t/models/u2/u2.py          | 6 ++++--
 paddlespeech/s2t/modules/encoder.py       | 6 +++---
 paddlespeech/s2t/modules/encoder_layer.py | 1 +
 3 files changed, 8 insertions(+), 5 deletions(-)

diff --git a/paddlespeech/s2t/models/u2/u2.py b/paddlespeech/s2t/models/u2/u2.py
index c7750184..76f698e6 100644
--- a/paddlespeech/s2t/models/u2/u2.py
+++ b/paddlespeech/s2t/models/u2/u2.py
@@ -625,10 +625,12 @@ class U2BaseModel(ASRInterface, nn.Layer):
                 (elayers, head, cache_t1, d_k * 2), where
                 `head * d_k == hidden-dim` and
                 `cache_t1 == chunk_size * num_decoding_left_chunks`.
-                `d_k * 2` for att key & value.
+                `d_k * 2` for att key & value. Default is a 0-dim Tensor;
+                it is used for dy2st.
             cnn_cache (paddle.Tensor): cache tensor for cnn_module in conformer,
                 (elayers, b=1, hidden-dim, cache_t2), where
-                `cache_t2 == cnn.lorder - 1`
+                `cache_t2 == cnn.lorder - 1`. Default is a 0-dim Tensor;
+                it is used for dy2st.
 
         Returns:
             paddle.Tensor: output of current input xs,
diff --git a/paddlespeech/s2t/modules/encoder.py b/paddlespeech/s2t/modules/encoder.py
index ad73f5e9..bff2d69b 100644
--- a/paddlespeech/s2t/modules/encoder.py
+++ b/paddlespeech/s2t/modules/encoder.py
@@ -250,11 +250,11 @@ class BaseEncoder(nn.Layer):
         r_cnn_cache = []
         for i, layer in enumerate(self.encoders):
             # att_cache[i:i+1] = (1, head, cache_t1, d_k*2)
-            # cnn_cache[i] = (B=1, hidden-dim, cache_t2)
+            # cnn_cache[i:i+1] = (1, B=1, hidden-dim, cache_t2)
             xs, _, new_att_cache, new_cnn_cache = layer(
                 xs, att_mask, pos_emb,
-                att_cache=att_cache if elayers == 0 else att_cache[i:i+1],
-                cnn_cache=cnn_cache if paddle.shape(cnn_cache)[0] == 0 else cnn_cache[i],
+                att_cache=att_cache[i:i+1] if elayers > 0 else att_cache,
+                cnn_cache=cnn_cache[i:i+1] if paddle.shape(cnn_cache)[0] > 0 else cnn_cache,
             )
             # new_att_cache = (1, head, attention_key_size, d_k*2)
             # new_cnn_cache = (B=1, hidden-dim, cache_t2)
diff --git a/paddlespeech/s2t/modules/encoder_layer.py b/paddlespeech/s2t/modules/encoder_layer.py
index d91e3f6e..9e46cc54 100644
--- a/paddlespeech/s2t/modules/encoder_layer.py
+++ b/paddlespeech/s2t/modules/encoder_layer.py
@@ -250,6 +250,7 @@ class ConformerEncoderLayer(nn.Layer):
         # convolution module
         # Fake new cnn cache here, and then change it in conv_module
         new_cnn_cache = paddle.zeros([0,0,0], dtype=x.dtype)
+        cnn_cache = paddle.squeeze(cnn_cache, axis=0)
         if self.conv_module is not None:
             residual = x
             if self.normalize_before:

From e21cceea5105eb5a5c9afd240329d2c8bce85af8 Mon Sep 17 00:00:00 2001
From: 0x45f
Date: Tue, 19 Jul 2022 03:46:05 +0000
Subject: [PATCH 3/3] Remove blank line

---
 paddlespeech/s2t/__init__.py | 1 -
 1 file changed, 1 deletion(-)

diff --git a/paddlespeech/s2t/__init__.py b/paddlespeech/s2t/__init__.py
index 5fe2e16b..f6476b9a 100644
--- a/paddlespeech/s2t/__init__.py
+++ b/paddlespeech/s2t/__init__.py
@@ -114,7 +114,6 @@ if not hasattr(paddle.Tensor, 'new_full'):
     paddle.Tensor.new_full = new_full
     paddle.static.Variable.new_full = new_full
 
 
-
 def contiguous(xs: paddle.Tensor) -> paddle.Tensor:
     return xs
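
For context on the first patch: masked_fill in paddlespeech/s2t/__init__.py is reworked so that the broadcast shape is computed in plain Python (the new broadcast_shape helper) and the mask is expanded through a shape tensor instead of calling paddle.broadcast_shape, as part of the dynamic-to-static (dy2st) support named in the commit subject. The following is a minimal standalone sketch mirroring that patched helper; the example inputs (scores, pad_mask) and the -1e9 fill value are illustrative only and not part of the patches.

from typing import Union

import paddle


def broadcast_shape(shp1, shp2):
    # Right-aligned elementwise max of the two shapes, computed in plain
    # Python; note that zip stops at the shorter list, so this assumes both
    # shapes have the same rank.
    result = []
    for a, b in zip(shp1[::-1], shp2[::-1]):
        result.append(max(a, b))
    return result[::-1]


def masked_fill(xs: paddle.Tensor,
                mask: paddle.Tensor,
                value: Union[float, int]):
    # Broadcast the mask to the combined shape, then overwrite the masked
    # positions of xs with `value`.
    bshape = broadcast_shape(xs.shape, mask.shape)
    mask.stop_gradient = True
    tmp = paddle.ones(shape=[len(bshape)], dtype='int32')
    for index in range(len(bshape)):
        tmp[index] = bshape[index]
    mask = mask.broadcast_to(tmp)
    trues = paddle.ones_like(xs) * value
    xs = paddle.where(mask, trues, xs)
    return xs


# Illustrative usage (not from the patches): fill padded positions of a
# small score matrix with a large negative value.
scores = paddle.rand([2, 4])
pad_mask = paddle.to_tensor([[False, False, True, True],
                             [False, True, True, True]])
print(masked_fill(scores, pad_mask, -1e9))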