diff --git a/paddlespeech/t2s/modules/tacotron2/attentions.py b/paddlespeech/t2s/modules/tacotron2/attentions.py
index cdaef4608..2256d8ea8 100644
--- a/paddlespeech/t2s/modules/tacotron2/attentions.py
+++ b/paddlespeech/t2s/modules/tacotron2/attentions.py
@@ -47,11 +47,13 @@ def _apply_attention_constraint(e,
         https://arxiv.org/abs/1710.07654
 
     """
-    if paddle.shape(e)[0] != 1:
-        raise NotImplementedError(
-            "Batch attention constraining is not yet supported.")
-    backward_idx = last_attended_idx - backward_window
-    forward_idx = last_attended_idx + forward_window
+    # for dygraph to static graph
+    # if e.shape[0] != 1:
+    #     raise NotImplementedError(
+    #         "Batch attention constraining is not yet supported.")
+    backward_idx = paddle.cast(
+        last_attended_idx - backward_window, dtype='int64')
+    forward_idx = paddle.cast(last_attended_idx + forward_window, dtype='int64')
     if backward_idx > 0:
         e[:, :backward_idx] = -float("inf")
     if forward_idx < paddle.shape(e)[1]:
diff --git a/paddlespeech/t2s/modules/tacotron2/decoder.py b/paddlespeech/t2s/modules/tacotron2/decoder.py
index 41c94b63f..6118a004e 100644
--- a/paddlespeech/t2s/modules/tacotron2/decoder.py
+++ b/paddlespeech/t2s/modules/tacotron2/decoder.py
@@ -562,7 +562,7 @@ class Decoder(nn.Layer):
         idx = 0
         outs, att_ws, probs = [], [], []
         prob = paddle.zeros([1])
-        while True:
+        while paddle.to_tensor(True):
             # updated index
             idx += self.reduction_factor
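
Context for the patch above: both hunks rewrite plain Python values into tensor-valued ones (a tensor loop guard, int64 tensor indices) so that dygraph-to-static conversion can lower the control flow into graph ops instead of evaluating it eagerly in Python. A minimal sketch of the same pattern, outside PaddleSpeech (assumes Paddle 2.x with paddle.jit.to_static; count_up and limit are illustrative names, not part of the patch):

import paddle
from paddle.jit import to_static


@to_static
def count_up(limit):
    # Tensor-valued loop state and a tensor loop condition let the
    # dygraph-to-static converter turn this into a graph while_loop
    # instead of unrolling it in Python at trace time.
    idx = paddle.zeros([1], dtype='int64')
    while idx < limit:
        idx = idx + 1
    return idx


print(count_up(paddle.to_tensor([5], dtype='int64')))  # Tensor with value [5]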