@@ -194,7 +194,7 @@ class AttLoc(nn.Layer):
e = masked_fill(e, self.mask, -float("inf"))
# apply monotonic attention constraint (mainly for TTS)
if last_attended_idx is not None:
if last_attended_idx != -1:
e = _apply_attention_constraint(e, last_attended_idx,
backward_window, forward_window)
@@ -556,13 +556,15 @@ class Decoder(nn.Layer):
if use_att_constraint:
last_attended_idx = 0
else:
last_attended_idx = None
last_attended_idx = -1
# loop for an output sequence
idx = 0
outs, att_ws, probs = [], [], []
prob = paddle.zeros([1])
while paddle.to_tensor(True):
z_list = z_list
c_list = c_list
# updated index
idx += self.reduction_factor