@@ -110,12 +110,10 @@ class PositionalEncoding(nn.Layer, PositionalEncodingInterface):
             paddle.Tensor: Encoded tensor. Its shape is (batch, time, ...)
             paddle.Tensor: for compatibility to RelPositionalEncoding, (batch=1, time, ...)
"""
-        T = x.shape[1]
         assert offset + x.shape[
             1] < self.max_len, "offset: {} + x.shape[1]: {} is larger than the max_len: {}".format(
                 offset, x.shape[1], self.max_len)
         #TODO(Hui Zhang): using T = paddle.shape(x)[1], __getitem__ not support Tensor
-        pos_emb = self.pe[:, offset:offset + T]
         pos_emb = self.pe[:, offset:offset + x.shape[1]]
         x = x * self.xscale + pos_emb
         return self.dropout(x), self.dropout(pos_emb)
@@ -164,6 +162,5 @@ class RelPositionalEncoding(PositionalEncoding):
             1] < self.max_len, "offset: {} + x.shape[1]: {} is larger than the max_len: {}".format(
                 offset, x.shape[1], self.max_len)
         x = x * self.xscale
-        #TODO(Hui Zhang): using paddle.shape(x)[1], __getitem__ not support Tensor
         pos_emb = self.pe[:, offset:offset + x.shape[1]]
         return self.dropout(x), self.dropout(pos_emb)
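
Note: the net effect of both hunks is that `forward` slices the precomputed positional table with `x.shape[1]` directly instead of going through a local `T`. Below is a minimal, self-contained sketch of the resulting lookup, assuming a standard sinusoidal table `self.pe` of shape `(1, max_len, d_model)` as implied by the slicing above; only `pe`, `xscale`, `dropout`, and `max_len` are names taken from the diff, and the constructor is illustrative rather than the project's actual implementation.

import math

import paddle
import paddle.nn as nn


class PositionalEncodingSketch(nn.Layer):
    """Illustrative re-implementation; not the source's actual class."""

    def __init__(self, d_model: int, dropout_rate: float=0.1,
                 max_len: int=5000):
        super().__init__()
        self.xscale = math.sqrt(d_model)
        self.max_len = max_len
        self.dropout = nn.Dropout(dropout_rate)
        # Standard sinusoidal table, final shape (1, max_len, d_model).
        position = paddle.arange(0, max_len, dtype='float32').unsqueeze(1)
        div_term = paddle.exp(
            paddle.arange(0, d_model, 2, dtype='float32') *
            -(math.log(10000.0) / d_model))
        pe = paddle.zeros([max_len, d_model])
        pe[:, 0::2] = paddle.sin(position * div_term)
        pe[:, 1::2] = paddle.cos(position * div_term)
        self.pe = pe.unsqueeze(0)

    def forward(self, x: paddle.Tensor, offset: int=0):
        # Guard mirrors the assert kept as context in the diff.
        assert offset + x.shape[1] < self.max_len
        # Slice the table with x.shape[1] directly (no local T),
        # matching the post-change lines above.
        pos_emb = self.pe[:, offset:offset + x.shape[1]]
        x = x * self.xscale + pos_emb
        return self.dropout(x), self.dropout(pos_emb)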