adapt to paddle 3.2, remove some hack code to avoid conflicts. (#4120)

zxcd 1 week ago committed by GitHub
parent 8247eba840
commit c0cf9e9d3d

@@ -37,14 +37,6 @@ paddle.long = 'int64'
 paddle.uint16 = 'uint16'
 paddle.cdouble = 'complex128'
-if not hasattr(paddle, 'softmax'):
-    logger.debug("register user softmax to paddle, remove this when fixed!")
-    setattr(paddle, 'softmax', paddle.nn.functional.softmax)
-if not hasattr(paddle, 'log_softmax'):
-    logger.debug("register user log_softmax to paddle, remove this when fixed!")
-    setattr(paddle, 'log_softmax', paddle.nn.functional.log_softmax)
-if not hasattr(paddle, 'sigmoid'):
-    logger.debug("register user sigmoid to paddle, remove this when fixed!")
-    setattr(paddle, 'sigmoid', paddle.nn.functional.sigmoid)

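For context, the block removed above monkey-patched `paddle.nn.functional` ops onto the top-level `paddle` namespace whenever they were missing; with Paddle 3.2 the alias is unnecessary (and can conflict with the official API), so call sites now use the functional API directly. A minimal sketch of the old shim versus the direct call (the `logging` setup here is illustrative, not the repo's actual logger):

```python
import logging

import paddle
import paddle.nn.functional as F

logger = logging.getLogger(__name__)

# Old pattern (removed): alias the functional op onto the paddle namespace.
if not hasattr(paddle, 'softmax'):
    logger.debug("register user softmax to paddle, remove this when fixed!")
    setattr(paddle, 'softmax', F.softmax)

# New pattern: call the canonical functional API directly; it exists in all
# supported Paddle versions, so no hasattr() guard is needed.
x = paddle.randn([2, 5])
probs = F.softmax(x, axis=-1)
print(probs.sum(axis=-1))  # each row sums to ~1.0
```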
@@ -891,7 +891,7 @@ class Wav2Vec2GumbelVectorQuantizer(nn.Layer):
                 hard=True).type_as(hidden_states)
             # compute perplexity
-            codevector_soft_dist = paddle.softmax(
+            codevector_soft_dist = paddle.nn.functional.softmax(
                 hidden_states.reshape((batch_size * sequence_length,
                                        self.num_groups, -1)).float(),
                 axis=-1)

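As a hedged sketch of what this hunk feeds (shapes and the epsilon are illustrative, not the repo's exact code): the soft distribution over codevectors is averaged over the batch, and the exponentiated entropy of that average gives the quantizer perplexity.

```python
import paddle
import paddle.nn.functional as F

batch_size, sequence_length = 4, 10
num_groups, num_vars = 2, 8
hidden_states = paddle.randn(
    [batch_size, sequence_length, num_groups * num_vars])

# Softmax over each group's codevector logits, as in the hunk above.
codevector_soft_dist = F.softmax(
    hidden_states.reshape((batch_size * sequence_length,
                           num_groups, -1)),
    axis=-1)

# Perplexity: exp of the entropy of the batch-averaged distribution,
# summed over groups. It approaches num_groups * num_vars when codevector
# usage is uniform and num_groups when usage collapses.
avg_probs = codevector_soft_dist.mean(axis=0)  # (num_groups, num_vars)
perplexity = paddle.exp(
    -paddle.sum(avg_probs * paddle.log(avg_probs + 1e-7), axis=-1)).sum()
print(perplexity)
```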
@@ -120,11 +120,11 @@ class MultiHeadedAttention(nn.Layer):
             # for last chunk, time2 might be larger than scores.size(-1)
             mask = mask[:, :, :, :scores.shape[-1]]
             scores = scores.masked_fill(mask, -float('inf'))
-            attn = paddle.softmax(
+            attn = paddle.nn.functional.softmax(
                 scores, axis=-1).masked_fill(mask,
                                              0.0)  # (batch, head, time1, time2)
         else:
-            attn = paddle.softmax(
+            attn = paddle.nn.functional.softmax(
                 scores, axis=-1)  # (batch, head, time1, time2)
         p_attn = self.dropout(attn)

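The attention hunks follow the usual masked-softmax idiom: masked positions are pushed to -inf before the softmax so they receive ~zero weight, then zeroed explicitly so fully-masked rows don't leave NaNs behind. A toy sketch (the shapes and the mask are made up, not the model's):

```python
import paddle
import paddle.nn.functional as F

batch, head, time1, time2 = 1, 2, 3, 4
scores = paddle.randn([batch, head, time1, time2])

# Toy mask: drop the last key position everywhere. Real masks come from
# padding / chunking logic.
keep = paddle.zeros([batch, head, time1, time2 - 1], dtype='bool')
drop = paddle.ones([batch, head, time1, 1], dtype='bool')
mask = paddle.concat([keep, drop], axis=-1)

scores = scores.masked_fill(mask, -float('inf'))
attn = F.softmax(scores, axis=-1).masked_fill(mask, 0.0)
print(attn.sum(axis=-1))  # remaining weights still sum to ~1.0
```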
@@ -189,7 +189,7 @@ class TransformerDecoder(BatchScorerInterface, nn.Layer):
         else:
             y = x[:, -1]
         if self.use_output_layer:
-            y = paddle.log_softmax(self.output_layer(y), axis=-1)
+            y = paddle.nn.functional.log_softmax(self.output_layer(y), axis=-1)
         return y, new_cache

     # beam search API (see ScorerInterface)

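The decoder change is the same migration applied to the beam-search scoring path, where `log_softmax` turns the output layer's logits into log-probabilities that beam search can accumulate additively. A self-contained sketch (the Linear layer and sizes are stand-ins, not the repo's actual decoder):

```python
import paddle
import paddle.nn as nn
import paddle.nn.functional as F

vocab_size, hidden = 100, 16
output_layer = nn.Linear(hidden, vocab_size)

y = paddle.randn([2, hidden])          # last-step decoder states
logp = F.log_softmax(output_layer(y), axis=-1)
print(paddle.exp(logp).sum(axis=-1))   # probabilities sum to ~1.0
```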