# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
# Copyright 2020 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import math

import paddle
from paddle import nn
from paddle import Tensor

from paddlespeech.s2t.utils.log import Log

logger = Log(__name__).getlog()


class NewGELUActivation(nn.Layer):
    """
    Implementation of the GELU activation function currently in Google BERT repo (identical to OpenAI GPT). Also see
    the Gaussian Error Linear Units paper: https://arxiv.org/abs/1606.08415
    """

    def forward(self, input: Tensor) -> Tensor:
        return 0.5 * input * (1.0 + paddle.tanh(
            math.sqrt(2.0 / math.pi) *
            (input + 0.044715 * paddle.pow(input, 3.0))))


class GELUActivation(nn.Layer):
    """
    Original Implementation of the GELU activation function in Google BERT repo when initially created. For
    information: OpenAI GPT's GELU is slightly different (and gives slightly different results): 0.5 * x * (1 +
    paddle.tanh(math.sqrt(2 / math.pi) * (x + 0.044715 * paddle.pow(x, 3)))). This is now written in C in
    nn.functional. Also see the Gaussian Error Linear Units paper: https://arxiv.org/abs/1606.08415
    """

    def __init__(self, use_gelu_python: bool=False):
        super().__init__()
        # Select the pure-Python erf-based GELU when requested; otherwise use
        # the built-in (C-backed) paddle implementation.
        if use_gelu_python:
            self.act = self._gelu_python
        else:
            self.act = nn.functional.gelu

    def _gelu_python(self, input: Tensor) -> Tensor:
        return input * 0.5 * (1.0 + paddle.erf(input / math.sqrt(2.0)))

    def forward(self, input: Tensor) -> Tensor:
        return self.act(input)


class FastGELUActivation(nn.Layer):
    """
    Applies GELU approximation that is slower than QuickGELU but more accurate. See:
    https://github.com/hendrycks/GELUs
    """

    def forward(self, input: Tensor) -> Tensor:
        return 0.5 * input * (1.0 + paddle.tanh(
            input * 0.7978845608 * (1.0 + 0.044715 * input * input)))


class QuickGELUActivation(nn.Layer):
    """
    Applies GELU approximation that is fast but somewhat inaccurate. See: https://github.com/hendrycks/GELUs
    """

    def forward(self, input: Tensor) -> Tensor:
        return input * paddle.sigmoid(1.702 * input)


class ClippedGELUActivation(nn.Layer):
    """
    Clip the range of possible GeLU outputs between [min, max]. This is especially useful for quantization purposes,
    as it allows mapping negative values in the GeLU spectrum. For more information on this trick, please refer to
    https://arxiv.org/abs/2004.09602.

    Gaussian Error Linear Unit. Original Implementation of the gelu activation function in Google Bert repo when
    initially created.

    For information: OpenAI GPT's gelu is slightly different (and gives slightly different results): 0.5 * x * (1 +
    paddle.tanh(math.sqrt(2 / math.pi) * (x + 0.044715 * paddle.pow(x, 3)))). See https://arxiv.org/abs/1606.08415
    """

    def __init__(self, min: float, max: float):
        if min > max:
            raise ValueError(
                f"min should be < max (got min: {min}, max: {max})")

        super().__init__()
        self.min = min
        self.max = max

    def forward(self, x: Tensor) -> Tensor:
        return paddle.clip(gelu(x), self.min, self.max)


class SiLUActivation(nn.Layer):
    """
    See Gaussian Error Linear Units (Hendrycks et al., https://arxiv.org/abs/1606.08415) where the SiLU (Sigmoid
    Linear Unit) was originally introduced and coined, and see Sigmoid-Weighted Linear Units for Neural Network
    Function Approximation in Reinforcement Learning (Elfwing et al., https://arxiv.org/abs/1702.03118) and Swish: a
    Self-Gated Activation Function (Ramachandran et al., https://arxiv.org/abs/1710.05941v1) where the SiLU was
    experimented with later.
    """

    def __init__(self):
        super().__init__()
        self.act = nn.functional.silu

    def _silu_python(self, input: Tensor) -> Tensor:
        return input * paddle.sigmoid(input)

    def forward(self, input: Tensor) -> Tensor:
        return self.act(input)


class MishActivation(nn.Layer):
    """
    See Mish: A Self-Regularized Non-Monotonic Activation Function (Misra, https://arxiv.org/abs/1908.08681). Also
    visit the official repository for the paper: https://github.com/digantamisra98/Mish
    """

    def __init__(self):
        super().__init__()
        self.act = nn.functional.mish

    def _mish_python(self, input: Tensor) -> Tensor:
        return input * paddle.tanh(nn.functional.softplus(input))

    def forward(self, input: Tensor) -> Tensor:
        return self.act(input)


class LinearActivation(nn.Layer):
    """
    Applies the linear activation function, i.e. forwarding input directly to output.
    """

    def forward(self, input: Tensor) -> Tensor:
        return input


# Map configuration strings to ready-to-use activation layer instances.
ACT2FN = {
    "gelu": GELUActivation(),
    "gelu_10": ClippedGELUActivation(-10, 10),
    "gelu_fast": FastGELUActivation(),
    "gelu_new": NewGELUActivation(),
    "gelu_python": GELUActivation(use_gelu_python=True),
    "linear": LinearActivation(),
    "mish": MishActivation(),
    "quick_gelu": QuickGELUActivation(),
    "relu": nn.ReLU(),
    "sigmoid": nn.Sigmoid(),
    "silu": SiLUActivation(),
    "swish": SiLUActivation(),
    "tanh": nn.Tanh(),
}


def get_activation(activation_string):
    if activation_string in ACT2FN:
        return ACT2FN[activation_string]
    else:
        raise KeyError(
            f"function {activation_string} not found in ACT2FN mapping {list(ACT2FN.keys())}"
        )


# For backwards compatibility with: from activations import gelu_python
gelu_python = get_activation("gelu_python")
gelu_new = get_activation("gelu_new")
gelu = get_activation("gelu")
gelu_fast = get_activation("gelu_fast")
quick_gelu = get_activation("quick_gelu")
silu = get_activation("silu")
mish = get_activation("mish")
linear_act = get_activation("linear")
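

# Usage sketch (illustrative only, not part of the original module): resolve an
# activation by its configuration string and apply it to a tensor. The string
# "gelu_10" and the tensor shape below are arbitrary choices for demonstration.
#
#     act = get_activation("gelu_10")   # ClippedGELUActivation(-10, 10)
#     x = paddle.randn([2, 8])
#     y = act(x)                        # GELU output clipped to [-10, 10], same shape as x
#
# An unknown name raises KeyError listing the keys supported by ACT2FN.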