# Copyright 2020 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from collections import OrderedDict
from dataclasses import dataclass, fields
from typing import Optional, Tuple

import paddle


class ModelOutput(OrderedDict):
    """
    Base class for all model outputs as dataclass. Has a `__getitem__` that allows indexing by integer or slice (like
    a tuple) or strings (like a dictionary) that will ignore the `None` attributes. Otherwise behaves like a regular
    python dictionary.

    <Tip warning={true}>

    You can't unpack a `ModelOutput` directly. Use the [`~utils.ModelOutput.to_tuple`] method to convert it to a
    tuple first.

    </Tip>
    """

    def __post_init__(self):
        class_fields = fields(self)

        # Safety and consistency checks
        if not len(class_fields):
            raise ValueError(f"{self.__class__.__name__} has no fields.")
        if not all(field.default is None for field in class_fields[1:]):
            raise ValueError(
                f"{self.__class__.__name__} should not have more than one required field."
            )

        first_field = getattr(self, class_fields[0].name)
        other_fields_are_none = all(
            getattr(self, field.name) is None for field in class_fields[1:])

        if other_fields_are_none and not paddle.is_tensor(first_field):
            if isinstance(first_field, dict):
                iterator = first_field.items()
                first_field_iterator = True
            else:
                try:
                    iterator = iter(first_field)
                    first_field_iterator = True
                except TypeError:
                    first_field_iterator = False

            # if we provided an iterator as first field and the iterator is a (key, value) iterator
            # set the associated fields
            if first_field_iterator:
                for element in iterator:
                    if (not isinstance(element, (list, tuple)) or
                            not len(element) == 2 or
                            not isinstance(element[0], str)):
                        break
                    setattr(self, element[0], element[1])
                    if element[1] is not None:
                        self[element[0]] = element[1]
            elif first_field is not None:
                self[class_fields[0].name] = first_field
        else:
            for field in class_fields:
                v = getattr(self, field.name)
                if v is not None:
                    self[field.name] = v

    def __delitem__(self, *args, **kwargs):
        raise Exception(
            f"You cannot use ``__delitem__`` on a {self.__class__.__name__} instance."
        )

    def setdefault(self, *args, **kwargs):
        raise Exception(
            f"You cannot use ``setdefault`` on a {self.__class__.__name__} instance."
        )

    def pop(self, *args, **kwargs):
        raise Exception(
            f"You cannot use ``pop`` on a {self.__class__.__name__} instance.")

    def update(self, *args, **kwargs):
        raise Exception(
            f"You cannot use ``update`` on a {self.__class__.__name__} instance."
        )

    def __getitem__(self, k):
        if isinstance(k, str):
            inner_dict = {k: v for (k, v) in self.items()}
            return inner_dict[k]
        else:
            return self.to_tuple()[k]

    def __setattr__(self, name, value):
        if name in self.keys() and value is not None:
            # Don't call self.__setitem__ to avoid recursion errors
            super().__setitem__(name, value)
        super().__setattr__(name, value)

    def __setitem__(self, key, value):
        # Will raise a KeyError if needed
        super().__setitem__(key, value)
        # Don't call self.__setattr__ to avoid recursion errors
        super().__setattr__(key, value)

    def to_tuple(self) -> Tuple:
        """
        Convert self to a tuple containing all the attributes/keys that are not `None`.
        """
        return tuple(self[k] for k in self.keys())
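

# Illustrative usage (a minimal sketch; `BaseModelOutput` is defined below):
#
#     out = BaseModelOutput(last_hidden_state=paddle.zeros([1, 4, 8]))
#     out.last_hidden_state         # attribute access
#     out["last_hidden_state"]      # string indexing, like a dict
#     out[0]                        # integer indexing, like a tuple (None fields are skipped)
#     (last_hidden_state,) = out.to_tuple()  # unpack via to_tuple(), not directly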


@dataclass
class BaseModelOutput(ModelOutput):
    """
    Base class for model's outputs, with potential hidden states and attentions.

    Args:
        last_hidden_state (`paddle.Tensor` of shape `(batch_size, sequence_length, hidden_size)`):
            Sequence of hidden-states at the output of the last layer of the model.
        hidden_states (`tuple(paddle.Tensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
            Tuple of `paddle.Tensor` (one for the output of the embeddings, if the model has an embedding layer, +
            one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`.

            Hidden-states of the model at the output of each layer plus the optional initial embedding outputs.
        attentions (`tuple(paddle.Tensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
            Tuple of `paddle.Tensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
            sequence_length)`.

            Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
            heads.
    """

    last_hidden_state: paddle.Tensor = None
    hidden_states: Optional[Tuple[paddle.Tensor]] = None
    attentions: Optional[Tuple[paddle.Tensor]] = None


@dataclass
class BaseModelOutputWithNoAttention(ModelOutput):
    """
    Base class for model's outputs, with potential hidden states.

    Args:
        last_hidden_state (`paddle.Tensor` of shape `(batch_size, num_channels, height, width)`):
            Sequence of hidden-states at the output of the last layer of the model.
        hidden_states (`tuple(paddle.Tensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
            Tuple of `paddle.Tensor` (one for the output of the embeddings, if the model has an embedding layer, +
            one for the output of each layer) of shape `(batch_size, num_channels, height, width)`.

            Hidden-states of the model at the output of each layer plus the optional initial embedding outputs.
    """

    last_hidden_state: paddle.Tensor = None
    hidden_states: Optional[Tuple[paddle.Tensor]] = None


@dataclass
class BaseModelOutputWithPooling(ModelOutput):
    """
    Base class for model's outputs that also contains a pooling of the last hidden states.

    Args:
        last_hidden_state (`paddle.Tensor` of shape `(batch_size, sequence_length, hidden_size)`):
            Sequence of hidden-states at the output of the last layer of the model.
        pooler_output (`paddle.Tensor` of shape `(batch_size, hidden_size)`):
            Last layer hidden-state of the first token of the sequence (classification token) after further processing
            through the layers used for the auxiliary pretraining task. E.g. for BERT-family of models, this returns
            the classification token after processing through a linear layer and a tanh activation function. The linear
            layer weights are trained from the next sentence prediction (classification) objective during pretraining.
        hidden_states (`tuple(paddle.Tensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
            Tuple of `paddle.Tensor` (one for the output of the embeddings, if the model has an embedding layer, +
            one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`.

            Hidden-states of the model at the output of each layer plus the optional initial embedding outputs.
        attentions (`tuple(paddle.Tensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
            Tuple of `paddle.Tensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
            sequence_length)`.

            Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
            heads.
    """

    last_hidden_state: paddle.Tensor = None
    pooler_output: paddle.Tensor = None
    hidden_states: Optional[Tuple[paddle.Tensor]] = None
    attentions: Optional[Tuple[paddle.Tensor]] = None


@dataclass
class BaseModelOutputWithPoolingAndNoAttention(ModelOutput):
    """
    Base class for model's outputs that also contains a pooling of the last hidden states.

    Args:
        last_hidden_state (`paddle.Tensor` of shape `(batch_size, num_channels, height, width)`):
            Sequence of hidden-states at the output of the last layer of the model.
        pooler_output (`paddle.Tensor` of shape `(batch_size, hidden_size)`):
            Last layer hidden-state after a pooling operation on the spatial dimensions.
        hidden_states (`tuple(paddle.Tensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
            Tuple of `paddle.Tensor` (one for the output of the embeddings, if the model has an embedding layer, +
            one for the output of each layer) of shape `(batch_size, num_channels, height, width)`.

            Hidden-states of the model at the output of each layer plus the optional initial embedding outputs.
    """

    last_hidden_state: paddle.Tensor = None
    pooler_output: paddle.Tensor = None
    hidden_states: Optional[Tuple[paddle.Tensor]] = None


@dataclass
class BaseModelOutputWithPast(ModelOutput):
    """
    Base class for model's outputs that may also contain a past key/values (to speed up sequential decoding).

    Args:
        last_hidden_state (`paddle.Tensor` of shape `(batch_size, sequence_length, hidden_size)`):
            Sequence of hidden-states at the output of the last layer of the model.

            If `past_key_values` is used, only the last hidden-state of the sequences of shape `(batch_size, 1,
            hidden_size)` is output.
        past_key_values (`tuple(tuple(paddle.Tensor))`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`):
            Tuple of `tuple(paddle.Tensor)` of length `config.n_layers`, with each tuple having 2 tensors of shape
            `(batch_size, num_heads, sequence_length, embed_size_per_head)` and optionally, if
            `config.is_encoder_decoder=True`, 2 additional tensors of shape `(batch_size, num_heads,
            encoder_sequence_length, embed_size_per_head)`.

            Contains pre-computed hidden-states (key and values in the self-attention blocks and, optionally, if
            `config.is_encoder_decoder=True`, in the cross-attention blocks) that can be used (see `past_key_values`
            input) to speed up sequential decoding.
        hidden_states (`tuple(paddle.Tensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
            Tuple of `paddle.Tensor` (one for the output of the embeddings, if the model has an embedding layer, +
            one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`.

            Hidden-states of the model at the output of each layer plus the optional initial embedding outputs.
        attentions (`tuple(paddle.Tensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
            Tuple of `paddle.Tensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
            sequence_length)`.

            Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
            heads.
    """

    last_hidden_state: paddle.Tensor = None
    past_key_values: Optional[Tuple[Tuple[paddle.Tensor]]] = None
    hidden_states: Optional[Tuple[paddle.Tensor]] = None
    attentions: Optional[Tuple[paddle.Tensor]] = None


@dataclass
class BaseModelOutputWithCrossAttentions(ModelOutput):
    """
    Base class for model's outputs, with potential hidden states and attentions.

    Args:
        last_hidden_state (`paddle.Tensor` of shape `(batch_size, sequence_length, hidden_size)`):
            Sequence of hidden-states at the output of the last layer of the model.
        hidden_states (`tuple(paddle.Tensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
            Tuple of `paddle.Tensor` (one for the output of the embeddings, if the model has an embedding layer, +
            one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`.

            Hidden-states of the model at the output of each layer plus the optional initial embedding outputs.
        attentions (`tuple(paddle.Tensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
            Tuple of `paddle.Tensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
            sequence_length)`.

            Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
            heads.
        cross_attentions (`tuple(paddle.Tensor)`, *optional*, returned when `output_attentions=True` and `config.add_cross_attention=True` is passed or when `config.output_attentions=True`):
            Tuple of `paddle.Tensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
            sequence_length)`.

            Attentions weights of the decoder's cross-attention layer, after the attention softmax, used to compute the
            weighted average in the cross-attention heads.
    """

    last_hidden_state: paddle.Tensor = None
    hidden_states: Optional[Tuple[paddle.Tensor]] = None
    attentions: Optional[Tuple[paddle.Tensor]] = None
    cross_attentions: Optional[Tuple[paddle.Tensor]] = None


@dataclass
class BaseModelOutputWithPoolingAndCrossAttentions(ModelOutput):
    """
    Base class for model's outputs that also contains a pooling of the last hidden states.

    Args:
        last_hidden_state (`paddle.Tensor` of shape `(batch_size, sequence_length, hidden_size)`):
            Sequence of hidden-states at the output of the last layer of the model.
        pooler_output (`paddle.Tensor` of shape `(batch_size, hidden_size)`):
            Last layer hidden-state of the first token of the sequence (classification token) after further processing
            through the layers used for the auxiliary pretraining task. E.g. for BERT-family of models, this returns
            the classification token after processing through a linear layer and a tanh activation function. The linear
            layer weights are trained from the next sentence prediction (classification) objective during pretraining.
        hidden_states (`tuple(paddle.Tensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
            Tuple of `paddle.Tensor` (one for the output of the embeddings, if the model has an embedding layer, +
            one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`.

            Hidden-states of the model at the output of each layer plus the optional initial embedding outputs.
        attentions (`tuple(paddle.Tensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
            Tuple of `paddle.Tensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
            sequence_length)`.

            Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
            heads.
        cross_attentions (`tuple(paddle.Tensor)`, *optional*, returned when `output_attentions=True` and `config.add_cross_attention=True` is passed or when `config.output_attentions=True`):
            Tuple of `paddle.Tensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
            sequence_length)`.

            Attentions weights of the decoder's cross-attention layer, after the attention softmax, used to compute the
            weighted average in the cross-attention heads.
        past_key_values (`tuple(tuple(paddle.Tensor))`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`):
            Tuple of `tuple(paddle.Tensor)` of length `config.n_layers`, with each tuple having 2 tensors of shape
            `(batch_size, num_heads, sequence_length, embed_size_per_head)` and optionally, if
            `config.is_encoder_decoder=True`, 2 additional tensors of shape `(batch_size, num_heads,
            encoder_sequence_length, embed_size_per_head)`.

            Contains pre-computed hidden-states (key and values in the self-attention blocks and, optionally, if
            `config.is_encoder_decoder=True`, in the cross-attention blocks) that can be used (see `past_key_values`
            input) to speed up sequential decoding.
    """

    last_hidden_state: paddle.Tensor = None
    pooler_output: paddle.Tensor = None
    hidden_states: Optional[Tuple[paddle.Tensor]] = None
    past_key_values: Optional[Tuple[Tuple[paddle.Tensor]]] = None
    attentions: Optional[Tuple[paddle.Tensor]] = None
    cross_attentions: Optional[Tuple[paddle.Tensor]] = None


@dataclass
class BaseModelOutputWithPastAndCrossAttentions(ModelOutput):
    """
    Base class for model's outputs that may also contain a past key/values (to speed up sequential decoding).

    Args:
        last_hidden_state (`paddle.Tensor` of shape `(batch_size, sequence_length, hidden_size)`):
            Sequence of hidden-states at the output of the last layer of the model.

            If `past_key_values` is used, only the last hidden-state of the sequences of shape `(batch_size, 1,
            hidden_size)` is output.
        past_key_values (`tuple(tuple(paddle.Tensor))`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`):
            Tuple of `tuple(paddle.Tensor)` of length `config.n_layers`, with each tuple having 2 tensors of shape
            `(batch_size, num_heads, sequence_length, embed_size_per_head)` and optionally, if
            `config.is_encoder_decoder=True`, 2 additional tensors of shape `(batch_size, num_heads,
            encoder_sequence_length, embed_size_per_head)`.

            Contains pre-computed hidden-states (key and values in the self-attention blocks and, optionally, if
            `config.is_encoder_decoder=True`, in the cross-attention blocks) that can be used (see `past_key_values`
            input) to speed up sequential decoding.
        hidden_states (`tuple(paddle.Tensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
            Tuple of `paddle.Tensor` (one for the output of the embeddings, if the model has an embedding layer, +
            one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`.

            Hidden-states of the model at the output of each layer plus the optional initial embedding outputs.
        attentions (`tuple(paddle.Tensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
            Tuple of `paddle.Tensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
            sequence_length)`.

            Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
            heads.
        cross_attentions (`tuple(paddle.Tensor)`, *optional*, returned when `output_attentions=True` and `config.add_cross_attention=True` is passed or when `config.output_attentions=True`):
            Tuple of `paddle.Tensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
            sequence_length)`.

            Attentions weights of the decoder's cross-attention layer, after the attention softmax, used to compute the
            weighted average in the cross-attention heads.
    """

    last_hidden_state: paddle.Tensor = None
    past_key_values: Optional[Tuple[Tuple[paddle.Tensor]]] = None
    hidden_states: Optional[Tuple[paddle.Tensor]] = None
    attentions: Optional[Tuple[paddle.Tensor]] = None
    cross_attentions: Optional[Tuple[paddle.Tensor]] = None


@dataclass
class Seq2SeqModelOutput(ModelOutput):
    """
    Base class for model encoder's outputs that also contains pre-computed hidden states that can speed up sequential
    decoding.

    Args:
        last_hidden_state (`paddle.Tensor` of shape `(batch_size, sequence_length, hidden_size)`):
            Sequence of hidden-states at the output of the last layer of the decoder of the model.

            If `past_key_values` is used, only the last hidden-state of the sequences of shape `(batch_size, 1,
            hidden_size)` is output.
        past_key_values (`tuple(tuple(paddle.Tensor))`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`):
            Tuple of `tuple(paddle.Tensor)` of length `config.n_layers`, with each tuple having 2 tensors of shape
            `(batch_size, num_heads, sequence_length, embed_size_per_head)` and 2 additional tensors of shape
            `(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)`.

            Contains pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention
            blocks) that can be used (see `past_key_values` input) to speed up sequential decoding.
        decoder_hidden_states (`tuple(paddle.Tensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
            Tuple of `paddle.Tensor` (one for the output of the embeddings, if the model has an embedding layer, +
            one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`.

            Hidden-states of the decoder at the output of each layer plus the optional initial embedding outputs.
        decoder_attentions (`tuple(paddle.Tensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
            Tuple of `paddle.Tensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
            sequence_length)`.

            Attentions weights of the decoder, after the attention softmax, used to compute the weighted average in the
            self-attention heads.
        cross_attentions (`tuple(paddle.Tensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
            Tuple of `paddle.Tensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
            sequence_length)`.

            Attentions weights of the decoder's cross-attention layer, after the attention softmax, used to compute the
            weighted average in the cross-attention heads.
        encoder_last_hidden_state (`paddle.Tensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
            Sequence of hidden-states at the output of the last layer of the encoder of the model.
        encoder_hidden_states (`tuple(paddle.Tensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
            Tuple of `paddle.Tensor` (one for the output of the embeddings, if the model has an embedding layer, +
            one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`.

            Hidden-states of the encoder at the output of each layer plus the optional initial embedding outputs.
        encoder_attentions (`tuple(paddle.Tensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
            Tuple of `paddle.Tensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
            sequence_length)`.

            Attentions weights of the encoder, after the attention softmax, used to compute the weighted average in the
            self-attention heads.
    """

    last_hidden_state: paddle.Tensor = None
    past_key_values: Optional[Tuple[Tuple[paddle.Tensor]]] = None
    decoder_hidden_states: Optional[Tuple[paddle.Tensor]] = None
    decoder_attentions: Optional[Tuple[paddle.Tensor]] = None
    cross_attentions: Optional[Tuple[paddle.Tensor]] = None
    encoder_last_hidden_state: Optional[paddle.Tensor] = None
    encoder_hidden_states: Optional[Tuple[paddle.Tensor]] = None
    encoder_attentions: Optional[Tuple[paddle.Tensor]] = None
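

# Illustrative access pattern (a sketch; `model`, `src_ids` and `tgt_ids` are
# hypothetical — any seq2seq model returning this class):
#
#     out = model(input_ids=src_ids, decoder_input_ids=tgt_ids, use_cache=True)
#     out.encoder_last_hidden_state   # encoder pass, computed once per source
#     out.past_key_values             # cached keys/values for the next decode step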


@dataclass
class CausalLMOutput(ModelOutput):
    """
    Base class for causal language model (or autoregressive) outputs.

    Args:
        loss (`paddle.Tensor` of shape `(1,)`, *optional*, returned when `labels` is provided):
            Language modeling loss (for next-token prediction).
        logits (`paddle.Tensor` of shape `(batch_size, sequence_length, config.vocab_size)`):
            Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).
        hidden_states (`tuple(paddle.Tensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
            Tuple of `paddle.Tensor` (one for the output of the embeddings, if the model has an embedding layer, +
            one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`.

            Hidden-states of the model at the output of each layer plus the optional initial embedding outputs.
        attentions (`tuple(paddle.Tensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
            Tuple of `paddle.Tensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
            sequence_length)`.

            Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
            heads.
    """

    loss: Optional[paddle.Tensor] = None
    logits: paddle.Tensor = None
    hidden_states: Optional[Tuple[paddle.Tensor]] = None
    attentions: Optional[Tuple[paddle.Tensor]] = None


@dataclass
class CausalLMOutputWithPast(ModelOutput):
    """
    Base class for causal language model (or autoregressive) outputs.

    Args:
        loss (`paddle.Tensor` of shape `(1,)`, *optional*, returned when `labels` is provided):
            Language modeling loss (for next-token prediction).
        logits (`paddle.Tensor` of shape `(batch_size, sequence_length, config.vocab_size)`):
            Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).
        past_key_values (`tuple(tuple(paddle.Tensor))`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`):
            Tuple of `tuple(paddle.Tensor)` of length `config.n_layers`, with each tuple having 2 tensors of shape
            `(batch_size, num_heads, sequence_length, embed_size_per_head)`.

            Contains pre-computed hidden-states (key and values in the self-attention blocks) that can be used (see
            `past_key_values` input) to speed up sequential decoding.
        hidden_states (`tuple(paddle.Tensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
            Tuple of `paddle.Tensor` (one for the output of the embeddings, if the model has an embedding layer, +
            one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`.

            Hidden-states of the model at the output of each layer plus the optional initial embedding outputs.
        attentions (`tuple(paddle.Tensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
            Tuple of `paddle.Tensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
            sequence_length)`.

            Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
            heads.
    """

    loss: Optional[paddle.Tensor] = None
    logits: paddle.Tensor = None
    past_key_values: Optional[Tuple[Tuple[paddle.Tensor]]] = None
    hidden_states: Optional[Tuple[paddle.Tensor]] = None
    attentions: Optional[Tuple[paddle.Tensor]] = None
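

# Greedy-decoding sketch (illustrative; `model`, `input_ids` and `max_new_tokens`
# are hypothetical — any causal LM returning `CausalLMOutputWithPast`):
#
#     past, generated = None, input_ids
#     for _ in range(max_new_tokens):
#         step_input = generated[:, -1:] if past is not None else generated
#         out = model(input_ids=step_input, past_key_values=past, use_cache=True)
#         past = out.past_key_values  # reuse the cached keys/values next step
#         next_token = paddle.argmax(out.logits[:, -1, :], axis=-1, keepdim=True)
#         generated = paddle.concat([generated, next_token], axis=-1)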


@dataclass
class CausalLMOutputWithCrossAttentions(ModelOutput):
    """
    Base class for causal language model (or autoregressive) outputs.

    Args:
        loss (`paddle.Tensor` of shape `(1,)`, *optional*, returned when `labels` is provided):
            Language modeling loss (for next-token prediction).
        logits (`paddle.Tensor` of shape `(batch_size, sequence_length, config.vocab_size)`):
            Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).
        hidden_states (`tuple(paddle.Tensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
            Tuple of `paddle.Tensor` (one for the output of the embeddings, if the model has an embedding layer, +
            one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`.

            Hidden-states of the model at the output of each layer plus the optional initial embedding outputs.
        attentions (`tuple(paddle.Tensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
            Tuple of `paddle.Tensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
            sequence_length)`.

            Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
            heads.
        cross_attentions (`tuple(paddle.Tensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
            Tuple of `paddle.Tensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
            sequence_length)`.

            Cross attentions weights after the attention softmax, used to compute the weighted average in the
            cross-attention heads.
        past_key_values (`tuple(tuple(paddle.Tensor))`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`):
            Tuple of `paddle.Tensor` tuples of length `config.n_layers`, with each tuple containing the cached key,
            value states of the self-attention and the cross-attention layers if the model is used in an
            encoder-decoder setting. Only relevant if `config.is_decoder = True`.

            Contains pre-computed hidden-states (key and values in the attention blocks) that can be used (see
            `past_key_values` input) to speed up sequential decoding.
    """

    loss: Optional[paddle.Tensor] = None
    logits: paddle.Tensor = None
    past_key_values: Optional[Tuple[Tuple[paddle.Tensor]]] = None
    hidden_states: Optional[Tuple[paddle.Tensor]] = None
    attentions: Optional[Tuple[paddle.Tensor]] = None
    cross_attentions: Optional[Tuple[paddle.Tensor]] = None


@dataclass
class SequenceClassifierOutputWithPast(ModelOutput):
    """
    Base class for outputs of sentence classification models.

    Args:
        loss (`paddle.Tensor` of shape `(1,)`, *optional*, returned when `labels` is provided):
            Classification (or regression if config.num_labels==1) loss.
        logits (`paddle.Tensor` of shape `(batch_size, config.num_labels)`):
            Classification (or regression if config.num_labels==1) scores (before SoftMax).
        past_key_values (`tuple(tuple(paddle.Tensor))`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`):
            Tuple of `tuple(paddle.Tensor)` of length `config.n_layers`, with each tuple having 2 tensors of shape
            `(batch_size, num_heads, sequence_length, embed_size_per_head)`.

            Contains pre-computed hidden-states (key and values in the self-attention blocks) that can be used (see
            `past_key_values` input) to speed up sequential decoding.
        hidden_states (`tuple(paddle.Tensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
            Tuple of `paddle.Tensor` (one for the output of the embeddings, if the model has an embedding layer, +
            one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`.

            Hidden-states of the model at the output of each layer plus the optional initial embedding outputs.
        attentions (`tuple(paddle.Tensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
            Tuple of `paddle.Tensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
            sequence_length)`.

            Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
            heads.
    """

    loss: Optional[paddle.Tensor] = None
    logits: paddle.Tensor = None
    past_key_values: Optional[Tuple[Tuple[paddle.Tensor]]] = None
    hidden_states: Optional[Tuple[paddle.Tensor]] = None
    attentions: Optional[Tuple[paddle.Tensor]] = None


@dataclass
class MaskedLMOutput(ModelOutput):
    """
    Base class for masked language models outputs.

    Args:
        loss (`paddle.Tensor` of shape `(1,)`, *optional*, returned when `labels` is provided):
            Masked language modeling (MLM) loss.
        logits (`paddle.Tensor` of shape `(batch_size, sequence_length, config.vocab_size)`):
            Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).
        hidden_states (`tuple(paddle.Tensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
            Tuple of `paddle.Tensor` (one for the output of the embeddings, if the model has an embedding layer, +
            one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`.

            Hidden-states of the model at the output of each layer plus the optional initial embedding outputs.
        attentions (`tuple(paddle.Tensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
            Tuple of `paddle.Tensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
            sequence_length)`.

            Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
            heads.
    """

    loss: Optional[paddle.Tensor] = None
    logits: paddle.Tensor = None
    hidden_states: Optional[Tuple[paddle.Tensor]] = None
    attentions: Optional[Tuple[paddle.Tensor]] = None


@dataclass
class Seq2SeqLMOutput(ModelOutput):
    """
    Base class for sequence-to-sequence language models outputs.

    Args:
        loss (`paddle.Tensor` of shape `(1,)`, *optional*, returned when `labels` is provided):
            Language modeling loss.
        logits (`paddle.Tensor` of shape `(batch_size, sequence_length, config.vocab_size)`):
            Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).
        past_key_values (`tuple(tuple(paddle.Tensor))`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`):
            Tuple of `tuple(paddle.Tensor)` of length `config.n_layers`, with each tuple having 2 tensors of shape
            `(batch_size, num_heads, sequence_length, embed_size_per_head)` and 2 additional tensors of shape
            `(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)`.

            Contains pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention
            blocks) that can be used (see `past_key_values` input) to speed up sequential decoding.
        decoder_hidden_states (`tuple(paddle.Tensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
            Tuple of `paddle.Tensor` (one for the output of the embeddings, if the model has an embedding layer, +
            one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`.

            Hidden-states of the decoder at the output of each layer plus the initial embedding outputs.
        decoder_attentions (`tuple(paddle.Tensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
            Tuple of `paddle.Tensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
            sequence_length)`.

            Attentions weights of the decoder, after the attention softmax, used to compute the weighted average in the
            self-attention heads.
        cross_attentions (`tuple(paddle.Tensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
            Tuple of `paddle.Tensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
            sequence_length)`.

            Attentions weights of the decoder's cross-attention layer, after the attention softmax, used to compute the
            weighted average in the cross-attention heads.
        encoder_last_hidden_state (`paddle.Tensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
            Sequence of hidden-states at the output of the last layer of the encoder of the model.
        encoder_hidden_states (`tuple(paddle.Tensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
            Tuple of `paddle.Tensor` (one for the output of the embeddings, if the model has an embedding layer, +
            one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`.

            Hidden-states of the encoder at the output of each layer plus the initial embedding outputs.
        encoder_attentions (`tuple(paddle.Tensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
            Tuple of `paddle.Tensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
            sequence_length)`.

            Attentions weights of the encoder, after the attention softmax, used to compute the weighted average in the
            self-attention heads.
    """

    loss: Optional[paddle.Tensor] = None
    logits: paddle.Tensor = None
    past_key_values: Optional[Tuple[Tuple[paddle.Tensor]]] = None
    decoder_hidden_states: Optional[Tuple[paddle.Tensor]] = None
    decoder_attentions: Optional[Tuple[paddle.Tensor]] = None
    cross_attentions: Optional[Tuple[paddle.Tensor]] = None
    encoder_last_hidden_state: Optional[paddle.Tensor] = None
    encoder_hidden_states: Optional[Tuple[paddle.Tensor]] = None
    encoder_attentions: Optional[Tuple[paddle.Tensor]] = None


@dataclass
class NextSentencePredictorOutput(ModelOutput):
    """
    Base class for outputs of models predicting if two sentences are consecutive or not.

    Args:
        loss (`paddle.Tensor` of shape `(1,)`, *optional*, returned when `next_sentence_label` is provided):
            Next sequence prediction (classification) loss.
        logits (`paddle.Tensor` of shape `(batch_size, 2)`):
            Prediction scores of the next sequence prediction (classification) head (scores of True/False continuation
            before SoftMax).
        hidden_states (`tuple(paddle.Tensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
            Tuple of `paddle.Tensor` (one for the output of the embeddings, if the model has an embedding layer, +
            one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`.

            Hidden-states of the model at the output of each layer plus the optional initial embedding outputs.
        attentions (`tuple(paddle.Tensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
            Tuple of `paddle.Tensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
            sequence_length)`.

            Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
            heads.
    """

    loss: Optional[paddle.Tensor] = None
    logits: paddle.Tensor = None
    hidden_states: Optional[Tuple[paddle.Tensor]] = None
    attentions: Optional[Tuple[paddle.Tensor]] = None


@dataclass
class SequenceClassifierOutput(ModelOutput):
    """
    Base class for outputs of sentence classification models.

    Args:
        loss (`paddle.Tensor` of shape `(1,)`, *optional*, returned when `labels` is provided):
            Classification (or regression if config.num_labels==1) loss.
        logits (`paddle.Tensor` of shape `(batch_size, config.num_labels)`):
            Classification (or regression if config.num_labels==1) scores (before SoftMax).
        hidden_states (`tuple(paddle.Tensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
            Tuple of `paddle.Tensor` (one for the output of the embeddings, if the model has an embedding layer, +
            one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`.

            Hidden-states of the model at the output of each layer plus the optional initial embedding outputs.
        attentions (`tuple(paddle.Tensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
            Tuple of `paddle.Tensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
            sequence_length)`.

            Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
            heads.
    """

    loss: Optional[paddle.Tensor] = None
    logits: paddle.Tensor = None
    hidden_states: Optional[Tuple[paddle.Tensor]] = None
    attentions: Optional[Tuple[paddle.Tensor]] = None


@dataclass
class Seq2SeqSequenceClassifierOutput(ModelOutput):
    """
    Base class for outputs of sequence-to-sequence sentence classification models.

    Args:
        loss (`paddle.Tensor` of shape `(1,)`, *optional*, returned when `label` is provided):
            Classification (or regression if config.num_labels==1) loss.
        logits (`paddle.Tensor` of shape `(batch_size, config.num_labels)`):
            Classification (or regression if config.num_labels==1) scores (before SoftMax).
        past_key_values (`tuple(tuple(paddle.Tensor))`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`):
            Tuple of `tuple(paddle.Tensor)` of length `config.n_layers`, with each tuple having 2 tensors of shape
            `(batch_size, num_heads, sequence_length, embed_size_per_head)` and 2 additional tensors of shape
            `(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)`.

            Contains pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention
            blocks) that can be used (see `past_key_values` input) to speed up sequential decoding.
        decoder_hidden_states (`tuple(paddle.Tensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
            Tuple of `paddle.Tensor` (one for the output of the embeddings, if the model has an embedding layer, +
            one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`.

            Hidden-states of the decoder at the output of each layer plus the initial embedding outputs.
        decoder_attentions (`tuple(paddle.Tensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
            Tuple of `paddle.Tensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
            sequence_length)`.

            Attentions weights of the decoder, after the attention softmax, used to compute the weighted average in the
            self-attention heads.
        cross_attentions (`tuple(paddle.Tensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
            Tuple of `paddle.Tensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
            sequence_length)`.

            Attentions weights of the decoder's cross-attention layer, after the attention softmax, used to compute the
            weighted average in the cross-attention heads.
        encoder_last_hidden_state (`paddle.Tensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
            Sequence of hidden-states at the output of the last layer of the encoder of the model.
        encoder_hidden_states (`tuple(paddle.Tensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
            Tuple of `paddle.Tensor` (one for the output of the embeddings, if the model has an embedding layer, +
            one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`.

            Hidden-states of the encoder at the output of each layer plus the initial embedding outputs.
        encoder_attentions (`tuple(paddle.Tensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
            Tuple of `paddle.Tensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
            sequence_length)`.

            Attentions weights of the encoder, after the attention softmax, used to compute the weighted average in the
            self-attention heads.
    """

    loss: Optional[paddle.Tensor] = None
    logits: paddle.Tensor = None
    past_key_values: Optional[Tuple[Tuple[paddle.Tensor]]] = None
    decoder_hidden_states: Optional[Tuple[paddle.Tensor]] = None
    decoder_attentions: Optional[Tuple[paddle.Tensor]] = None
    cross_attentions: Optional[Tuple[paddle.Tensor]] = None
    encoder_last_hidden_state: Optional[paddle.Tensor] = None
    encoder_hidden_states: Optional[Tuple[paddle.Tensor]] = None
    encoder_attentions: Optional[Tuple[paddle.Tensor]] = None


@dataclass
class MultipleChoiceModelOutput(ModelOutput):
    """
    Base class for outputs of multiple choice models.

    Args:
        loss (`paddle.Tensor` of shape `(1,)`, *optional*, returned when `labels` is provided):
            Classification loss.
        logits (`paddle.Tensor` of shape `(batch_size, num_choices)`):
            *num_choices* is the second dimension of the input tensors (see *input_ids* above).

            Classification scores (before SoftMax).
        hidden_states (`tuple(paddle.Tensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
            Tuple of `paddle.Tensor` (one for the output of the embeddings, if the model has an embedding layer, +
            one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`.

            Hidden-states of the model at the output of each layer plus the optional initial embedding outputs.
        attentions (`tuple(paddle.Tensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
            Tuple of `paddle.Tensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
            sequence_length)`.

            Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
            heads.
    """

    loss: Optional[paddle.Tensor] = None
    logits: paddle.Tensor = None
    hidden_states: Optional[Tuple[paddle.Tensor]] = None
    attentions: Optional[Tuple[paddle.Tensor]] = None


@dataclass
class TokenClassifierOutput(ModelOutput):
    """
    Base class for outputs of token classification models.

    Args:
        loss (`paddle.Tensor` of shape `(1,)`, *optional*, returned when `labels` is provided):
            Classification loss.
        logits (`paddle.Tensor` of shape `(batch_size, sequence_length, config.num_labels)`):
            Classification scores (before SoftMax).
        hidden_states (`tuple(paddle.Tensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
            Tuple of `paddle.Tensor` (one for the output of the embeddings, if the model has an embedding layer, +
            one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`.

            Hidden-states of the model at the output of each layer plus the optional initial embedding outputs.
        attentions (`tuple(paddle.Tensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
            Tuple of `paddle.Tensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
            sequence_length)`.

            Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
            heads.
    """

    loss: Optional[paddle.Tensor] = None
    logits: paddle.Tensor = None
    hidden_states: Optional[Tuple[paddle.Tensor]] = None
    attentions: Optional[Tuple[paddle.Tensor]] = None


@dataclass
class QuestionAnsweringModelOutput(ModelOutput):
    """
    Base class for outputs of question answering models.

    Args:
        loss (`paddle.Tensor` of shape `(1,)`, *optional*, returned when `labels` is provided):
            Total span extraction loss is the sum of a Cross-Entropy for the start and end positions.
        start_logits (`paddle.Tensor` of shape `(batch_size, sequence_length)`):
            Span-start scores (before SoftMax).
        end_logits (`paddle.Tensor` of shape `(batch_size, sequence_length)`):
            Span-end scores (before SoftMax).
        hidden_states (`tuple(paddle.Tensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
            Tuple of `paddle.Tensor` (one for the output of the embeddings, if the model has an embedding layer, +
            one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`.

            Hidden-states of the model at the output of each layer plus the optional initial embedding outputs.
        attentions (`tuple(paddle.Tensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
            Tuple of `paddle.Tensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
            sequence_length)`.

            Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
            heads.
    """

    loss: Optional[paddle.Tensor] = None
    start_logits: paddle.Tensor = None
    end_logits: paddle.Tensor = None
    hidden_states: Optional[Tuple[paddle.Tensor]] = None
    attentions: Optional[Tuple[paddle.Tensor]] = None
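

# Span-extraction sketch (a naive illustration; `out` is a
# `QuestionAnsweringModelOutput` and `input_ids` is hypothetical; a real decoder
# would also enforce start <= end and score candidate spans jointly):
#
#     start = paddle.argmax(out.start_logits, axis=-1)  # (batch_size,)
#     end = paddle.argmax(out.end_logits, axis=-1)      # (batch_size,)
#     answer_ids = input_ids[0, start[0]:end[0] + 1]    # token ids of the span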


@dataclass
class Seq2SeqQuestionAnsweringModelOutput(ModelOutput):
    """
    Base class for outputs of sequence-to-sequence question answering models.

    Args:
        loss (`paddle.Tensor` of shape `(1,)`, *optional*, returned when `labels` is provided):
            Total span extraction loss is the sum of a Cross-Entropy for the start and end positions.
        start_logits (`paddle.Tensor` of shape `(batch_size, sequence_length)`):
            Span-start scores (before SoftMax).
        end_logits (`paddle.Tensor` of shape `(batch_size, sequence_length)`):
            Span-end scores (before SoftMax).
        past_key_values (`tuple(tuple(paddle.Tensor))`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`):
            Tuple of `tuple(paddle.Tensor)` of length `config.n_layers`, with each tuple having 2 tensors of shape
            `(batch_size, num_heads, sequence_length, embed_size_per_head)` and 2 additional tensors of shape
            `(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)`.

            Contains pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention
            blocks) that can be used (see `past_key_values` input) to speed up sequential decoding.
        decoder_hidden_states (`tuple(paddle.Tensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
            Tuple of `paddle.Tensor` (one for the output of the embeddings, if the model has an embedding layer, +
            one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`.

            Hidden-states of the decoder at the output of each layer plus the initial embedding outputs.
        decoder_attentions (`tuple(paddle.Tensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
            Tuple of `paddle.Tensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
            sequence_length)`.

            Attention weights of the decoder, after the attention softmax, used to compute the weighted average in the
            self-attention heads.
        cross_attentions (`tuple(paddle.Tensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
            Tuple of `paddle.Tensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
            sequence_length)`.

            Attention weights of the decoder's cross-attention layer, after the attention softmax, used to compute the
            weighted average in the cross-attention heads.
        encoder_last_hidden_state (`paddle.Tensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
            Sequence of hidden-states at the output of the last layer of the encoder of the model.
        encoder_hidden_states (`tuple(paddle.Tensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
            Tuple of `paddle.Tensor` (one for the output of the embeddings, if the model has an embedding layer, +
            one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`.

            Hidden-states of the encoder at the output of each layer plus the initial embedding outputs.
        encoder_attentions (`tuple(paddle.Tensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
            Tuple of `paddle.Tensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
            sequence_length)`.

            Attention weights of the encoder, after the attention softmax, used to compute the weighted average in the
            self-attention heads.
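
    Example (a minimal usage sketch; the logits are random stand-ins for what a real
    seq2seq QA model would return):

    ```python
    >>> import paddle

    >>> outputs = Seq2SeqQuestionAnsweringModelOutput(
    ...     start_logits=paddle.randn([2, 128]),
    ...     end_logits=paddle.randn([2, 128]),
    ... )
    >>> outputs["start_logits"] is outputs.start_logits  # dict-style and attribute access agree
    True
    >>> outputs[0] is outputs.start_logits  # integer indexing skips `None` fields such as `loss`
    True
    >>> len(outputs.to_tuple())  # only the non-`None` fields survive
    2
    ```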
    """

    loss: Optional[paddle.Tensor] = None
    start_logits: paddle.Tensor = None
    end_logits: paddle.Tensor = None
    past_key_values: Optional[Tuple[Tuple[paddle.Tensor]]] = None
    decoder_hidden_states: Optional[Tuple[paddle.Tensor]] = None
    decoder_attentions: Optional[Tuple[paddle.Tensor]] = None
    cross_attentions: Optional[Tuple[paddle.Tensor]] = None
    encoder_last_hidden_state: Optional[paddle.Tensor] = None
    encoder_hidden_states: Optional[Tuple[paddle.Tensor]] = None
    encoder_attentions: Optional[Tuple[paddle.Tensor]] = None


@dataclass
class SemanticSegmenterOutput(ModelOutput):
    """
    Base class for outputs of semantic segmentation models.

    Args:
        loss (`paddle.Tensor` of shape `(1,)`, *optional*, returned when `labels` is provided):
            Classification (or regression if config.num_labels==1) loss.
        logits (`paddle.Tensor` of shape `(batch_size, config.num_labels, logits_height, logits_width)`):
            Classification scores for each pixel.

            <Tip warning={true}>

            The logits returned do not necessarily have the same size as the `pixel_values` passed as inputs. This is
            to avoid doing two interpolations and losing some quality when a user needs to resize the logits to the
            original image size as post-processing. You should always check your logits shape and resize as needed.

            </Tip>

        hidden_states (`tuple(paddle.Tensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
            Tuple of `paddle.Tensor` (one for the output of the embeddings, if the model has an embedding layer, +
            one for the output of each layer) of shape `(batch_size, patch_size, hidden_size)`.

            Hidden-states of the model at the output of each layer plus the optional initial embedding outputs.
        attentions (`tuple(paddle.Tensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
            Tuple of `paddle.Tensor` (one for each layer) of shape `(batch_size, num_heads, patch_size,
            sequence_length)`.

            Attention weights after the attention softmax, used to compute the weighted average in the self-attention
            heads.
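
    Example (a minimal sketch of the resize described in the tip above; the 19-class
    logits and the `[512, 512]` target size are made-up values):

    ```python
    >>> import paddle

    >>> outputs = SemanticSegmenterOutput(logits=paddle.randn([1, 19, 128, 128]))
    >>> # upsample the logits back to the original image size in a single interpolation
    >>> upsampled = paddle.nn.functional.interpolate(
    ...     outputs.logits, size=[512, 512], mode="bilinear", align_corners=False
    ... )
    >>> upsampled.argmax(axis=1).shape  # one class id per pixel
    [1, 512, 512]
    ```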
    """

    loss: Optional[paddle.Tensor] = None
    logits: paddle.Tensor = None
    hidden_states: Optional[Tuple[paddle.Tensor]] = None
    attentions: Optional[Tuple[paddle.Tensor]] = None


@dataclass
class ImageClassifierOutput(ModelOutput):
    """
    Base class for outputs of image classification models.

    Args:
        loss (`paddle.Tensor` of shape `(1,)`, *optional*, returned when `labels` is provided):
            Classification (or regression if config.num_labels==1) loss.
        logits (`paddle.Tensor` of shape `(batch_size, config.num_labels)`):
            Classification (or regression if config.num_labels==1) scores (before SoftMax).
        hidden_states (`tuple(paddle.Tensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
            Tuple of `paddle.Tensor` (one for the output of the embeddings, if the model has an embedding layer, +
            one for the output of each stage) of shape `(batch_size, sequence_length, hidden_size)`. Hidden-states
            (also called feature maps) of the model at the output of each stage.
        attentions (`tuple(paddle.Tensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
            Tuple of `paddle.Tensor` (one for each layer) of shape `(batch_size, num_heads, patch_size,
            sequence_length)`.

            Attention weights after the attention softmax, used to compute the weighted average in the self-attention
            heads.
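
    Example (a minimal sketch; the random 1000-class logits stand in for a real model's
    output):

    ```python
    >>> import paddle
    >>> import paddle.nn.functional as F

    >>> outputs = ImageClassifierOutput(logits=paddle.randn([1, 1000]))
    >>> probs = F.softmax(outputs.logits, axis=-1)        # per-class probabilities
    >>> predicted_class = outputs.logits.argmax(axis=-1)  # index of the top class
    ```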
    """

    loss: Optional[paddle.Tensor] = None
    logits: paddle.Tensor = None
    hidden_states: Optional[Tuple[paddle.Tensor]] = None
    attentions: Optional[Tuple[paddle.Tensor]] = None


@dataclass
class ImageClassifierOutputWithNoAttention(ModelOutput):
    """
    Base class for outputs of image classification models.

    Args:
        loss (`paddle.Tensor` of shape `(1,)`, *optional*, returned when `labels` is provided):
            Classification (or regression if config.num_labels==1) loss.
        logits (`paddle.Tensor` of shape `(batch_size, config.num_labels)`):
            Classification (or regression if config.num_labels==1) scores (before SoftMax).
        hidden_states (`tuple(paddle.Tensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
            Tuple of `paddle.Tensor` (one for the output of the embeddings, if the model has an embedding layer, +
            one for the output of each stage) of shape `(batch_size, num_channels, height, width)`. Hidden-states (also
            called feature maps) of the model at the output of each stage.
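
    Example (a minimal sketch; the two hand-built feature maps stand in for what a
    convolutional backbone would return at each stage):

    ```python
    >>> import paddle

    >>> outputs = ImageClassifierOutputWithNoAttention(
    ...     logits=paddle.randn([1, 1000]),
    ...     hidden_states=(paddle.randn([1, 64, 56, 56]), paddle.randn([1, 128, 28, 28])),
    ... )
    >>> [h.shape for h in outputs.hidden_states]  # per-stage (N, C, H, W) feature maps
    [[1, 64, 56, 56], [1, 128, 28, 28]]
    ```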
    """

    loss: Optional[paddle.Tensor] = None
    logits: paddle.Tensor = None
    hidden_states: Optional[Tuple[paddle.Tensor]] = None


@dataclass
class DepthEstimatorOutput(ModelOutput):
    """
    Base class for outputs of depth estimation models.

    Args:
        loss (`paddle.Tensor` of shape `(1,)`, *optional*, returned when `labels` is provided):
            Depth estimation loss.
        predicted_depth (`paddle.Tensor` of shape `(batch_size, height, width)`):
            Predicted depth for each pixel.

        hidden_states (`tuple(paddle.Tensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
            Tuple of `paddle.Tensor` (one for the output of the embeddings, if the model has an embedding layer, +
            one for the output of each layer) of shape `(batch_size, num_channels, height, width)`.

            Hidden-states of the model at the output of each layer plus the optional initial embedding outputs.
        attentions (`tuple(paddle.Tensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
            Tuple of `paddle.Tensor` (one for each layer) of shape `(batch_size, num_heads, patch_size,
            sequence_length)`.

            Attention weights after the attention softmax, used to compute the weighted average in the self-attention
            heads.
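
    Example (a minimal sketch of resizing the depth map back to the input resolution;
    all shapes are made-up values):

    ```python
    >>> import paddle

    >>> outputs = DepthEstimatorOutput(predicted_depth=paddle.rand([1, 96, 96]))
    >>> # interpolate expects NCHW input, so add and then drop a channel axis
    >>> depth = paddle.nn.functional.interpolate(
    ...     outputs.predicted_depth.unsqueeze(1), size=[384, 384], mode="bilinear", align_corners=False
    ... ).squeeze(1)
    >>> depth.shape  # back to (batch_size, height, width)
    [1, 384, 384]
    ```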
    """

    loss: Optional[paddle.Tensor] = None
    predicted_depth: paddle.Tensor = None
    hidden_states: Optional[Tuple[paddle.Tensor]] = None
    attentions: Optional[Tuple[paddle.Tensor]] = None


@dataclass
class Wav2Vec2BaseModelOutput(ModelOutput):
    """
    Base class for models that have been trained with the Wav2Vec2 loss objective.

    Args:
        last_hidden_state (`paddle.Tensor` of shape `(batch_size, sequence_length, hidden_size)`):
            Sequence of hidden-states at the output of the last layer of the model.
        extract_features (`paddle.Tensor` of shape `(batch_size, sequence_length, conv_dim[-1])`):
            Sequence of extracted feature vectors of the last convolutional layer of the model.
        hidden_states (`tuple(paddle.Tensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
            Tuple of `paddle.Tensor` (one for the output of the embeddings + one for the output of each layer) of
            shape `(batch_size, sequence_length, hidden_size)`.

            Hidden-states of the model at the output of each layer plus the initial embedding outputs.
        attentions (`tuple(paddle.Tensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
            Tuple of `paddle.Tensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
            sequence_length)`.

            Attention weights after the attention softmax, used to compute the weighted average in the self-attention
            heads.
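
    Example (a minimal sketch; the frame count and feature widths are made-up values
    rather than the output of a real encoder):

    ```python
    >>> import paddle

    >>> outputs = Wav2Vec2BaseModelOutput(
    ...     last_hidden_state=paddle.randn([1, 292, 768]),  # contextualized transformer frames
    ...     extract_features=paddle.randn([1, 292, 512]),   # raw convolutional features
    ... )
    >>> # both sequences share the same time axis, one vector per audio frame
    >>> outputs.last_hidden_state.shape[1] == outputs.extract_features.shape[1]
    True
    ```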
    """

    last_hidden_state: paddle.Tensor = None
    extract_features: paddle.Tensor = None
    hidden_states: Optional[Tuple[paddle.Tensor]] = None
    attentions: Optional[Tuple[paddle.Tensor]] = None


@dataclass
class XVectorOutput(ModelOutput):
    """
    Output type of [`Wav2Vec2ForXVector`].

    Args:
        loss (`paddle.Tensor` of shape `(1,)`, *optional*, returned when `labels` is provided):
            Classification loss.
        logits (`paddle.Tensor` of shape `(batch_size, config.xvector_output_dim)`):
            Classification hidden states before AMSoftmax.
        embeddings (`paddle.Tensor` of shape `(batch_size, config.xvector_output_dim)`):
            Utterance embeddings used for vector similarity-based retrieval.
        hidden_states (`tuple(paddle.Tensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
            Tuple of `paddle.Tensor` (one for the output of the embeddings + one for the output of each layer) of
            shape `(batch_size, sequence_length, hidden_size)`.

            Hidden-states of the model at the output of each layer plus the initial embedding outputs.
        attentions (`tuple(paddle.Tensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
            Tuple of `paddle.Tensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
            sequence_length)`.

            Attention weights after the attention softmax, used to compute the weighted average in the self-attention
            heads.
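
    Example (a minimal sketch of similarity-based speaker verification; the random
    embeddings and the 0.7 threshold are made-up values):

    ```python
    >>> import paddle

    >>> out1 = XVectorOutput(logits=paddle.randn([1, 512]), embeddings=paddle.randn([1, 512]))
    >>> out2 = XVectorOutput(logits=paddle.randn([1, 512]), embeddings=paddle.randn([1, 512]))
    >>> similarity = paddle.nn.functional.cosine_similarity(
    ...     out1.embeddings, out2.embeddings, axis=-1
    ... )
    >>> is_same_speaker = bool(similarity > 0.7)  # accept when similarity clears a tuned threshold
    ```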
    """

    loss: Optional[paddle.Tensor] = None
    logits: paddle.Tensor = None
    embeddings: paddle.Tensor = None
    hidden_states: Optional[Tuple[paddle.Tensor]] = None
    attentions: Optional[Tuple[paddle.Tensor]] = None