
"""ERNIE model configuration"""

from collections import OrderedDict
from typing import Mapping

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging


logger = logging.get_logger(__name__)


class ErnieConfig(PretrainedConfig):
    r"""
This is the configuration class to store the configuration of a [`ErnieModel`] or a [`TFErnieModel`]. It is used to
instantiate an ERNIE model according to the specified arguments, defining the model architecture. Instantiating a
configuration with the defaults will yield a similar configuration to that of the ERNIE
[nghuyong/ernie-3.0-base-zh](https://huggingface.co/nghuyong/ernie-3.0-base-zh) architecture.

Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
documentation from [`PretrainedConfig`] for more information.


Args:
    vocab_size (`int`, *optional*, defaults to 30522):
        Vocabulary size of the ERNIE model. Defines the number of different tokens that can be represented by the
        `inputs_ids` passed when calling [`ErnieModel`] or [`TFErnieModel`].
    hidden_size (`int`, *optional*, defaults to 768):
        Dimensionality of the encoder layers and the pooler layer.
    num_hidden_layers (`int`, *optional*, defaults to 12):
        Number of hidden layers in the Transformer encoder.
    num_attention_heads (`int`, *optional*, defaults to 12):
        Number of attention heads for each attention layer in the Transformer encoder.
    intermediate_size (`int`, *optional*, defaults to 3072):
        Dimensionality of the "intermediate" (often named feed-forward) layer in the Transformer encoder.
    hidden_act (`str` or `Callable`, *optional*, defaults to `"gelu"`):
        The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
        `"relu"`, `"silu"` and `"gelu_new"` are supported.
    hidden_dropout_prob (`float`, *optional*, defaults to 0.1):
        The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
    attention_probs_dropout_prob (`float`, *optional*, defaults to 0.1):
        The dropout ratio for the attention probabilities.
    max_position_embeddings (`int`, *optional*, defaults to 512):
        The maximum sequence length that this model might ever be used with. Typically set this to something large
        just in case (e.g., 512 or 1024 or 2048).
    type_vocab_size (`int`, *optional*, defaults to 2):
        The vocabulary size of the `token_type_ids` passed when calling [`ErnieModel`] or [`TFErnieModel`].
    task_type_vocab_size (`int`, *optional*, defaults to 3):
        The vocabulary size of the `task_type_ids` for the ERNIE 2.0/ERNIE 3.0 models.
    use_task_id (`bool`, *optional*, defaults to `False`):
        Whether or not the model supports `task_type_ids`.
    initializer_range (`float`, *optional*, defaults to 0.02):
        The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
    layer_norm_eps (`float`, *optional*, defaults to 1e-12):
        The epsilon used by the layer normalization layers.
    pad_token_id (`int`, *optional*, defaults to 0):
        Padding token id.
    position_embedding_type (`str`, *optional*, defaults to `"absolute"`):
        Type of position embedding. Choose one of `"absolute"`, `"relative_key"`, `"relative_key_query"`. For
        positional embeddings use `"absolute"`. For more information on `"relative_key"`, please refer to
        [Self-Attention with Relative Position Representations (Shaw et al.)](https://arxiv.org/abs/1803.02155).
        For more information on `"relative_key_query"`, please refer to *Method 4* in [Improve Transformer Models
        with Better Relative Position Embeddings (Huang et al.)](https://arxiv.org/abs/2009.13658).
    use_cache (`bool`, *optional*, defaults to `True`):
        Whether or not the model should return the last key/values attentions (not used by all models). Only
        relevant if `config.is_decoder=True`.
    classifier_dropout (`float`, *optional*):
        The dropout ratio for the classification head.

Examples:

```python
>>> from transformers import ErnieConfig, ErnieModel

>>> # Initializing an ERNIE nghuyong/ernie-3.0-base-zh style configuration
>>> configuration = ErnieConfig()

>>> # Initializing a model (with random weights) from the nghuyong/ernie-3.0-base-zh style configuration
>>> model = ErnieModel(configuration)

>>> # Accessing the model configuration
>>> configuration = model.config
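
>>> # A further minimal sketch using the documented `use_task_id` / `task_type_vocab_size` arguments
>>> # (values here are illustrative): enable ERNIE's task-type embeddings
>>> task_configuration = ErnieConfig(use_task_id=True, task_type_vocab_size=3)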
```"""

    model_type = "ernie"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        task_type_vocab_size=3,
        use_task_id=False,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.task_type_vocab_size = task_type_vocab_size
        self.use_task_id = use_task_id
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
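
# Usage note (a minimal sketch): `use_cache` is only consulted when the configuration marks the
# model as a decoder; flags such as `is_decoder` are not named parameters above and instead reach
# `PretrainedConfig.__init__` through `**kwargs`, e.g. `ErnieConfig(is_decoder=True, use_cache=True)`.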


class ErnieOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
                ("token_type_ids", dynamic_axis),
                ("task_type_ids", dynamic_axis),
            ]
        )


__all__ = ["ErnieConfig", "ErnieOnnxConfig"]
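

# Usage sketch, assuming the `transformers.onnx` export API: `ErnieOnnxConfig.inputs` only switches
# to the three-axis ("choice") layout for the "multiple-choice" task.
#
#     from transformers import ErnieConfig
#     from transformers.models.ernie.configuration_ernie import ErnieOnnxConfig
#
#     onnx_config = ErnieOnnxConfig(ErnieConfig(), task="multiple-choice")
#     assert onnx_config.inputs["input_ids"] == {0: "batch", 1: "choice", 2: "sequence"}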