"""PLBART model configuration"""

from collections import OrderedDict
from typing import Mapping

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast
from ...utils import logging


logger = logging.get_logger(__name__)


class PLBartConfig(PretrainedConfig):
    r"""
This is the configuration class to store the configuration of a [`PLBartModel`]. It is used to instantiate an
PLBART model according to the specified arguments, defining the model architecture. Instantiating a configuration
with the defaults will yield a similar configuration to that of the PLBART
[uclanlp/plbart-base](https://huggingface.co/uclanlp/plbart-base) architecture.

Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
documentation from [`PretrainedConfig`] for more information.


Args:
    vocab_size (`int`, *optional*, defaults to 50005):
        Vocabulary size of the PLBART model. Defines the number of different tokens that can be represented by the
        `input_ids` passed when calling [`PLBartModel`].
    d_model (`int`, *optional*, defaults to 768):
        Dimensionality of the layers and the pooler layer.
    encoder_layers (`int`, *optional*, defaults to 6):
        Number of encoder layers.
    decoder_layers (`int`, *optional*, defaults to 6):
        Number of decoder layers.
    encoder_attention_heads (`int`, *optional*, defaults to 12):
        Number of attention heads for each attention layer in the Transformer encoder.
    decoder_attention_heads (`int`, *optional*, defaults to 12):
        Number of attention heads for each attention layer in the Transformer decoder.
    decoder_ffn_dim (`int`, *optional*, defaults to 3072):
        Dimensionality of the "intermediate" (often named feed-forward) layer in decoder.
    encoder_ffn_dim (`int`, *optional*, defaults to 3072):
        Dimensionality of the "intermediate" (often named feed-forward) layer in encoder.
    activation_function (`str` or `function`, *optional*, defaults to `"gelu"`):
        The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
        `"relu"`, `"silu"` and `"gelu_new"` are supported.
    dropout (`float`, *optional*, defaults to 0.1):
        The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
    attention_dropout (`float`, *optional*, defaults to 0.1):
        The dropout ratio for the attention probabilities.
    activation_dropout (`float`, *optional*, defaults to 0.0):
        The dropout ratio for activations inside the fully connected layer.
    classifier_dropout (`float`, *optional*, defaults to 0.0):
        The dropout ratio for the classifier.
    max_position_embeddings (`int`, *optional*, defaults to 1024):
        The maximum sequence length that this model might ever be used with. Typically set this to something large
        just in case (e.g., 512 or 1024 or 2048).
    init_std (`float`, *optional*, defaults to 0.02):
        The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
    encoder_layerdrop (`float`, *optional*, defaults to 0.0):
        The LayerDrop probability for the encoder. See the [LayerDrop paper](https://arxiv.org/abs/1909.11556)
        for more details.
    decoder_layerdrop (`float`, *optional*, defaults to 0.0):
        The LayerDrop probability for the decoder. See the [LayerDrop paper](https://arxiv.org/abs/1909.11556)
        for more details.
    scale_embedding (`bool`, *optional*, defaults to `True`):
        Scale embeddings by dividing by sqrt(d_model).
    use_cache (`bool`, *optional*, defaults to `True`):
        Whether or not the model should return the last key/value attentions (not used by all models).
    forced_eos_token_id (`int`, *optional*, defaults to 2):
        The id of the token to force as the last generated token when `max_length` is reached. Usually set to
        `eos_token_id`.

Example:

```python
>>> from transformers import PLBartConfig, PLBartModel

>>> # Initializing a PLBART uclanlp/plbart-base style configuration
>>> configuration = PLBartConfig()

>>> # Initializing a model (with random weights) from the uclanlp/plbart-base style configuration
>>> model = PLBartModel(configuration)

>>> # Accessing the model configuration
>>> configuration = model.config
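
>>> # Defaults can be overridden at construction time; the values below are
>>> # illustrative, not those of a released checkpoint
>>> small_configuration = PLBartConfig(
...     encoder_layers=3,
...     decoder_layers=3,
...     d_model=512,
...     encoder_attention_heads=8,
...     decoder_attention_heads=8,
... )
>>> small_model = PLBartModel(small_configuration)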
```"""

    model_type = "plbart"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}

    def __init__(
        self,
        vocab_size=50005,
        max_position_embeddings=1024,
        encoder_layers=6,
        encoder_ffn_dim=3072,
        encoder_attention_heads=12,
        decoder_layers=6,
        decoder_ffn_dim=3072,
        decoder_attention_heads=12,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        use_cache=True,
        is_encoder_decoder=True,
        activation_function="gelu",
        d_model=768,
        dropout=0.1,
        attention_dropout=0.1,
        activation_dropout=0.0,
        init_std=0.02,
        classifier_dropout=0.0,
        scale_embedding=True,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        forced_eos_token_id=2,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.classifier_dropout = classifier_dropout
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            forced_eos_token_id=forced_eos_token_id,
            **kwargs,
        )
g)PLBartOnnxConfig   returnc                 0    [        SSSS.4SSSS.4/5      $ )N	input_idsbatchsequencer   r:   attention_maskr   r.   s    r1   inputsPLBartOnnxConfig.inputs   s.    'j9:!w:#>?
 	
r3   c                     U R                   (       a  [        SSSS.4SSSS.4SSSS.4/5      $ [        SSSS.4SSSS.4/5      $ )Nlast_hidden_staterL   rM   rN   	past_keys)r   r;   encoder_last_hidden_state)use_pastr   rP   s    r1   outputsPLBartOnnxConfig.outputs   sp    ==(g**EF g*"=>0g*2MN  (g**EF0g*2MN r3   r   N)r<   r=   r>   r?   propertyr   strintrQ   rX   rD   r   r3   r1   rG   rG      s\    
WS#X%6 67 
 
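

# A minimal sketch of how these axis maps are typically consumed during ONNX
# export (illustrative only; `use_past` is set through the `OnnxConfigWithPast`
# constructor, and the real export entry point lives in `transformers.onnx`):
#
#     onnx_config = PLBartOnnxConfig(PLBartConfig(), use_past=False)
#     dynamic_axes = {**onnx_config.inputs, **onnx_config.outputs}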


__all__ = ["PLBartConfig"]