
    fTh/                         S r SSKJr  SSKJrJrJrJr  SSKJ	r	J
r
Jr  SSKJr  SSKJrJr  SSKJr  \R&                  " \5      r " S	 S
\5      r " S S\5      rS
S/rg)zOpenAI GPT-2 configuration    )OrderedDict)AnyListMappingOptional   )PreTrainedTokenizer
TensorTypeis_torch_available)PretrainedConfig)OnnxConfigWithPastPatchingSpec)loggingc                   v   ^  \ rS rSrSrSrS/rSSSSS	.r                       SU 4S
 jjrSr	U =r
$ )
GPT2Config   a  
This is the configuration class to store the configuration of a [`GPT2Model`] or a [`TFGPT2Model`]. It is used to
instantiate a GPT-2 model according to the specified arguments, defining the model architecture. Instantiating a
configuration with the defaults will yield a similar configuration to that of the GPT-2
[openai-community/gpt2](https://huggingface.co/openai-community/gpt2) architecture.

Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
documentation from [`PretrainedConfig`] for more information.


Args:
    vocab_size (`int`, *optional*, defaults to 50257):
        Vocabulary size of the GPT-2 model. Defines the number of different tokens that can be represented by the
        `inputs_ids` passed when calling [`GPT2Model`] or [`TFGPT2Model`].
    n_positions (`int`, *optional*, defaults to 1024):
        The maximum sequence length that this model might ever be used with. Typically set this to something large
        just in case (e.g., 512 or 1024 or 2048).
    n_embd (`int`, *optional*, defaults to 768):
        Dimensionality of the embeddings and hidden states.
    n_layer (`int`, *optional*, defaults to 12):
        Number of hidden layers in the Transformer encoder.
    n_head (`int`, *optional*, defaults to 12):
        Number of attention heads for each attention layer in the Transformer encoder.
    n_inner (`int`, *optional*):
        Dimensionality of the inner feed-forward layers. `None` will set it to 4 times `n_embd`.
    activation_function (`str`, *optional*, defaults to `"gelu_new"`):
        Activation function, to be selected in the list `["relu", "silu", "gelu", "tanh", "gelu_new"]`.
    resid_pdrop (`float`, *optional*, defaults to 0.1):
        The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
    embd_pdrop (`float`, *optional*, defaults to 0.1):
        The dropout ratio for the embeddings.
    attn_pdrop (`float`, *optional*, defaults to 0.1):
        The dropout ratio for the attention.
    layer_norm_epsilon (`float`, *optional*, defaults to 1e-05):
        The epsilon to use in the layer normalization layers.
    initializer_range (`float`, *optional*, defaults to 0.02):
        The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
    summary_type (`string`, *optional*, defaults to `"cls_index"`):
        Argument used when doing sequence summary, used in the models [`GPT2DoubleHeadsModel`] and
        [`TFGPT2DoubleHeadsModel`].

        Has to be one of the following options:

            - `"last"`: Take the last token hidden state (like XLNet).
            - `"first"`: Take the first token hidden state (like BERT).
            - `"mean"`: Take the mean of all tokens hidden states.
            - `"cls_index"`: Supply a Tensor of classification token position (like GPT/GPT-2).
            - `"attn"`: Not implemented now, use multi-head attention.
    summary_use_proj (`bool`, *optional*, defaults to `True`):
        Argument used when doing sequence summary, used in the models [`GPT2DoubleHeadsModel`] and
        [`TFGPT2DoubleHeadsModel`].

        Whether or not to add a projection after the vector extraction.
    summary_activation (`str`, *optional*):
        Argument used when doing sequence summary. Used for the multiple choice head in
        [`GPT2DoubleHeadsModel`].

        Pass `"tanh"` for a tanh activation to the output, any other value will result in no activation.
    summary_proj_to_labels (`bool`, *optional*, defaults to `True`):
        Argument used when doing sequence summary, used in the models [`GPT2DoubleHeadsModel`] and
        [`TFGPT2DoubleHeadsModel`].

        Whether the projection outputs should have `config.num_labels` or `config.hidden_size` classes.
    summary_first_dropout (`float`, *optional*, defaults to 0.1):
        Argument used when doing sequence summary, used in the models [`GPT2DoubleHeadsModel`] and
        [`TFGPT2DoubleHeadsModel`].

        The dropout ratio to be used after the projection and activation.
    scale_attn_weights (`bool`, *optional*, defaults to `True`):
        Scale attention weights by dividing by `sqrt(hidden_size)`.
    use_cache (`bool`, *optional*, defaults to `True`):
        Whether or not the model should return the last key/values attentions (not used by all models).
    bos_token_id (`int`, *optional*, defaults to 50256):
        Id of the beginning of sentence token in the vocabulary.
    eos_token_id (`int`, *optional*, defaults to 50256):
        Id of the end of sentence token in the vocabulary.
    scale_attn_by_inverse_layer_idx (`bool`, *optional*, defaults to `False`):
        Whether to additionally scale attention weights by `1 / (layer_idx + 1)`.
    reorder_and_upcast_attn (`bool`, *optional*, defaults to `False`):
        Whether to scale keys (K) prior to computing attention (dot-product) and upcast attention
        dot-product/softmax to float32 when training with mixed precision.

Example:

```python
>>> from transformers import GPT2Config, GPT2Model

>>> # Initializing a GPT2 configuration
>>> configuration = GPT2Config()

>>> # Initializing a model (with random weights) from the configuration
>>> model = GPT2Model(configuration)

>>> # Accessing the model configuration
>>> configuration = model.config
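>>> # Illustrative only: any argument documented above can be overridden the same way.
>>> # This builds a smaller, hypothetical GPT-2 variant (not a released checkpoint).
>>> small_configuration = GPT2Config(n_layer=6, n_embd=384, n_head=6)
>>> small_model = GPT2Model(small_configuration)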
```gpt2past_key_valuesn_embdn_positionsn_headn_layer)hidden_sizemax_position_embeddingsnum_attention_headsnum_hidden_layersc                 P  > Xl         X l        X0l        X@l        XPl        X`l        Xpl        Xl        Xl        Xl	        Xl
        Xl        Xl        Xl        Xl        UU l        UU l        UU l        UU l        UU l        UU l        UU l        UU l        [.        TU ]`  " SUUS.UD6  g )N)bos_token_ideos_token_id )
vocab_sizer   r   r   r   n_inneractivation_functionresid_pdrop
embd_pdrop
attn_pdroplayer_norm_epsiloninitializer_rangesummary_typesummary_use_projsummary_activationsummary_first_dropoutsummary_proj_to_labelsscale_attn_weights	use_cachescale_attn_by_inverse_layer_idxreorder_and_upcast_attnr   r   super__init__)selfr!   r   r   r   r   r"   r#   r$   r%   r&   r'   r(   r)   r*   r+   r-   r,   r.   r/   r   r   r0   r1   kwargs	__class__s                            c/var/www/auris/envauris/lib/python3.13/site-packages/transformers/models/gpt2/configuration_gpt2.pyr3   GPT2Config.__init__   s    6 %&#6 &$$"4!2( 0"4%:"&<#"4"/N,'>$((XlXQWX    )r#   r&   r   r%   r   r(   r'   r   r   r"   r   r   r1   r$   r0   r.   r+   r,   r-   r)   r*   r/   r!   )iQ  i   i      r:   Ngelu_new皙?r<   r<   gh㈵>g{Gz?	cls_indexTNTr<   TTP  r>   FF)__name__
__module____qualname____firstlineno____doc__
model_typekeys_to_ignore_at_inferenceattribute_mapr3   __static_attributes____classcell__r6   s   @r7   r   r      s    _B J#4"5#0'&	M & #!(- %14Y 4Yr9   r   c                   
  ^  \ rS rSr   SS\S\S\\\      S\	4U 4S jjjr
\S\\\\\4   4   4S j5       r\S\4S	 j5       r\S\4S
 j5       r    SS\S\S\S\	S\\   S\\\4   4U 4S jjjr\S\4S j5       rSrU =r$ )GPT2OnnxConfig   configtaskpatching_specsuse_pastc                 ~   > [         TU ]  XX4S9  [        U R                  SS 5      (       d  SU R                  l        g g )N)rN   rO   rP   pad_token_idr   )r2   r3   getattr_configrR   )r4   rM   rN   rO   rP   r6   s        r7   r3   GPT2OnnxConfig.__init__   s;     	>]t||^T::()DLL% ;r9   returnc                     [        SSSS.05      nU R                  (       a  U R                  USS9  SSS.US'   U$ SSS.US'   U$ )	N	input_idsbatchsequence)r      inputs)	directionzpast_sequence + sequenceattention_mask)r   rP   fill_with_past_key_values_)r4   common_inputss     r7   r\   GPT2OnnxConfig.inputs   sa    #[g*2M$NO==++MX+N29>X.YM*+  3:j.IM*+r9   c                 .    U R                   R                  $ N)rT   r   r4   s    r7   
num_layersGPT2OnnxConfig.num_layers   s    ||###r9   c                 .    U R                   R                  $ rc   )rT   r   rd   s    r7   r   "GPT2OnnxConfig.num_attention_heads   s    ||"""r9   	tokenizer
batch_size
seq_lengthis_pair	frameworkc           	      r  > [         [        U ]  XX4US9n[        SUS   05      nU R                  (       a  [        5       (       d  [        S5      eSS KnUS   R                  u  pU
S-   nU	U R                  UU R                  R                  U R                  -  4n[        U R                  5       Vs/ s H$  oR                  U5      UR                  U5      4PM&     snUS'   US   US'   U R                  (       a6  US   R                  nWR!                  US   UR#                  W	WUS9/S	S
9US'   U$ s  snf )N)rj   rk   rl   rm   rX   zACannot generate dummy past_keys inputs without PyTorch installed.r      r   r^   )dtyper[   )dim)r2   r   generate_dummy_inputsr   rP   r   
ValueErrortorchshaper   rT   r   rangere   zerosrp   catones)r4   ri   rj   rk   rl   rm   r`   ordered_inputsrt   rY   seqlenpast_key_values_length
past_shape_
mask_dtyper6   s                  r7   rr   $GPT2OnnxConfig.generate_dummy_inputs   s^    0$M`i N 

 %k=3M%NO ==%'' !dee -k : @ @)/!&,,*LL,,0H0HH	
 QVVZVeVePf5Pf1[[,ekk*.EFPf501 ,99I+J'(=='(89??J/4yy 015::eE[cm:3nouv 09 0N+, 5s   2+D4c                     g)N   r    rd   s    r7   default_onnx_opset!GPT2OnnxConfig.default_onnx_opset  s    r9   r    )defaultNF)r   FN)r?   r@   rA   rB   r   strr   r   r   boolr3   propertyr   intr\   re   r   r	   r
   r   rr   r   rG   rH   rI   s   @r7   rK   rK      s(    7;
* 
* 
* !l!34	
*
 
* 
* WS#X%6 67   $C $ $ #S # # *.*&* * 	*
 * J'* 
c	* *X C  r9   rK   N)rC   collectionsr   typingr   r   r   r    r	   r
   r   configuration_utilsr   onnxr   r   utilsr   
get_loggerr?   loggerr   rK   __all__r    r9   r7   <module>r      sc     ! # / / C C 3 4  
		H	%_Y! _YDN' Nb )
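

# Illustrative sketch only (not part of the original module): one way GPT2OnnxConfig can be
# exercised to build dummy ONNX-export inputs. The checkpoint name is an assumption; any
# GPT-2 tokenizer works, and PyTorch must be installed because use_past=True is requested.
#
#     from transformers import AutoTokenizer, GPT2Config, TensorType
#     from transformers.models.gpt2.configuration_gpt2 import GPT2OnnxConfig
#
#     config = GPT2Config()
#     onnx_config = GPT2OnnxConfig(config, task="default", use_past=True)
#     tokenizer = AutoTokenizer.from_pretrained("openai-community/gpt2")
#     dummy_inputs = onnx_config.generate_dummy_inputs(
#         tokenizer, batch_size=2, seq_length=8, framework=TensorType.PYTORCH
#     )
#     # dummy_inputs holds "input_ids", "past_key_values" and an "attention_mask" that has
#     # been widened to cover the dummy past positions.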

__all__ = ["GPT2Config", "GPT2OnnxConfig"]