
"""XLNet configuration"""

import warnings

from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)


class XLNetConfig(PretrainedConfig):
    """
This is the configuration class to store the configuration of a [`XLNetModel`] or a [`TFXLNetModel`]. It is used to
instantiate an XLNet model according to the specified arguments, defining the model architecture. Instantiating a
configuration with the defaults will yield a similar configuration to that of the
[xlnet/xlnet-large-cased](https://huggingface.co/xlnet/xlnet-large-cased) architecture.

Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
documentation from [`PretrainedConfig`] for more information.

Args:
    vocab_size (`int`, *optional*, defaults to 32000):
        Vocabulary size of the XLNet model. Defines the number of different tokens that can be represented by the
        `input_ids` passed when calling [`XLNetModel`] or [`TFXLNetModel`].
    d_model (`int`, *optional*, defaults to 1024):
        Dimensionality of the encoder layers and the pooler layer.
    n_layer (`int`, *optional*, defaults to 24):
        Number of hidden layers in the Transformer encoder.
    n_head (`int`, *optional*, defaults to 16):
        Number of attention heads for each attention layer in the Transformer encoder.
    d_inner (`int`, *optional*, defaults to 4096):
        Dimensionality of the "intermediate" (often named feed-forward) layer in the Transformer encoder.
    ff_activation (`str` or `Callable`, *optional*, defaults to `"gelu"`):
        The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
        `"relu"`, `"silu"` and `"gelu_new"` are supported.
    untie_r (`bool`, *optional*, defaults to `True`):
        Whether or not to untie relative position biases.
    attn_type (`str`, *optional*, defaults to `"bi"`):
        The attention type used by the model. Set `"bi"` for XLNet, `"uni"` for Transformer-XL.
    initializer_range (`float`, *optional*, defaults to 0.02):
        The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
    layer_norm_eps (`float`, *optional*, defaults to 1e-12):
        The epsilon used by the layer normalization layers.
    dropout (`float`, *optional*, defaults to 0.1):
        The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
    mem_len (`int` or `None`, *optional*, defaults to 512):
        The number of tokens to cache. The key/value pairs that have already been pre-computed in a previous
        forward pass won't be re-computed. See the
        [quickstart](https://huggingface.co/transformers/quickstart.html#using-the-past) for more information.
    reuse_len (`int`, *optional*):
        The number of tokens in the current batch to be cached and reused in the future.
    bi_data (`bool`, *optional*, defaults to `False`):
        Whether or not to use a bidirectional input pipeline. Usually set to `True` during pretraining and `False`
        during finetuning.
    clamp_len (`int`, *optional*, defaults to -1):
        Clamp all relative distances larger than `clamp_len`. Setting this attribute to -1 means no clamping.
    same_length (`bool`, *optional*, defaults to `False`):
        Whether or not to use the same attention length for each token.
    summary_type (`str`, *optional*, defaults to `"last"`):
        Argument used when doing sequence summary. Used in the sequence classification and multiple choice models.

        Has to be one of the following options:

            - `"last"`: Take the last token hidden state (like XLNet).
            - `"first"`: Take the first token hidden state (like BERT).
            - `"mean"`: Take the mean of all tokens hidden states.
            - `"cls_index"`: Supply a Tensor of classification token position (like GPT/GPT-2).
            - `"attn"`: Not implemented now, use multi-head attention.
    summary_use_proj (`bool`, *optional*, defaults to `True`):
        Argument used when doing sequence summary. Used in the sequence classification and multiple choice models.

        Whether or not to add a projection after the vector extraction.
    summary_activation (`str`, *optional*):
        Argument used when doing sequence summary. Used in the sequence classification and multiple choice models.

        Pass `"tanh"` for a tanh activation to the output, any other value will result in no activation.
    summary_proj_to_labels (`bool`, *optional*, defaults to `True`):
        Used in the sequence classification and multiple choice models.

        Whether the projection outputs should have `config.num_labels` or `config.hidden_size` classes.
    summary_last_dropout (`float`, *optional*, defaults to 0.1):
        Used in the sequence classification and multiple choice models.

        The dropout ratio to be used after the projection and activation.
    start_n_top (`int`, *optional*, defaults to 5):
        Used in the SQuAD evaluation script.
    end_n_top (`int`, *optional*, defaults to 5):
        Used in the SQuAD evaluation script.
    use_mems_eval (`bool`, *optional*, defaults to `True`):
        Whether or not the model should make use of the recurrent memory mechanism in evaluation mode.
    use_mems_train (`bool`, *optional*, defaults to `False`):
        Whether or not the model should make use of the recurrent memory mechanism in train mode.

        <Tip>

        For pretraining, it is recommended to set `use_mems_train` to `True`. For fine-tuning, it is recommended to
        set `use_mems_train` to `False` as discussed
        [here](https://github.com/zihangdai/xlnet/issues/41#issuecomment-505102587). If `use_mems_train` is set to
        `True`, one has to make sure that the train batches are correctly pre-processed, *e.g.* `batch_1 = [[This
        line is], [This is the]]` and `batch_2 = [[ the first line], [ second line]]` and that all batches are of
        equal size.

        </Tip>
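
        As an illustrative sketch (hypothetical pre-processing code, not part of the library), one way to chunk
        two equal-length token streams so that position *i* of every batch continues the same stream:

        ```python
        docs = [
            ["This", "line", "is", "the", "first", "line"],
            ["This", "is", "the", "second", "line", "."],
        ]
        chunk = 3  # tokens per example per batch
        n_chunks = min(len(d) for d in docs) // chunk  # drop any ragged tail
        batches = [[d[j * chunk : (j + 1) * chunk] for d in docs] for j in range(n_chunks)]
        # batches[0] == [["This", "line", "is"], ["This", "is", "the"]]
        # batches[1] == [["the", "first", "line"], ["second", "line", "."]]
        ```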

Examples:

```python
>>> from transformers import XLNetConfig, XLNetModel

>>> # Initializing an XLNet configuration
>>> configuration = XLNetConfig()

>>> # Initializing a model (with random weights) from the configuration
>>> model = XLNetModel(configuration)

>>> # Accessing the model configuration
>>> configuration = model.config
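
>>> # Illustrative only (not from the original docs): enlarging the recurrent memory
>>> configuration = XLNetConfig(mem_len=1024)

>>> # Aliases defined in `attribute_map` resolve to XLNet's native attribute names
>>> configuration.hidden_size == configuration.d_model
True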
```

    """

    model_type = "xlnet"
    keys_to_ignore_at_inference = ["mems"]
    attribute_map = {
        "n_token": "vocab_size",  # Backward compatibility
        "hidden_size": "d_model",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        vocab_size=32000,
        d_model=1024,
        n_layer=24,
        n_head=16,
        d_inner=4096,
        ff_activation="gelu",
        untie_r=True,
        attn_type="bi",
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        dropout=0.1,
        mem_len=512,
        reuse_len=None,
        use_mems_eval=True,
        use_mems_train=False,
        bi_data=False,
        clamp_len=-1,
        same_length=False,
        summary_type="last",
        summary_use_proj=True,
        summary_activation="tanh",
        summary_last_dropout=0.1,
        start_n_top=5,
        end_n_top=5,
        pad_token_id=5,
        bos_token_id=1,
        eos_token_id=2,
        **kwargs,
    ):
        """Constructs XLNetConfig."""
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.n_layer = n_layer
        self.n_head = n_head
        if d_model % n_head != 0:
            raise ValueError(f"'d_model % n_head' ({d_model % n_head}) should be equal to 0")
        # `d_head` is derived from `d_model` and `n_head`; reject a conflicting value passed via kwargs
        if "d_head" in kwargs:
            if kwargs["d_head"] != d_model // n_head:
                raise ValueError(
                    f"`d_head` ({kwargs['d_head']}) should be equal to `d_model // n_head` ({d_model // n_head})"
                )
        self.d_head = d_model // n_head
        self.ff_activation = ff_activation
        self.d_inner = d_inner
        self.untie_r = untie_r
        self.attn_type = attn_type

        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps

        self.dropout = dropout
        self.mem_len = mem_len
        self.reuse_len = reuse_len
        self.bi_data = bi_data
        self.clamp_len = clamp_len
        self.same_length = same_length

        self.summary_type = summary_type
        self.summary_use_proj = summary_use_proj
        self.summary_activation = summary_activation
        self.summary_last_dropout = summary_last_dropout
        self.start_n_top = start_n_top
        self.end_n_top = end_n_top

        self.bos_token_id = bos_token_id
        self.pad_token_id = pad_token_id
        self.eos_token_id = eos_token_id

        if "use_cache" in kwargs:
            warnings.warn(
                "The `use_cache` argument is deprecated and will be removed in a future version, "
                "use `use_mems_eval` instead.",
                FutureWarning,
            )
            use_mems_eval = kwargs["use_cache"]

        self.use_mems_eval = use_mems_eval
        self.use_mems_train = use_mems_train
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

    @property
    def max_position_embeddings(self):
        logger.info(f"The model {self.model_type} is one of the few models that has no sequence length limit.")
        return -1

    @max_position_embeddings.setter
    def max_position_embeddings(self, value):
        raise NotImplementedError(
            f"The model {self.model_type} is one of the few models that has no sequence length limit."
        )
get_loggerrQ   r?   r   __all__r   r:   r8   <module>r`      s>       3  
		H	%R
" R
j /r:   