
"""Mpt configuration"""

from typing import TYPE_CHECKING, Optional, Union


if TYPE_CHECKING:
    pass

from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)


class MptAttentionConfig(PretrainedConfig):
    """
This is the configuration class to store the configuration of a [`MptAttention`] class. It is used to instantiate
attention layers according to the specified arguments, defining the layers architecture. Instantiating a
configuration with the defaults will yield a similar configuration to that of the MPT
[mosaicml/mpt-7b](https://huggingface.co/mosaicml/mpt-7b) architecture. Most of the arguments are kept for backward
compatibility with previous MPT models that are hosted on the Hub (previously with `trust_remote_code=True`).

Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
documentation from [`PretrainedConfig`] for more information.

Args:
    attn_type (`str`, *optional*, defaults to `"multihead_attention"`):
        Type of attention to use. Options: `"multihead_attention"`, `"multiquery_attention"`.
    attn_pdrop (`float`, *optional*, defaults to `0.0`):
        The dropout probability for the attention layers.
    attn_impl (`str`, *optional*, defaults to `"torch"`):
        The attention implementation to use. One of `"torch"`, `"flash"`, or `"triton"`.
    clip_qkv (`float`, *optional*):
        If not `None`, clip the queries, keys, and values in the attention layer to this value.
    softmax_scale (`float`, *optional*):
        If not `None`, scale the softmax in the attention layer by this value. If `None`, will default to
        `1/sqrt(hidden_size)`.
    prefix_lm (`bool`, *optional*, defaults to `False`):
        Whether the model should operate as a Prefix LM. This requires passing an extra `prefix_mask` argument
        which indicates which tokens belong to the prefix. Tokens in the prefix can attend to one another
        bi-directionally. Tokens outside the prefix use causal attention.
    qk_ln (`bool`, *optional*, defaults to `False`):
        Whether to apply layer normalization to the queries and keys in the attention layer.
    attn_uses_sequence_id (`bool`, *optional*, defaults to `False`):
        Whether to restrict attention to tokens that have the same token_type_ids. When the model is in `train`
        mode, this requires passing an extra *token_type_ids* argument which indicates which sub-sequence each
        token belongs to. Defaults to `False`, meaning any provided *token_type_ids* will be ignored.
    alibi (`bool`, *optional*, defaults to `True`):
        Whether or not to use the alibi bias instead of positional embeddings.
    alibi_bias_max (`int`, *optional*, defaults to 8):
        The maximum value of the alibi bias.
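
Example:

A minimal usage sketch: these attention options are typically supplied through the `attn_config` argument of
[`MptConfig`], either as an `MptAttentionConfig` instance or as a plain dict that is converted internally. The
values below are illustrative, not defaults.

```python
>>> from transformers import MptConfig

>>> # Configure multi-query attention with query/key/value clipping
>>> config = MptConfig(attn_config={"attn_type": "multiquery_attention", "clip_qkv": 8.0})
>>> config.attn_config.attn_type
'multiquery_attention'
```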
    """

    base_config_key = "attn_config"

    def __init__(
        self,
        attn_type="multihead_attention",
        attn_pdrop=0.0,
        attn_impl="torch",
        clip_qkv=None,
        softmax_scale=None,
        prefix_lm=False,
        qk_ln=False,
        attn_uses_sequence_id=False,
        alibi=True,
        alibi_bias_max=8,
        **kwargs,
    ):
        super().__init__()
        self.attn_type = attn_type
        self.attn_pdrop = attn_pdrop
        self.attn_impl = attn_impl
        self.clip_qkv = clip_qkv
        self.softmax_scale = softmax_scale
        self.prefix_lm = prefix_lm
        self.attn_uses_sequence_id = attn_uses_sequence_id
        self.alibi = alibi
        self.qk_ln = qk_ln
        self.alibi_bias_max = alibi_bias_max

        if attn_type not in ["multihead_attention", "multiquery_attention"]:
            raise ValueError(
                f"`attn_type` has to be either `multihead_attention` or `multiquery_attention`. Received: {attn_type}."
            )


class MptConfig(PretrainedConfig):
    """
This is the configuration class to store the configuration of a [`MptModel`]. It is used to instantiate a Mpt model
according to the specified arguments, defining the model architecture. Instantiating a configuration with the
defaults will yield a similar configuration to the Mpt-7b architecture
[mosaicml/mpt-7b](https://huggingface.co/mosaicml/mpt-7b).

Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
documentation from [`PretrainedConfig`] for more information.


Args:
    d_model (`int`, *optional*, defaults to 2048):
        Dimensionality of the embeddings and hidden states.
    n_heads (`int`, *optional*, defaults to 16):
        Number of attention heads for each attention layer in the Transformer encoder.
    n_layers (`int`, *optional*, defaults to 24):
        Number of hidden layers in the Transformer encoder.
    expansion_ratio (`int`, *optional*, defaults to 4):
        The ratio of the up/down scale in the MLP.
    max_seq_len (`int`, *optional*, defaults to 2048):
        The maximum sequence length of the model.
    vocab_size (`int`, *optional*, defaults to 50368):
        Vocabulary size of the Mpt model. Defines the maximum number of different tokens that can be represented by
        the `input_ids` passed when calling [`MptModel`]. Check [this
        discussion](https://huggingface.co/bigscience/mpt/discussions/120#633d28389addb8530b406c2a) on how the
        `vocab_size` has been defined.
    resid_pdrop (`float`, *optional*, defaults to 0.0):
        The dropout probability applied to the attention output before combining with residual.
    layer_norm_epsilon (`float`, *optional*, defaults to 1e-05):
        The epsilon to use in the layer normalization layers.
    emb_pdrop (`float`, *optional*, defaults to 0.0):
        The dropout probability for the embedding layer.
    learned_pos_emb (`bool`, *optional*, defaults to `True`):
        Whether to use learned positional embeddings.
    attn_config (`dict`, *optional*):
        A dictionary used to configure the model's attention module.
    init_device (`str`, *optional*, defaults to `"cpu"`):
        The device to use for parameter initialization. Defined for backward compatibility.
    logit_scale (`float`, *optional*):
        If not `None`, scale the logits by this value.
    no_bias (`bool`, *optional*, defaults to `True`):
        Whether to use bias in all linear layers.
    verbose (`int`, *optional*, defaults to 0):
        The verbosity level used for logging in previous versions of the MPT models. This
        argument is deprecated.
    embedding_fraction (`float`, *optional*, defaults to 1.0):
        The fraction to scale the gradients of the embedding layer by.
    norm_type (`str`, *optional*, defaults to `"low_precision_layernorm"`):
        Type of layer norm to use. All MPT models use the same layer norm implementation. Defined for backward
        compatibility.
    use_cache (`bool`, *optional*, defaults to `False`):
        Whether or not the model should return the last key/values attentions (not used by all models).
    initializer_range (`float`, *optional*, defaults to 0.02):
        The standard deviation of the truncated_normal_initializer for initializing all weight matrices.

Example:

```python
>>> from transformers import MptConfig, MptModel

>>> # Initializing a Mpt configuration
>>> configuration = MptConfig()

>>> # Initializing a model (with random weights) from the configuration
>>> model = MptModel(configuration)

>>> # Accessing the model configuration
>>> configuration = model.config
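
>>> # `attribute_map` aliases (a usage sketch): `hidden_size`, `num_attention_heads` and
>>> # `num_hidden_layers` resolve to `d_model`, `n_heads` and `n_layers`, respectively
>>> configuration.hidden_size == configuration.d_model
True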
```"""

    model_type = "mpt"
    sub_configs = {"attn_config": MptAttentionConfig}
    attribute_map = {
        "num_attention_heads": "n_heads",
        "hidden_size": "d_model",
        "num_hidden_layers": "n_layers",
    }

    def __init__(
        self,
        d_model: int = 2048,
        n_heads: int = 16,
        n_layers: int = 24,
        expansion_ratio: int = 4,
        max_seq_len: int = 2048,
        vocab_size: int = 50368,
        resid_pdrop: float = 0.0,
        layer_norm_epsilon: float = 1e-5,
        emb_pdrop: float = 0.0,
        learned_pos_emb: bool = True,
        attn_config: Optional[Union[MptAttentionConfig, dict]] = None,
        init_device: str = "cpu",
        logit_scale: Optional[Union[float, str]] = None,
        no_bias: bool = True,
        verbose: int = 0,
        embedding_fraction: float = 1.0,
        norm_type: str = "low_precision_layernorm",
        use_cache: bool = False,
        initializer_range: float = 0.02,
        **kwargs,
    ):
        if attn_config is None:
            self.attn_config = MptAttentionConfig()
        elif isinstance(attn_config, dict):
            self.attn_config = MptAttentionConfig(**attn_config)
        else:
            self.attn_config = attn_config
        self.d_model = d_model
        self.n_heads = n_heads
        self.n_layers = n_layers
        self.expansion_ratio = expansion_ratio
        self.max_seq_len = max_seq_len
        self.vocab_size = vocab_size
        self.resid_pdrop = resid_pdrop
        self.emb_pdrop = emb_pdrop
        self.learned_pos_emb = learned_pos_emb
        self.init_device = init_device
        self.logit_scale = logit_scale
        self.no_bias = no_bias
        self.verbose = verbose
        self.embedding_fraction = embedding_fraction
        self.norm_type = norm_type
        self.layer_norm_epsilon = layer_norm_epsilon
        self.use_cache = use_cache
        self.initializer_range = initializer_range

        super().__init__(**kwargs)


__all__ = ["MptConfig"]