from ...configuration_utils import PretrainedConfig


class Zamba2Config(PretrainedConfig):
    r"""
    This is the configuration class to store the configuration of a [`Zamba2Model`]. It is used to instantiate a
    Zamba2 model according to the specified arguments, defining the model architecture. Instantiating a configuration
    with the defaults will yield a similar configuration to that of the Zamba2 model.

    [Zyphra/Zamba2-2.7B](https://huggingface.co/Zyphra/Zamba2-2.7B)

    Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
    documentation from [`PretrainedConfig`] for more information.
    Args:
        vocab_size (`int`, *optional*, defaults to 32000):
            Vocabulary size of the Zamba2 model. Defines the number of different tokens that can be represented by the
            `inputs_ids` passed when calling [`Zamba2Model`]
        max_position_embeddings (`int`, *optional*, defaults to 4096):
            The maximum sequence length that this model might ever be used with.
        hidden_size (`int`, *optional*, defaults to 2560):
            Dimension of the hidden representations.
        num_hidden_layers (`int`, *optional*, defaults to 54):
            Number of hidden layers in the model.
        layers_block_type (`list`, *optional*):
            List of layer types, which can be either "mamba" or "hybrid".
        mamba_d_state (`int`, *optional*, defaults to 64): shape of the state space latents.
        mamba_d_conv (`int`, *optional*, defaults to 4): Size of the convolution kernel.
        mamba_expand (`int`, *optional*, defaults to 2): Expanding factor used to determine the intermediate size.
        mamba_ngroups (`int`, *optional*, defaults to 1):
            Number of groups for the evolution matrices of mamba 2.
        time_step_min (`float`, *optional*, defaults to 0.001):
            Minimum `time_step` used to bound `dt_proj.bias`.
        time_step_max (`float`, *optional*, defaults to 0.1):
            Maximum `time_step` used to bound `dt_proj.bias`.
        time_step_floor (`float`, *optional*, defaults to 0.0001):
            Minimum clamping value of the `dt_proj.bias` layer initialization.
        time_step_limit (`tuple`, *optional*):
            Accepted range of time step values.
        n_mamba_heads (`int`, *optional*, defaults to 8):
            Number of heads for the evolution matrices of mamba 2.
        use_conv_bias (`bool`, *optional*, defaults to `True`):
            Whether or not to use bias in the convolution layer of the mixer block.
        chunk_size (`int`, *optional*, defaults to 256):
            Size of the chunks that will comprise the sequence.
        use_mem_eff_path (`bool`, *optional*, defaults to `False`):
            Whether or not to use the fused conv1d and scan in mamba2 layers.
        add_bias_linear (`bool`, *optional*, defaults to `False`):
            Flag indicating whether or not to use bias in various layers.
        intermediate_size (`int`, *optional*, defaults to 4 * hidden_size):
            Dimension of the MLP representations.
        hidden_act (`str`, *optional*, defaults to `"gelu"`):
            The non-linear activation function (function or string) in the MLP.
        num_attention_heads (`int`, *optional*, defaults to 32):
            Number of attention heads for each attention layer in the Transformer decoder.
        num_key_value_heads (`int`, *optional*):
            This is the number of key_value heads that should be used to implement Grouped Query Attention. If
            `num_key_value_heads=None`, the model will use Multi Head Attention (MHA), if
            `num_key_value_heads=1`, the model will use Multi Query Attention (MQA); otherwise, GQA is used. When
            converting a multi-head checkpoint to a GQA checkpoint, each group key and value head should be constructed
            by mean-pooling all the original heads within that group. For more details, check out [this
            paper](https://arxiv.org/pdf/2305.13245.pdf).
        attention_dropout (`float`, *optional*, defaults to 0.0):
            The dropout ratio for the attention probabilities.
        num_mem_blocks (`int`, *optional*, defaults to 1):
            Number of unshared transformer blocks.
        use_shared_attention_adapter (`bool`, *optional*, defaults to `False`):
            If True, unshared adapters (formally the same as LoRA but used in the base model) will be added to the q, k, v projectors in the shared attention layers.
        adapter_rank (`int`, *optional*, defaults to 128):
            Rank of the adapter in the shared MLP and shared attention layers.
        use_mem_rope (`bool`, *optional*, defaults to `False`):
            If True, includes RoPE in the shared attention layers.
        rope_theta (`float`, *optional*, defaults to `10000.0`):
            The base period of the RoPE embeddings.
        initializer_range (`float`, *optional*, defaults to 0.02):
            The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
        rms_norm_eps (`float`, *optional*, defaults to 1e-05):
            The epsilon used by the rms normalization layers.
        use_cache (`bool`, *optional*, defaults to `True`):
            Whether or not the model should return the last key/values attentions (not used by all models). Only
            relevant if `config.is_decoder=True`.
        num_logits_to_keep (`int` or `None`, *optional*, defaults to 1):
            Number of prompt logits to calculate during generation. If `None`, all logits will be calculated. If an
            integer value, only last `num_logits_to_keep` logits will be calculated. Default is 1 because only the
            logits of the last prompt token are needed for generation. For long sequences, the logits for the entire
            sequence may use a lot of memory, so setting `num_logits_to_keep=1` will reduce the memory footprint
            significantly.
        pad_token_id (`int`, *optional*, defaults to 0):
            The id of the padding token.
        bos_token_id (`int`, *optional*, defaults to 1):
            The id of the "beginning-of-sequence" token.
        eos_token_id (`int`, *optional*, defaults to 2):
            The id of the "end-of-sequence" token.
        use_long_context (`bool`, *optional*, defaults to `False`):
            Activates the context-extended version of Zamba by modifying RoPE.
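    As a purely illustrative sketch, the arguments above can be combined freely; the values below are
    hypothetical examples (they do not correspond to any released Zamba2 checkpoint) showing a smaller hybrid
    layout with grouped-query attention and RoPE enabled in the shared attention layers:

    ```python
    >>> from transformers import Zamba2Config

    >>> # Hypothetical values, chosen only to illustrate the arguments documented above
    >>> small_config = Zamba2Config(
    ...     hidden_size=1024,
    ...     num_hidden_layers=12,
    ...     num_attention_heads=16,
    ...     num_key_value_heads=4,  # fewer key/value heads than attention heads -> grouped-query attention
    ...     layers_block_type=["mamba"] * 5 + ["hybrid"] + ["mamba"] * 5 + ["hybrid"],
    ...     use_mem_rope=True,
    ... )
    ```
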
    ```python
    >>> from transformers import Zamba2Model, Zamba2Config
    >>> # Initializing a Zamba2-2.7B style configuration
    >>> configuration = Zamba2Config()
    >>> # Initializing a model from the Zamba2-2.7B style configuration
    >>> model = Zamba2Model(configuration)
    >>> # Accessing the model configuration
    >>> configuration = model.config
    ```Zzamba2Zpast_key_values }      
  6   N@            MbP?皙?-C6?   T   Fgelu               '  {Gz?h㈵>    c%           '         s  t  jd|!|"|#d|% || _|| _|| _|d u r d| | _n|| _|| _|| _|| _|| _	d| | _
d| j | j | _|| _|| _|$| _|rX|$rXd}&||&| j| jd    }|| _|| _|| _|| _|| _|	| _|| _t|| | | _|| _|| _|| _|| _|| _|
| _|| _|| _|$rd| _|d u r|}|| _ || _| j| j | _!| j| _"|d u rdgdgd dg d	  dgd  dg dgd
  dg dgd  | _#n|| _#|| _$|| _%|| _&| | _'dd t(| j#D | _)|| _*d S )N)pad_token_idbos_token_ideos_token_idr	   r
   r   i @  Zmamba   hybrid   r   c                 S   s   g | ]
\}}|d kr|qS )r    ).0indextyper   r   ^/var/www/auris/lib/python3.10/site-packages/transformers/models/zamba2/configuration_zamba2.py
<listcomp>   s    z)Zamba2Config.__init__.<locals>.<listcomp>r   )+super__init__
vocab_sizemax_position_embeddingshidden_sizeintermediate_size
hidden_actnum_hidden_layersnum_attention_headsnum_mem_blocksZattention_hidden_sizeZattention_head_dimattention_dropoutuse_mem_ropeuse_long_context
rope_thetamamba_d_statemamba_d_convmamba_expandadd_bias_linearmamba_ngroupsn_mamba_headsintZmamba_headdimuse_conv_bias
chunk_sizetime_step_limituse_shared_attention_adapteradapter_ranktime_step_mintime_step_maxtime_step_floornum_key_value_headsZkv_channelsZnum_query_groupslayers_block_typeinitializer_rangerms_norm_eps	use_cachenum_logits_to_keep	enumerateZhybrid_layer_idsuse_mem_eff_path)'selfr'   r(   r)   r,   rC   r3   r4   r5   r7   r?   r@   rA   r<   r8   r:   r;   rI   r6   r*   r+   r-   rB   r/   r.   r=   r>   r0   r2   rD   rE   rF   rG   r   r   r   r1   kwargsa	__class__r   r#   r&      s   (


zZamba2Config.__init__)$r   r   r   r   Nr   r	   r
   r   r   r   r   Nr   Tr   FFNr   r   Nr   r   Fr   Fr   r   r   Tr   r   r   r
   F)__name__
__module____qualname____doc__Z
model_typeZkeys_to_ignore_at_inferencer&   __classcell__r   r   rM   r#   r      sR    dr   N)Zconfiguration_utilsr   r   __all__r   r   r   r#   <module>   s    
W