# Reconstructed source for transformers/models/gemma/modeling_gemma.py
# (recovered from the compiled .pyc module; follows the upstream Hugging Face implementation).

from typing import Callable, List, Optional, Tuple, Union

import torch
from torch import nn

from ...activations import ACT2FN
from ...cache_utils import Cache, DynamicCache
from ...generation import GenerationMixin
from ...modeling_attn_mask_utils import AttentionMaskConverter
from ...modeling_flash_attention_utils import FlashAttentionKwargs
from ...modeling_layers import GradientCheckpointingLayer
from ...modeling_outputs import (
    BaseModelOutputWithPast,
    CausalLMOutputWithPast,
    SequenceClassifierOutputWithPast,
    TokenClassifierOutput,
)
from ...modeling_rope_utils import ROPE_INIT_FUNCTIONS, dynamic_rope_update
from ...modeling_utils import ALL_ATTENTION_FUNCTIONS, PreTrainedModel
from ...processing_utils import Unpack
from ...utils import (
    LossKwargs,
    auto_docstring,
    can_return_tuple,
    is_torch_flex_attn_available,
    logging,
)
from .configuration_gemma import GemmaConfig


if is_torch_flex_attn_available():
    from torch.nn.attention.flex_attention import BlockMask

    from ...integrations.flex_attention import make_flex_block_causal_mask


logger = logging.get_logger(__name__)


class GemmaRMSNorm(nn.Module):
    def __init__(self, dim: int, eps: float = 1e-6):
        super().__init__()
        self.eps = eps
        self.weight = nn.Parameter(torch.zeros(dim))

    def _norm(self, x):
        return x * torch.rsqrt(x.pow(2).mean(-1, keepdim=True) + self.eps)

    def forward(self, x):
        # Gemma normalizes in float32 and scales by (1 + weight), then casts back to the input dtype.
        output = self._norm(x.float())
        output = output * (1.0 + self.weight.float())
        return output.type_as(x)

    def extra_repr(self):
        return f"{tuple(self.weight.shape)}, eps={self.eps}"


class GemmaMLP(nn.Module):
    def __init__(self, config):
        super().__init__()
        self.config = config
        self.hidden_size = config.hidden_size
        self.intermediate_size = config.intermediate_size
        self.gate_proj = nn.Linear(self.hidden_size, self.intermediate_size, bias=False)
        self.up_proj = nn.Linear(self.hidden_size, self.intermediate_size, bias=False)
        self.down_proj = nn.Linear(self.intermediate_size, self.hidden_size, bias=False)
        self.act_fn = ACT2FN[config.hidden_act]

    def forward(self, x):
        # Gated MLP: down_proj(act(gate_proj(x)) * up_proj(x))
        down_proj = self.down_proj(self.act_fn(self.gate_proj(x)) * self.up_proj(x))
        return down_proj


class GemmaRotaryEmbedding(nn.Module):
    def __init__(self, config: GemmaConfig, device=None):
        super().__init__()
        # BC: "rope_type" was originally "type" in the rope_scaling dict.
        if hasattr(config, "rope_scaling") and config.rope_scaling is not None:
            self.rope_type = config.rope_scaling.get("rope_type", config.rope_scaling.get("type"))
        else:
            self.rope_type = "default"
        self.max_seq_len_cached = config.max_position_embeddings
        self.original_max_seq_len = config.max_position_embeddings

        self.config = config
        self.rope_init_fn = ROPE_INIT_FUNCTIONS[self.rope_type]

        inv_freq, self.attention_scaling = self.rope_init_fn(self.config, device)
        self.register_buffer("inv_freq", inv_freq, persistent=False)
        self.original_inv_freq = self.inv_freq

    @torch.no_grad()
    @dynamic_rope_update
    def forward(self, x, position_ids):
        inv_freq_expanded = self.inv_freq[None, :, None].float().expand(position_ids.shape[0], -1, 1).to(x.device)
        position_ids_expanded = position_ids[:, None, :].float()

        device_type = x.device.type if isinstance(x.device.type, str) and x.device.type != "mps" else "cpu"
        with torch.autocast(device_type=device_type, enabled=False):  # force float32
            freqs = (inv_freq_expanded.float() @ position_ids_expanded.float()).transpose(1, 2)
            emb = torch.cat((freqs, freqs), dim=-1)
            cos = emb.cos() * self.attention_scaling
            sin = emb.sin() * self.attention_scaling

        return cos.to(dtype=x.dtype), sin.to(dtype=x.dtype)


def rotate_half(x):
    """Rotates half the hidden dims of the input."""
    x1 = x[..., : x.shape[-1] // 2]
    x2 = x[..., x.shape[-1] // 2 :]
    return torch.cat((-x2, x1), dim=-1)


def apply_rotary_pos_emb(q, k, cos, sin, position_ids=None, unsqueeze_dim=1):
    """Applies Rotary Position Embedding to the query and key tensors.

    Args:
        q (`torch.Tensor`): The query tensor.
        k (`torch.Tensor`): The key tensor.
        cos (`torch.Tensor`): The cosine part of the rotary embedding.
        sin (`torch.Tensor`): The sine part of the rotary embedding.
        position_ids (`torch.Tensor`, *optional*):
            Deprecated and unused.
        unsqueeze_dim (`int`, *optional*, defaults to 1):
            The 'unsqueeze_dim' argument specifies the dimension along which to unsqueeze cos[position_ids] and
            sin[position_ids] so that they can be properly broadcasted to the dimensions of q and k. For example, note
            that cos[position_ids] and sin[position_ids] have the shape [batch_size, seq_len, head_dim]. Then, if q and
            k have the shape [batch_size, heads, seq_len, head_dim], then setting unsqueeze_dim=1 makes
            cos[position_ids] and sin[position_ids] broadcastable to the shapes of q and k. Similarly, if q and k have
            the shape [batch_size, seq_len, heads, head_dim], then set unsqueeze_dim=2.
    Returns:
        `tuple(torch.Tensor)` comprising of the query and key tensors rotated using the Rotary Position Embedding.
    """
    cos = cos.unsqueeze(unsqueeze_dim)
    sin = sin.unsqueeze(unsqueeze_dim)
    q_embed = (q * cos) + (rotate_half(q) * sin)
    k_embed = (k * cos) + (rotate_half(k) * sin)
    return q_embed, k_embed


def repeat_kv(hidden_states: torch.Tensor, n_rep: int) -> torch.Tensor:
    """
    This is the equivalent of torch.repeat_interleave(x, dim=1, repeats=n_rep). The hidden states go from (batch,
    num_key_value_heads, seqlen, head_dim) to (batch, num_attention_heads, seqlen, head_dim)
    """
    batch, num_key_value_heads, slen, head_dim = hidden_states.shape
    if n_rep == 1:
        return hidden_states
    hidden_states = hidden_states[:, :, None, :, :].expand(batch, num_key_value_heads, n_rep, slen, head_dim)
    return hidden_states.reshape(batch, num_key_value_heads * n_rep, slen, head_dim)


def eager_attention_forward(
    module: nn.Module,
    query: torch.Tensor,
    key: torch.Tensor,
    value: torch.Tensor,
    attention_mask: Optional[torch.Tensor],
    scaling: float,
    dropout: float = 0.0,
    **kwargs,
):
    # Expand grouped key/value heads so that every query head has a matching key/value head.
    key_states = repeat_kv(key, module.num_key_value_groups)
    value_states = repeat_kv(value, module.num_key_value_groups)

    attn_weights = torch.matmul(query, key_states.transpose(2, 3)) * scaling
    if attention_mask is not None:
        causal_mask = attention_mask[:, :, :, : key_states.shape[-2]]
        attn_weights = attn_weights + causal_mask

    attn_weights = nn.functional.softmax(attn_weights, dim=-1, dtype=torch.float32).to(query.dtype)
    attn_weights = nn.functional.dropout(attn_weights, p=dropout, training=module.training)
    attn_output = torch.matmul(attn_weights, value_states)
    attn_output = attn_output.transpose(1, 2).contiguous()

    return attn_output, attn_weights


class GemmaAttention(nn.Module):
    """Multi-headed attention from 'Attention Is All You Need' paper"""

    def __init__(self, config: GemmaConfig, layer_idx: int):
        super().__init__()
        self.config = config
        self.layer_idx = layer_idx
        self.head_dim = getattr(config, "head_dim", config.hidden_size // config.num_attention_heads)
        self.num_key_value_groups = config.num_attention_heads // config.num_key_value_heads
        self.scaling = self.head_dim**-0.5
        self.attention_dropout = config.attention_dropout
        self.is_causal = True

        self.q_proj = nn.Linear(config.hidden_size, config.num_attention_heads * self.head_dim, bias=config.attention_bias)
        self.k_proj = nn.Linear(config.hidden_size, config.num_key_value_heads * self.head_dim, bias=config.attention_bias)
        self.v_proj = nn.Linear(config.hidden_size, config.num_key_value_heads * self.head_dim, bias=config.attention_bias)
        self.o_proj = nn.Linear(config.num_attention_heads * self.head_dim, config.hidden_size, bias=config.attention_bias)

    def forward(
        self,
        hidden_states: torch.Tensor,
        position_embeddings: Tuple[torch.Tensor, torch.Tensor],
        attention_mask: Optional[torch.Tensor],
        past_key_value: Optional[Cache] = None,
        cache_position: Optional[torch.LongTensor] = None,
        **kwargs: Unpack[FlashAttentionKwargs],
    ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:
        input_shape = hidden_states.shape[:-1]
        hidden_shape = (*input_shape, -1, self.head_dim)

        query_states = self.q_proj(hidden_states).view(hidden_shape).transpose(1, 2)
        key_states = self.k_proj(hidden_states).view(hidden_shape).transpose(1, 2)
        value_states = self.v_proj(hidden_states).view(hidden_shape).transpose(1, 2)

        cos, sin = position_embeddings
        query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin)

        if past_key_value is not None:
            # sin and cos are specific to RoPE models; cache_position is needed for the static cache.
            cache_kwargs = {"sin": sin, "cos": cos, "cache_position": cache_position}
            key_states, value_states = past_key_value.update(key_states, value_states, self.layer_idx, cache_kwargs)

        attention_interface: Callable = eager_attention_forward
        if self.config._attn_implementation != "eager":
            if self.config._attn_implementation == "sdpa" and kwargs.get("output_attentions", False):
                logger.warning_once(
                    "`torch.nn.functional.scaled_dot_product_attention` does not support `output_attentions=True`."
                    " Falling back to eager attention. This warning can be removed using the argument"
                    ' `attn_implementation="eager"` when loading the model.'
                )
            else:
                attention_interface = ALL_ATTENTION_FUNCTIONS[self.config._attn_implementation]

        attn_output, attn_weights = attention_interface(
            self,
            query_states,
            key_states,
            value_states,
            attention_mask,
            dropout=0.0 if not self.training else self.attention_dropout,
            scaling=self.scaling,
            **kwargs,
        )

        attn_output = attn_output.reshape(*input_shape, -1).contiguous()
        attn_output = self.o_proj(attn_output)
        return attn_output, attn_weights


class GemmaDecoderLayer(GradientCheckpointingLayer):
    def __init__(self, config: GemmaConfig, layer_idx: int):
        super().__init__()
        self.hidden_size = config.hidden_size
        self.self_attn = GemmaAttention(config=config, layer_idx=layer_idx)
        self.mlp = GemmaMLP(config)
        self.input_layernorm = GemmaRMSNorm(config.hidden_size, eps=config.rms_norm_eps)
        self.post_attention_layernorm = GemmaRMSNorm(config.hidden_size, eps=config.rms_norm_eps)

    def forward(
        self,
        hidden_states: torch.Tensor,
        attention_mask: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        past_key_value: Optional[Cache] = None,
        output_attentions: Optional[bool] = False,
        use_cache: Optional[bool] = False,
        cache_position: Optional[torch.LongTensor] = None,
        position_embeddings: Optional[Tuple[torch.Tensor, torch.Tensor]] = None,
        **kwargs: Unpack[FlashAttentionKwargs],
    ) -> Tuple[torch.FloatTensor, Optional[Tuple[torch.FloatTensor, torch.FloatTensor]]]:
        residual = hidden_states
        hidden_states = self.input_layernorm(hidden_states)

        # Self attention block
        hidden_states, self_attn_weights = self.self_attn(
            hidden_states=hidden_states,
            attention_mask=attention_mask,
            position_ids=position_ids,
            past_key_value=past_key_value,
            output_attentions=output_attentions,
            use_cache=use_cache,
            cache_position=cache_position,
            position_embeddings=position_embeddings,
            **kwargs,
        )
        hidden_states = residual + hidden_states

        # Feed-forward block
        residual = hidden_states
        hidden_states = self.post_attention_layernorm(hidden_states)
        hidden_states = self.mlp(hidden_states)
        hidden_states = residual + hidden_states

        outputs = (hidden_states,)
        if output_attentions:
            outputs += (self_attn_weights,)

        return outputs


@auto_docstring
class GemmaPreTrainedModel(PreTrainedModel):
    config_class = GemmaConfig
    base_model_prefix = "model"
    supports_gradient_checkpointing = True
    _no_split_modules = ["GemmaDecoderLayer"]
    _skip_keys_device_placement = ["past_key_values"]
    _supports_flash_attn_2 = True
    _supports_sdpa = True
    _supports_flex_attn = True
    _supports_cache_class = True
    _supports_quantized_cache = True
    _supports_static_cache = True
    _supports_attention_backend = True

    def _init_weights(self, module):
        std = self.config.initializer_range
        if isinstance(module, nn.Linear):
            module.weight.data.normal_(mean=0.0, std=std)
            if module.bias is not None:
                module.bias.data.zero_()
        elif isinstance(module, nn.Embedding):
            module.weight.data.normal_(mean=0.0, std=std)
            if module.padding_idx is not None:
                module.weight.data[module.padding_idx].zero_()
        elif isinstance(module, GemmaRMSNorm):
            module.weight.data.fill_(1.0)


@auto_docstring
class GemmaModel(GemmaPreTrainedModel):
    def __init__(self, config: GemmaConfig):
        super().__init__(config)
        self.padding_idx = config.pad_token_id
        self.vocab_size = config.vocab_size

        self.embed_tokens = nn.Embedding(config.vocab_size, config.hidden_size, self.padding_idx)
        self.layers = nn.ModuleList(
            [GemmaDecoderLayer(config, layer_idx) for layer_idx in range(config.num_hidden_layers)]
        )
        self.norm = GemmaRMSNorm(config.hidden_size, eps=config.rms_norm_eps)
        self.rotary_emb = GemmaRotaryEmbedding(config=config)
        self.gradient_checkpointing = False

        # Initialize weights and apply final processing
        self.post_init()

    def get_input_embeddings(self):
        return self.embed_tokens

    def set_input_embeddings(self, value):
        self.embed_tokens = value

    @can_return_tuple
    @auto_docstring
    def forward(
        self,
        input_ids: Optional[torch.LongTensor] = None,
        attention_mask: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        past_key_values: Optional[Union[Cache, List[torch.FloatTensor]]] = None,
        inputs_embeds: Optional[torch.FloatTensor] = None,
        use_cache: Optional[bool] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        cache_position: Optional[torch.LongTensor] = None,
        **flash_attn_kwargs: Unpack[FlashAttentionKwargs],
    ) -> BaseModelOutputWithPast:
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        use_cache = use_cache if use_cache is not None else self.config.use_cache

        if (input_ids is None) ^ (inputs_embeds is not None):
            raise ValueError("You must specify exactly one of input_ids or inputs_embeds")

        if self.gradient_checkpointing and self.training and use_cache:
            logger.warning_once(
                "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`."
            )
            use_cache = False

        if inputs_embeds is None:
            inputs_embeds = self.embed_tokens(input_ids)

        if use_cache and past_key_values is None:
            past_key_values = DynamicCache()

        if cache_position is None:
            past_seen_tokens = past_key_values.get_seq_length() if past_key_values is not None else 0
            cache_position = torch.arange(
                past_seen_tokens, past_seen_tokens + inputs_embeds.shape[1], device=inputs_embeds.device
            )

        if position_ids is None:
            position_ids = cache_position.unsqueeze(0)

        causal_mask = self._update_causal_mask(
            attention_mask, inputs_embeds, cache_position, past_key_values, output_attentions
        )

        hidden_states = inputs_embeds

        # Position embeddings are computed once and shared across all decoder layers.
        position_embeddings = self.rotary_emb(hidden_states, position_ids)

        # Gemma scales the input embeddings by sqrt(hidden_size); the normalizer is kept in the hidden-state dtype.
        normalizer = torch.tensor(self.config.hidden_size**0.5, dtype=hidden_states.dtype)
        hidden_states = hidden_states * normalizer

        all_hidden_states = () if output_hidden_states else None
        all_self_attns = () if output_attentions else None

        for decoder_layer in self.layers[: self.config.num_hidden_layers]:
            if output_hidden_states:
                all_hidden_states += (hidden_states,)

            layer_outputs = decoder_layer(
                hidden_states,
                attention_mask=causal_mask,
                position_ids=position_ids,
                past_key_value=past_key_values,
                output_attentions=output_attentions,
                use_cache=use_cache,
                cache_position=cache_position,
                position_embeddings=position_embeddings,
                **flash_attn_kwargs,
            )

            hidden_states = layer_outputs[0]

            if output_attentions:
                all_self_attns += (layer_outputs[1],)

        hidden_states = self.norm(hidden_states)

        if output_hidden_states:
            all_hidden_states += (hidden_states,)

        return BaseModelOutputWithPast(
            last_hidden_state=hidden_states,
            past_key_values=past_key_values if use_cache else None,
            hidden_states=all_hidden_states,
            attentions=all_self_attns,
        )

    def _update_causal_mask(
        self,
        attention_mask: Union[torch.Tensor, "BlockMask"],
        input_tensor: torch.Tensor,
        cache_position: torch.Tensor,
        past_key_values: Cache,
        output_attentions: bool = False,
    ):
        if self.config._attn_implementation == "flash_attention_2":
            if attention_mask is not None and (attention_mask == 0.0).any():
                return attention_mask
            return None
        if self.config._attn_implementation == "flex_attention":
            if isinstance(attention_mask, torch.Tensor):
                attention_mask = make_flex_block_causal_mask(attention_mask)
            return attention_mask

        past_seen_tokens = past_key_values.get_seq_length() if past_key_values is not None else 0
        using_compilable_cache = past_key_values.is_compileable if past_key_values is not None else False

        # With SDPA, rely on its `is_causal` argument instead of an explicit mask whenever possible.
        if self.config._attn_implementation == "sdpa" and not using_compilable_cache and not output_attentions:
            if AttentionMaskConverter._ignore_causal_mask_sdpa(
                attention_mask,
                inputs_embeds=input_tensor,
                past_key_values_length=past_seen_tokens,
                is_training=self.training,
            ):
                return None

        dtype = input_tensor.dtype
        sequence_length = input_tensor.shape[1]
        if using_compilable_cache:
            target_length = past_key_values.get_max_cache_shape()
        else:
            target_length = (
                attention_mask.shape[-1]
                if isinstance(attention_mask, torch.Tensor)
                else past_seen_tokens + sequence_length + 1
            )

        # If the provided attention mask is 2D, expand it here into a 4D causal mask.
        causal_mask = self._prepare_4d_causal_attention_mask_with_cache_position(
            attention_mask,
            sequence_length=sequence_length,
            target_length=target_length,
            dtype=dtype,
            cache_position=cache_position,
            batch_size=input_tensor.shape[0],
        )

        if (
            self.config._attn_implementation == "sdpa"
            and attention_mask is not None
            and attention_mask.device.type in ["cuda", "xpu", "npu"]
            and not output_attentions
        ):
            # Attend to all tokens in fully masked rows (e.g. the first rows with left padding), as required by
            # the memory-efficient path of F.scaled_dot_product_attention.
            min_dtype = torch.finfo(dtype).min
            causal_mask = AttentionMaskConverter._unmask_unattended(causal_mask, min_dtype)

        return causal_mask

    @staticmethod
    def _prepare_4d_causal_attention_mask_with_cache_position(
        attention_mask: torch.Tensor,
        sequence_length: int,
        target_length: int,
        dtype: torch.dtype,
        cache_position: torch.Tensor,
        batch_size: int,
        **kwargs,
    ):
        """
        Creates a causal 4D mask of shape `(batch_size, 1, query_length, key_value_length)` from a 2D mask of shape
        `(batch_size, key_value_length)`, or if the input `attention_mask` is already 4D, do nothing.

        Args:
            attention_mask (`torch.Tensor`):
                A 2D attention mask of shape `(batch_size, key_value_length)` or a 4D attention mask of shape
                `(batch_size, 1, query_length, key_value_length)`.
            sequence_length (`int`):
                The sequence length being processed.
            target_length (`int`):
                The target length: when generating with static cache, the mask should be as long as the static cache,
                to account for the 0 padding, the part of the cache that is not filled yet.
            dtype (`torch.dtype`):
                The dtype to use for the 4D attention mask.
            cache_position (`torch.Tensor`):
                Indices depicting the position of the input sequence tokens in the sequence.
            batch_size (`torch.Tensor`):
                Batch size.
        """
        if attention_mask is not None and attention_mask.dim() == 4:
            # The mask already comes in an inverted 4D form and requires no further processing.
            causal_mask = attention_mask
        else:
            min_dtype = torch.finfo(dtype).min
            causal_mask = torch.full(
                (sequence_length, target_length), fill_value=min_dtype, dtype=dtype, device=cache_position.device
            )
            if sequence_length != 1:
                causal_mask = torch.triu(causal_mask, diagonal=1)
            causal_mask *= torch.arange(target_length, device=cache_position.device) > cache_position.reshape(-1, 1)
            causal_mask = causal_mask[None, None, :, :].expand(batch_size, 1, -1, -1)
            if attention_mask is not None:
                causal_mask = causal_mask.clone()  # copy to contiguous memory for in-place edit
                mask_length = attention_mask.shape[-1]
                padding_mask = causal_mask[:, :, :, :mask_length] + attention_mask[:, None, None, :].to(
                    causal_mask.device
                )
                padding_mask = padding_mask == 0
                causal_mask[:, :, :, :mask_length] = causal_mask[:, :, :, :mask_length].masked_fill(
                    padding_mask, min_dtype
                )

        return causal_mask


class KwargsForCausalLM(FlashAttentionKwargs, LossKwargs): ...


@auto_docstring
class GemmaForCausalLM(GemmaPreTrainedModel, GenerationMixin):
    _tied_weights_keys = ["lm_head.weight"]
    _tp_plan = {"lm_head": "colwise_rep"}
    _pp_plan = {"lm_head": (["hidden_states"], ["logits"])}

    def __init__(self, config):
        super().__init__(config)
        self.model = GemmaModel(config)
        self.vocab_size = config.vocab_size
        self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False)

        # Initialize weights and apply final processing
        self.post_init()

    def get_input_embeddings(self):
        return self.model.embed_tokens

    def set_input_embeddings(self, value):
        self.model.embed_tokens = value

    def get_output_embeddings(self):
        return self.lm_head

    def set_output_embeddings(self, new_embeddings):
        self.lm_head = new_embeddings

    def set_decoder(self, decoder):
        self.model = decoder

    def get_decoder(self):
        return self.model

    @can_return_tuple
    @auto_docstring
    def forward(
        self,
        input_ids: Optional[torch.LongTensor] = None,
        attention_mask: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        past_key_values: Optional[Cache] = None,
        inputs_embeds: Optional[torch.FloatTensor] = None,
        labels: Optional[torch.LongTensor] = None,
        use_cache: Optional[bool] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        cache_position: Optional[torch.LongTensor] = None,
        logits_to_keep: Union[int, torch.Tensor] = 0,
        **kwargs: Unpack[KwargsForCausalLM],
    ) -> CausalLMOutputWithPast:
        r"""
        labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
            Labels for computing the masked language modeling loss. Indices should either be in `[0, ...,
            config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored
            (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.

        Example:

        ```python
        >>> from transformers import AutoTokenizer, GemmaForCausalLM

        >>> model = GemmaForCausalLM.from_pretrained("google/gemma-7b")
        >>> tokenizer = AutoTokenizer.from_pretrained("google/gemma-7b")

        >>> prompt = "What is your favorite condiment?"
        >>> inputs = tokenizer(prompt, return_tensors="pt")

        >>> # Generate
        >>> generate_ids = model.generate(inputs.input_ids, max_length=30)
        >>> tokenizer.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0]
        "What is your favorite condiment?"
        ```"""
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )

        outputs: BaseModelOutputWithPast = self.model(
            input_ids=input_ids,
            attention_mask=attention_mask,
            position_ids=position_ids,
            past_key_values=past_key_values,
            inputs_embeds=inputs_embeds,
            use_cache=use_cache,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            cache_position=cache_position,
            **kwargs,
        )

        hidden_states = outputs.last_hidden_state
        # Only compute the logits that are actually needed (e.g. the last position during generation).
        slice_indices = slice(-logits_to_keep, None) if isinstance(logits_to_keep, int) else logits_to_keep
        logits = self.lm_head(hidden_states[:, slice_indices, :])

        loss = None
        if labels is not None:
            loss = self.loss_function(logits=logits, labels=labels, vocab_size=self.config.vocab_size, **kwargs)

        return CausalLMOutputWithPast(
            loss=loss,
            logits=logits,
            past_key_values=outputs.past_key_values,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
        )


@auto_docstring(
    custom_intro="""
    The Gemma Model transformer with a sequence classification head on top (linear layer).

    [`GemmaForSequenceClassification`] uses the last token in order to do the classification, as other causal models
    (e.g. GPT-2) do.

    Since it does classification on the last token, it requires to know the position of the last token. If a
    `pad_token_id` is defined in the configuration, it finds the last token that is not a padding token in each row. If
    no `pad_token_id` is defined, it simply takes the last value in each row of the batch. Since it cannot guess the
    padding tokens when `inputs_embeds` are passed instead of `input_ids`, it does the same (take the last value in
    each row of the batch).
    """
)
class GemmaForSequenceClassification(GemmaPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels
        self.model = GemmaModel(config)
        self.score = nn.Linear(config.hidden_size, self.num_labels, bias=False)

        # Initialize weights and apply final processing
        self.post_init()

    def get_input_embeddings(self):
        return self.model.embed_tokens

    def set_input_embeddings(self, value):
        self.model.embed_tokens = value

    @can_return_tuple
    @auto_docstring
    def forward(
        self,
        input_ids: Optional[torch.LongTensor] = None,
        attention_mask: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        past_key_values: Optional[Cache] = None,
        inputs_embeds: Optional[torch.FloatTensor] = None,
        labels: Optional[torch.LongTensor] = None,
        use_cache: Optional[bool] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
    ) -> SequenceClassifierOutputWithPast:
        r"""
        labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
            Labels for computing the sequence classification/regression loss. Indices should be in `[0, ...,
            config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If
            `config.num_labels > 1` a classification loss is computed (Cross-Entropy).
        """
        transformer_outputs: BaseModelOutputWithPast = self.model(
            input_ids,
            attention_mask=attention_mask,
            position_ids=position_ids,
            past_key_values=past_key_values,
            inputs_embeds=inputs_embeds,
            use_cache=use_cache,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
        )
        hidden_states = transformer_outputs.last_hidden_state
        logits = self.score(hidden_states)

        if input_ids is not None:
            batch_size = input_ids.shape[0]
        else:
            batch_size = inputs_embeds.shape[0]

        if self.config.pad_token_id is None and batch_size != 1:
            raise ValueError("Cannot handle batch sizes > 1 if no padding token is defined.")
        if self.config.pad_token_id is None:
            last_non_pad_token = -1
        elif input_ids is not None:
            # To handle both left and right padding, take the rightmost token that is not equal to pad_token_id.
            non_pad_mask = (input_ids != self.config.pad_token_id).to(logits.device, torch.int32)
            token_indices = torch.arange(input_ids.shape[-1], device=logits.device, dtype=torch.int32)
            last_non_pad_token = (token_indices * non_pad_mask).argmax(-1)
        else:
            last_non_pad_token = -1
            logger.warning_once(
                f"{self.__class__.__name__} will not detect padding tokens in `inputs_embeds`. Results may be "
                "unexpected if using padding tokens in conjunction with `inputs_embeds.`"
            )

        pooled_logits = logits[torch.arange(batch_size, device=logits.device), last_non_pad_token]

        loss = None
        if labels is not None:
            loss = self.loss_function(logits=logits, labels=labels, pooled_logits=pooled_logits, config=self.config)

        return SequenceClassifierOutputWithPast(
            loss=loss,
            logits=pooled_logits,
            past_key_values=transformer_outputs.past_key_values,
            hidden_states=transformer_outputs.hidden_states,
            attentions=transformer_outputs.attentions,
        )


@auto_docstring
class GemmaForTokenClassification(GemmaPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels
        self.model = GemmaModel(config)
        if getattr(config, "classifier_dropout", None) is not None:
            classifier_dropout = config.classifier_dropout
        elif getattr(config, "hidden_dropout", None) is not None:
            classifier_dropout = config.hidden_dropout
        else:
            classifier_dropout = 0.1
        self.dropout = nn.Dropout(classifier_dropout)
        self.score = nn.Linear(config.hidden_size, config.num_labels)

        # Initialize weights and apply final processing
        self.post_init()

    def get_input_embeddings(self):
        return self.model.embed_tokens

    def set_input_embeddings(self, value):
        self.model.embed_tokens = value

    @can_return_tuple
    @auto_docstring
    def forward(
        self,
        input_ids: Optional[torch.LongTensor] = None,
        attention_mask: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        past_key_values: Optional[Cache] = None,
        inputs_embeds: Optional[torch.FloatTensor] = None,
        labels: Optional[torch.LongTensor] = None,
        use_cache: Optional[bool] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
    ) -> TokenClassifierOutput:
        r"""
        labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
            Labels for computing the sequence classification/regression loss. Indices should be in `[0, ...,
            config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If
            `config.num_labels > 1` a classification loss is computed (Cross-Entropy).
        """
        outputs: BaseModelOutputWithPast = self.model(
            input_ids,
            attention_mask=attention_mask,
            position_ids=position_ids,
            past_key_values=past_key_values,
            inputs_embeds=inputs_embeds,
            use_cache=use_cache,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
        )
        sequence_output = outputs.last_hidden_state
        sequence_output = self.dropout(sequence_output)
        logits = self.score(sequence_output)

        loss = None
        if labels is not None:
            loss = self.loss_function(logits, labels, self.config)

        return TokenClassifierOutput(
            loss=loss,
            logits=logits,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
        )


__all__ = [
    "GemmaModel",
    "GemmaForCausalLM",
    "GemmaForSequenceClassification",
    "GemmaForTokenClassification",
    "GemmaPreTrainedModel",
]