# NOTE: this file is the compiled form of `transformers.models.starcoder2.modeling_starcoder2`;
# the readable source below is reconstructed from the bytecode constants and the matching
# upstream transformers implementation.

from typing import Callable, List, Optional, Tuple, Union

import torch
from torch import nn

from ...activations import ACT2FN
from ...cache_utils import Cache, DynamicCache, SlidingWindowCache, StaticCache
from ...generation import GenerationMixin
from ...modeling_attn_mask_utils import AttentionMaskConverter
from ...modeling_flash_attention_utils import FlashAttentionKwargs
from ...modeling_layers import GradientCheckpointingLayer
from ...modeling_outputs import (
    BaseModelOutputWithPast,
    CausalLMOutputWithPast,
    SequenceClassifierOutputWithPast,
    TokenClassifierOutput,
)
from ...modeling_rope_utils import ROPE_INIT_FUNCTIONS, dynamic_rope_update
from ...modeling_utils import ALL_ATTENTION_FUNCTIONS, PreTrainedModel
from ...processing_utils import Unpack
from ...utils import (
    LossKwargs,
    auto_docstring,
    can_return_tuple,
    is_torch_flex_attn_available,
    logging,
)
from .configuration_starcoder2 import Starcoder2Config


if is_torch_flex_attn_available():
    from torch.nn.attention.flex_attention import BlockMask

    from ...integrations.flex_attention import make_flex_block_causal_mask


logger = logging.get_logger(__name__)


class Starcoder2MLP(nn.Module):
    def __init__(self, config: Starcoder2Config):
        super().__init__()
        embed_dim = config.hidden_size
        self.c_fc = nn.Linear(embed_dim, config.intermediate_size, bias=config.use_bias)
        self.c_proj = nn.Linear(config.intermediate_size, embed_dim, bias=config.use_bias)
        self.act = ACT2FN[config.hidden_act]
        self.residual_dropout = config.residual_dropout

    def forward(self, hidden_states: Optional[Tuple[torch.FloatTensor]]) -> torch.FloatTensor:
        hidden_states = self.c_fc(hidden_states)
        hidden_states = self.act(hidden_states)
        hidden_states = self.c_proj(hidden_states)
        hidden_states = nn.functional.dropout(hidden_states, p=self.residual_dropout, training=self.training)
        return hidden_states


def rotate_half(x):
    """Rotates half the hidden dims of the input."""
    x1 = x[..., : x.shape[-1] // 2]
    x2 = x[..., x.shape[-1] // 2 :]
    return torch.cat((-x2, x1), dim=-1)


def apply_rotary_pos_emb(q, k, cos, sin, position_ids=None, unsqueeze_dim=1):
    """Applies Rotary Position Embedding to the query and key tensors.

    Args:
        q (`torch.Tensor`): The query tensor.
        k (`torch.Tensor`): The key tensor.
        cos (`torch.Tensor`): The cosine part of the rotary embedding.
        sin (`torch.Tensor`): The sine part of the rotary embedding.
        position_ids (`torch.Tensor`, *optional*):
            Deprecated and unused.
        unsqueeze_dim (`int`, *optional*, defaults to 1):
            The 'unsqueeze_dim' argument specifies the dimension along which to unsqueeze cos[position_ids] and
            sin[position_ids] so that they can be properly broadcasted to the dimensions of q and k. For example, note
            that cos[position_ids] and sin[position_ids] have the shape [batch_size, seq_len, head_dim]. Then, if q and
            k have the shape [batch_size, heads, seq_len, head_dim], then setting unsqueeze_dim=1 makes
            cos[position_ids] and sin[position_ids] broadcastable to the shapes of q and k. Similarly, if q and k have
            the shape [batch_size, seq_len, heads, head_dim], then set unsqueeze_dim=2.
    Returns:
        `tuple(torch.Tensor)` comprising of the query and key tensors rotated using the Rotary Position Embedding.
    """
    cos = cos.unsqueeze(unsqueeze_dim)
    sin = sin.unsqueeze(unsqueeze_dim)
    q_embed = (q * cos) + (rotate_half(q) * sin)
    k_embed = (k * cos) + (rotate_half(k) * sin)
    return q_embed, k_embed


def repeat_kv(hidden_states: torch.Tensor, n_rep: int) -> torch.Tensor:
    """
    This is the equivalent of torch.repeat_interleave(x, dim=1, repeats=n_rep). The hidden states go from (batch,
    num_key_value_heads, seqlen, head_dim) to (batch, num_attention_heads, seqlen, head_dim)
    """
    batch, num_key_value_heads, slen, head_dim = hidden_states.shape
    if n_rep == 1:
        return hidden_states
    hidden_states = hidden_states[:, :, None, :, :].expand(batch, num_key_value_heads, n_rep, slen, head_dim)
    return hidden_states.reshape(batch, num_key_value_heads * n_rep, slen, head_dim)


def eager_attention_forward(
    module: nn.Module,
    query: torch.Tensor,
    key: torch.Tensor,
    value: torch.Tensor,
    attention_mask: Optional[torch.Tensor],
    scaling: float,
    dropout: float = 0.0,
    **kwargs,
):
    key_states = repeat_kv(key, module.num_key_value_groups)
    value_states = repeat_kv(value, module.num_key_value_groups)

    attn_weights = torch.matmul(query, key_states.transpose(2, 3)) * scaling
    if attention_mask is not None:
        causal_mask = attention_mask[:, :, :, : key_states.shape[-2]]
        attn_weights = attn_weights + causal_mask

    attn_weights = nn.functional.softmax(attn_weights, dim=-1, dtype=torch.float32).to(query.dtype)
    attn_weights = nn.functional.dropout(attn_weights, p=dropout, training=module.training)
    attn_output = torch.matmul(attn_weights, value_states)
    attn_output = attn_output.transpose(1, 2).contiguous()

    return attn_output, attn_weights


class Starcoder2Attention(nn.Module):
    """Multi-headed attention from 'Attention Is All You Need' paper"""

    def __init__(self, config: Starcoder2Config, layer_idx: Optional[int] = None):
        super().__init__()
        self.config = config
        self.layer_idx = layer_idx
        self.head_dim = getattr(config, "head_dim", None) or config.hidden_size // config.num_attention_heads
        self.num_key_value_groups = config.num_attention_heads // config.num_key_value_heads
        self.scaling = self.head_dim**-0.5
        self.attention_dropout = config.attention_dropout
        self.is_causal = True
        self.q_proj = nn.Linear(config.hidden_size, config.num_attention_heads * self.head_dim, bias=config.use_bias)
        self.k_proj = nn.Linear(config.hidden_size, config.num_key_value_heads * self.head_dim, bias=config.use_bias)
        self.v_proj = nn.Linear(config.hidden_size, config.num_key_value_heads * self.head_dim, bias=config.use_bias)
        self.o_proj = nn.Linear(config.num_attention_heads * self.head_dim, config.hidden_size, bias=config.use_bias)
        self.residual_dropout = config.residual_dropout

    def forward(
        self,
        hidden_states: torch.Tensor,
        position_embeddings: Tuple[torch.Tensor, torch.Tensor],
        attention_mask: Optional[torch.Tensor],
        past_key_value: Optional[Cache] = None,
        cache_position: Optional[torch.LongTensor] = None,
        **kwargs: Unpack[FlashAttentionKwargs],
    ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:
        input_shape = hidden_states.shape[:-1]
        hidden_shape = (*input_shape, -1, self.head_dim)

        query_states = self.q_proj(hidden_states).view(hidden_shape).transpose(1, 2)
        key_states = self.k_proj(hidden_states).view(hidden_shape).transpose(1, 2)
        value_states = self.v_proj(hidden_states).view(hidden_shape).transpose(1, 2)

        cos, sin = position_embeddings
        query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin)

        if past_key_value is not None:
            # sin and cos are specific to RoPE models; cache_position is needed for the static cache
            cache_kwargs = {"sin": sin, "cos": cos, "cache_position": cache_position}
            key_states, value_states = past_key_value.update(key_states, value_states, self.layer_idx, cache_kwargs)

        attention_interface: Callable = eager_attention_forward
        if self.config._attn_implementation != "eager":
            if self.config._attn_implementation == "sdpa" and kwargs.get("output_attentions", False):
                logger.warning_once(
                    "`torch.nn.functional.scaled_dot_product_attention` does not support `output_attentions=True`."
                    " Falling back to eager attention. This warning can be removed using the argument"
                    ' `attn_implementation="eager"` when loading the model.'
                )
            else:
                attention_interface = ALL_ATTENTION_FUNCTIONS[self.config._attn_implementation]

        attn_output, attn_weights = attention_interface(
            self,
            query_states,
            key_states,
            value_states,
            attention_mask,
            dropout=0.0 if not self.training else self.attention_dropout,
            scaling=self.scaling,
            sliding_window=getattr(self.config, "sliding_window", None),
            **kwargs,
        )

        attn_output = attn_output.reshape(*input_shape, -1).contiguous()
        attn_output = self.o_proj(attn_output)
        attn_output = nn.functional.dropout(attn_output, p=self.residual_dropout, training=self.training)
        return attn_output, attn_weights


class Starcoder2DecoderLayer(GradientCheckpointingLayer):
    def __init__(self, config: Starcoder2Config, layer_idx: int):
        super().__init__()
        self.hidden_size = config.hidden_size
        self.self_attn = Starcoder2Attention(config=config, layer_idx=layer_idx)
        self.mlp = Starcoder2MLP(config)
        self.input_layernorm = nn.LayerNorm(config.hidden_size, eps=config.norm_epsilon)
        self.post_attention_layernorm = nn.LayerNorm(config.hidden_size, eps=config.norm_epsilon)

    def forward(
        self,
        hidden_states: torch.Tensor,
        attention_mask: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        past_key_value: Optional[Cache] = None,
        output_attentions: Optional[bool] = False,
        use_cache: Optional[bool] = False,
        cache_position: Optional[torch.LongTensor] = None,
        position_embeddings: Optional[Tuple[torch.Tensor, torch.Tensor]] = None,
        **kwargs: Unpack[FlashAttentionKwargs],
    ) -> Tuple[torch.FloatTensor, Optional[Tuple[torch.FloatTensor, torch.FloatTensor]]]:
        residual = hidden_states
        hidden_states = self.input_layernorm(hidden_states)

        # Self Attention
        hidden_states, self_attn_weights = self.self_attn(
            hidden_states=hidden_states,
            attention_mask=attention_mask,
            position_ids=position_ids,
            past_key_value=past_key_value,
            output_attentions=output_attentions,
            use_cache=use_cache,
            cache_position=cache_position,
            position_embeddings=position_embeddings,
            **kwargs,
        )
        hidden_states = residual + hidden_states

        # Fully connected
        residual = hidden_states
        hidden_states = self.post_attention_layernorm(hidden_states)
        hidden_states = self.mlp(hidden_states)
        hidden_states = residual + hidden_states

        outputs = (hidden_states,)
        if output_attentions:
            outputs += (self_attn_weights,)

        return outputs


class Starcoder2RotaryEmbedding(nn.Module):
    def __init__(self, config: Starcoder2Config, device=None):
        super().__init__()
        # BC: "rope_type" was originally "type"
        if hasattr(config, "rope_scaling") and config.rope_scaling is not None:
            self.rope_type = config.rope_scaling.get("rope_type", config.rope_scaling.get("type"))
        else:
            self.rope_type = "default"
        self.max_seq_len_cached = config.max_position_embeddings
        self.original_max_seq_len = config.max_position_embeddings

        self.config = config
        self.rope_init_fn = ROPE_INIT_FUNCTIONS[self.rope_type]

        inv_freq, self.attention_scaling = self.rope_init_fn(self.config, device)
        self.register_buffer("inv_freq", inv_freq, persistent=False)
        self.original_inv_freq = self.inv_freq

    @torch.no_grad()
    @dynamic_rope_update
    def forward(self, x, position_ids):
        inv_freq_expanded = self.inv_freq[None, :, None].float().expand(position_ids.shape[0], -1, 1).to(x.device)
        position_ids_expanded = position_ids[:, None, :].float()

        device_type = x.device.type if isinstance(x.device.type, str) and x.device.type != "mps" else "cpu"
        with torch.autocast(device_type=device_type, enabled=False):  # force float32
            freqs = (inv_freq_expanded.float() @ position_ids_expanded.float()).transpose(1, 2)
            emb = torch.cat((freqs, freqs), dim=-1)
            cos = emb.cos() * self.attention_scaling
            sin = emb.sin() * self.attention_scaling

        return cos.to(dtype=x.dtype), sin.to(dtype=x.dtype)


@auto_docstring
class Starcoder2PreTrainedModel(PreTrainedModel):
    config_class = Starcoder2Config
    base_model_prefix = "model"
    supports_gradient_checkpointing = True
    _no_split_modules = ["Starcoder2DecoderLayer"]
    _skip_keys_device_placement = ["past_key_values"]
    _supports_flash_attn_2 = True
    _supports_sdpa = True
    _supports_flex_attn = True
    _supports_cache_class = True
    _supports_quantized_cache = True
    _supports_static_cache = True
    _supports_attention_backend = True

    def _init_weights(self, module):
        std = self.config.initializer_range
        if isinstance(module, nn.Linear):
            module.weight.data.normal_(mean=0.0, std=std)
            if module.bias is not None:
                module.bias.data.zero_()
        elif isinstance(module, nn.Embedding):
            module.weight.data.normal_(mean=0.0, std=std)
            if module.padding_idx is not None:
                module.weight.data[module.padding_idx].zero_()
        elif isinstance(module, nn.LayerNorm):
            module.weight.data.fill_(1.0)
            module.bias.data.zero_()


@auto_docstring
class Starcoder2Model(Starcoder2PreTrainedModel):
    def __init__(self, config: Starcoder2Config):
        super().__init__(config)
        self.padding_idx = config.pad_token_id
        self.vocab_size = config.vocab_size

        self.embed_tokens = nn.Embedding(config.vocab_size, config.hidden_size, self.padding_idx)
        self.layers = nn.ModuleList(
            [Starcoder2DecoderLayer(config, layer_idx) for layer_idx in range(config.num_hidden_layers)]
        )
        self.norm = nn.LayerNorm(config.hidden_size, eps=config.norm_epsilon)
        self.rotary_emb = Starcoder2RotaryEmbedding(config=config)
        self.gradient_checkpointing = False
        self.embedding_dropout = config.embedding_dropout

        # Initialize weights and apply final processing
        self.post_init()

    def get_input_embeddings(self):
        return self.embed_tokens

    def set_input_embeddings(self, value):
        self.embed_tokens = value

    @can_return_tuple
    @auto_docstring
    def forward(
        self,
        input_ids: Optional[torch.LongTensor] = None,
        attention_mask: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        past_key_values: Optional[Union[Cache, List[torch.FloatTensor]]] = None,
        inputs_embeds: Optional[torch.FloatTensor] = None,
        use_cache: Optional[bool] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        cache_position: Optional[torch.LongTensor] = None,
        **flash_attn_kwargs: Unpack[FlashAttentionKwargs],
    ) -> BaseModelOutputWithPast:
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        use_cache = use_cache if use_cache is not None else self.config.use_cache

        if (input_ids is None) ^ (inputs_embeds is not None):
            raise ValueError("You must specify exactly one of input_ids or inputs_embeds")

        if self.gradient_checkpointing and self.training and use_cache:
            logger.warning_once(
                "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`."
            )
            use_cache = False

        if inputs_embeds is None:
            inputs_embeds = self.embed_tokens(input_ids)

        if use_cache and past_key_values is None:
            past_key_values = DynamicCache()

        if cache_position is None:
            past_seen_tokens = past_key_values.get_seq_length() if past_key_values is not None else 0
            cache_position = torch.arange(
                past_seen_tokens, past_seen_tokens + inputs_embeds.shape[1], device=inputs_embeds.device
            )

        if position_ids is None:
            position_ids = cache_position.unsqueeze(0)

        causal_mask = self._update_causal_mask(
            attention_mask, inputs_embeds, cache_position, past_key_values, output_attentions
        )

        hidden_states = inputs_embeds
        hidden_states = nn.functional.dropout(hidden_states, p=self.embedding_dropout, training=self.training)

        # create position embeddings to be shared across the decoder layers
        position_embeddings = self.rotary_emb(hidden_states, position_ids)

        all_hidden_states = () if output_hidden_states else None
        all_self_attns = () if output_attentions else None

        for decoder_layer in self.layers[: self.config.num_hidden_layers]:
            if output_hidden_states:
                all_hidden_states += (hidden_states,)

            layer_outputs = decoder_layer(
                hidden_states,
                attention_mask=causal_mask,
                position_ids=position_ids,
                past_key_value=past_key_values,
                output_attentions=output_attentions,
                use_cache=use_cache,
                cache_position=cache_position,
                position_embeddings=position_embeddings,
                **flash_attn_kwargs,
            )

            hidden_states = layer_outputs[0]

            if output_attentions:
                all_self_attns += (layer_outputs[1],)

        hidden_states = self.norm(hidden_states)

        if output_hidden_states:
            all_hidden_states += (hidden_states,)

        return BaseModelOutputWithPast(
            last_hidden_state=hidden_states,
            past_key_values=past_key_values if use_cache else None,
            hidden_states=all_hidden_states,
            attentions=all_self_attns,
        )

    def _update_causal_mask(
        self,
        attention_mask: Union[torch.Tensor, "BlockMask"],
        input_tensor: torch.Tensor,
        cache_position: torch.Tensor,
        past_key_values: Cache,
        output_attentions: bool = False,
    ):
        if self.config._attn_implementation == "flash_attention_2":
            if attention_mask is not None and past_key_values is not None:
                is_padding_right = attention_mask[:, -1].sum().item() != input_tensor.size()[0]
                if is_padding_right:
                    raise ValueError(
                        "You are attempting to perform batched generation with padding_side='right'"
                        " this may lead to unexpected behaviour for Flash Attention version of Starcoder2. Make sure to "
                        " call `tokenizer.padding_side  = 'left'` before tokenizing the input. "
                    )
            if attention_mask is not None and 0.0 in attention_mask:
                return attention_mask
            return None

        if self.config._attn_implementation == "flex_attention":
            if isinstance(attention_mask, torch.Tensor):
                attention_mask = make_flex_block_causal_mask(attention_mask)
            return attention_mask

        past_seen_tokens = past_key_values.get_seq_length() if past_key_values is not None else 0
        using_static_cache = isinstance(past_key_values, StaticCache)
        using_sliding_window_cache = isinstance(past_key_values, SlidingWindowCache)

        # When possible, rely on SDPA's `is_causal` argument instead of building a 4D mask
        if (
            self.config._attn_implementation == "sdpa"
            and not (using_static_cache or using_sliding_window_cache)
            and not output_attentions
        ):
            if AttentionMaskConverter._ignore_causal_mask_sdpa(
                attention_mask,
                inputs_embeds=input_tensor,
                past_key_values_length=past_seen_tokens,
                sliding_window=self.config.sliding_window,
                is_training=self.training,
            ):
                return None

        dtype = input_tensor.dtype
        min_dtype = torch.finfo(dtype).min
        sequence_length = input_tensor.shape[1]
        # SlidingWindowCache or StaticCache
        if using_sliding_window_cache or using_static_cache:
            target_length = past_key_values.get_max_cache_shape()
        # DynamicCache or no cache
        else:
            target_length = (
                attention_mask.shape[-1]
                if isinstance(attention_mask, torch.Tensor)
                else past_seen_tokens + sequence_length + 1
            )

        # In case the provided `attention_mask` is 2D, we generate a causal mask here (4D).
        causal_mask = self._prepare_4d_causal_attention_mask_with_cache_position(
            attention_mask,
            sequence_length=sequence_length,
            target_length=target_length,
            dtype=dtype,
            cache_position=cache_position,
            batch_size=input_tensor.shape[0],
            config=self.config,
            past_key_values=past_key_values,
        )

        if (
            self.config._attn_implementation == "sdpa"
            and attention_mask is not None
            and attention_mask.device.type in ["cuda", "xpu", "npu"]
            and not output_attentions
        ):
            # Attend to all tokens in fully masked rows, as required by the SDPA
            # memory-efficient attention path (see pytorch/pytorch#110213).
            causal_mask = AttentionMaskConverter._unmask_unattended(causal_mask, min_dtype)

        return causal_mask

    @staticmethod
    def _prepare_4d_causal_attention_mask_with_cache_position(
        attention_mask: torch.Tensor,
        sequence_length: int,
        target_length: int,
        dtype: torch.dtype,
        cache_position: torch.Tensor,
        batch_size: int,
        config: Starcoder2Config,
        past_key_values: Cache,
    ):
        """
        Creates a causal 4D mask of shape `(batch_size, 1, query_length, key_value_length)` from a 2D mask of shape
        `(batch_size, key_value_length)`, or if the input `attention_mask` is already 4D, do nothing.

        Args:
            attention_mask (`torch.Tensor`):
                A 2D attention mask of shape `(batch_size, key_value_length)` or a 4D attention mask of shape `(batch_size, 1, query_length, key_value_length)`.
            sequence_length (`int`):
                The sequence length being processed.
            target_length (`int`):
                The target length: when generating with static cache, the mask should be as long as the static cache, to account for the 0 padding, the part of the cache that is not filled yet.
            dtype (`torch.dtype`):
                The dtype to use for the 4D attention mask.
            cache_position (`torch.Tensor`):
                Indices depicting the position of the input sequence tokens in the sequence.
            batch_size (`torch.Tensor`):
                Batch size.
            config (`Starcoder2Config`):
                The model's configuration class
            past_key_values (`Cache`):
                The cache class that is being used currently to generate
        """
        if attention_mask is not None and attention_mask.dim() == 4:
            # The mask is already 4D and assumed to come in inverted form; nothing to do.
            causal_mask = attention_mask
        else:
            min_dtype = torch.finfo(dtype).min
            causal_mask = torch.full(
                (sequence_length, target_length), fill_value=min_dtype, dtype=dtype, device=cache_position.device
            )
            diagonal_attend_mask = torch.arange(target_length, device=cache_position.device) > cache_position.reshape(
                -1, 1
            )
            text_config = config.get_text_config()
            if getattr(text_config, "use_sliding_window", True) and text_config.sliding_window is not None:
                # With a sliding window, also mask out tokens beyond the window length. The cache-type check
                # verifies whether the current checkpoint actually needs the extra masking.
                if not isinstance(past_key_values, SlidingWindowCache) or sequence_length > target_length:
                    sliding_attend_mask = torch.arange(target_length, device=cache_position.device) <= (
                        cache_position.reshape(-1, 1) - text_config.sliding_window
                    )
                    diagonal_attend_mask.bitwise_or_(sliding_attend_mask)
            causal_mask *= diagonal_attend_mask
            causal_mask = causal_mask[None, None, :, :].expand(batch_size, 1, -1, -1)
            if attention_mask is not None:
                causal_mask = causal_mask.clone()  # copy to contiguous memory for in-place edit
                if attention_mask.shape[-1] > target_length:
                    attention_mask = attention_mask[:, :target_length]
                mask_length = attention_mask.shape[-1]
                padding_mask = causal_mask[:, :, :, :mask_length] + attention_mask[:, None, None, :].to(
                    causal_mask.device
                )
                padding_mask = padding_mask == 0
                causal_mask[:, :, :, :mask_length] = causal_mask[:, :, :, :mask_length].masked_fill(
                    padding_mask, min_dtype
                )

        return causal_mask


class KwargsForCausalLM(FlashAttentionKwargs, LossKwargs): ...


@auto_docstring
class Starcoder2ForCausalLM(Starcoder2PreTrainedModel, GenerationMixin):
    _tied_weights_keys = ["lm_head.weight"]
    _tp_plan = {"lm_head": "colwise_rep"}
    _pp_plan = {"lm_head": (["hidden_states"], ["logits"])}

    def __init__(self, config):
        super().__init__(config)
        self.model = Starcoder2Model(config)
        self.vocab_size = config.vocab_size
        self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False)

        # Initialize weights and apply final processing
        self.post_init()

    def get_input_embeddings(self):
        return self.model.embed_tokens

    def set_input_embeddings(self, value):
        self.model.embed_tokens = value

    def get_output_embeddings(self):
        return self.lm_head

    def set_output_embeddings(self, new_embeddings):
        self.lm_head = new_embeddings

    def set_decoder(self, decoder):
        self.model = decoder

    def get_decoder(self):
        return self.model

    @can_return_tuple
    @auto_docstring
    def forward(
        self,
        input_ids: Optional[torch.LongTensor] = None,
        attention_mask: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        past_key_values: Optional[Cache] = None,
        inputs_embeds: Optional[torch.FloatTensor] = None,
        labels: Optional[torch.LongTensor] = None,
        use_cache: Optional[bool] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        cache_position: Optional[torch.LongTensor] = None,
        logits_to_keep: Union[int, torch.Tensor] = 0,
        **kwargs: Unpack[KwargsForCausalLM],
    ) -> CausalLMOutputWithPast:
        r"""
        labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
            Labels for computing the masked language modeling loss. Indices should either be in `[0, ...,
            config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored
            (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.

        Example:

        ```python
        >>> from transformers import AutoTokenizer, Starcoder2ForCausalLM

        >>> model = Starcoder2ForCausalLM.from_pretrained("meta-starcoder2/Starcoder2-2-7b-hf")
        >>> tokenizer = AutoTokenizer.from_pretrained("meta-starcoder2/Starcoder2-2-7b-hf")

        >>> prompt = "Hey, are you conscious? Can you talk to me?"
        >>> inputs = tokenizer(prompt, return_tensors="pt")

        >>> # Generate
        >>> generate_ids = model.generate(inputs.input_ids, max_length=30)
        >>> tokenizer.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0]
        "Hey, are you conscious? Can you talk to me?\nI'm not conscious, but I can talk to you."
        ```"""
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )

        outputs: BaseModelOutputWithPast = self.model(
            input_ids=input_ids,
            attention_mask=attention_mask,
            position_ids=position_ids,
            past_key_values=past_key_values,
            inputs_embeds=inputs_embeds,
            use_cache=use_cache,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            cache_position=cache_position,
        )

        hidden_states = outputs.last_hidden_state
        # Only compute the necessary logits
        slice_indices = slice(-logits_to_keep, None) if isinstance(logits_to_keep, int) else logits_to_keep
        logits = self.lm_head(hidden_states[:, slice_indices, :])

        loss = None
        if labels is not None:
            loss = self.loss_function(logits=logits, labels=labels, vocab_size=self.config.vocab_size, **kwargs)

        return CausalLMOutputWithPast(
            loss=loss,
            logits=logits,
            past_key_values=outputs.past_key_values,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
        )


@auto_docstring(
    custom_intro="""
    The Starcoder2 Model transformer with a sequence classification head on top (linear layer).

    [`Starcoder2ForSequenceClassification`] uses the last token in order to do the classification, as other causal models
    (e.g. GPT-2) do.

    Since it does classification on the last token, it requires to know the position of the last token. If a
    `pad_token_id` is defined in the configuration, it finds the last token that is not a padding token in each row. If
    no `pad_token_id` is defined, it simply takes the last value in each row of the batch. Since it cannot guess the
    padding tokens when `inputs_embeds` are passed instead of `input_ids`, it does the same (take the last value in
    each row of the batch).
    """
)
class Starcoder2ForSequenceClassification(Starcoder2PreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels
        self.model = Starcoder2Model(config)
        self.score = nn.Linear(config.hidden_size, self.num_labels, bias=False)

        # Initialize weights and apply final processing
        self.post_init()

    def get_input_embeddings(self):
        return self.model.embed_tokens

    def set_input_embeddings(self, value):
        self.model.embed_tokens = value

    @can_return_tuple
    @auto_docstring
    def forward(
        self,
        input_ids: Optional[torch.LongTensor] = None,
        attention_mask: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        past_key_values: Optional[Cache] = None,
        inputs_embeds: Optional[torch.FloatTensor] = None,
        labels: Optional[torch.LongTensor] = None,
        use_cache: Optional[bool] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
    ) -> SequenceClassifierOutputWithPast:
        r"""
        labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
            Labels for computing the sequence classification/regression loss. Indices should be in `[0, ...,
            config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If
            `config.num_labels > 1` a classification loss is computed (Cross-Entropy).
        """
        transformer_outputs: BaseModelOutputWithPast = self.model(
            input_ids,
            attention_mask=attention_mask,
            position_ids=position_ids,
            past_key_values=past_key_values,
            inputs_embeds=inputs_embeds,
            use_cache=use_cache,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
        )
        hidden_states = transformer_outputs.last_hidden_state
        logits = self.score(hidden_states)

        if input_ids is not None:
            batch_size = input_ids.shape[0]
        else:
            batch_size = inputs_embeds.shape[0]

        if self.config.pad_token_id is None and batch_size != 1:
            raise ValueError("Cannot handle batch sizes > 1 if no padding token is defined.")
        if self.config.pad_token_id is None:
            last_non_pad_token = -1
        elif input_ids is not None:
            # To handle both left- and right-padding, take the rightmost token that is not a pad token
            non_pad_mask = (input_ids != self.config.pad_token_id).to(logits.device, torch.int32)
            token_indices = torch.arange(input_ids.shape[-1], device=logits.device, dtype=torch.int32)
            last_non_pad_token = (token_indices * non_pad_mask).argmax(-1)
        else:
            last_non_pad_token = -1
            logger.warning_once(
                f"{self.__class__.__name__} will not detect padding tokens in `inputs_embeds`. Results may be "
                "unexpected if using padding tokens in conjunction with `inputs_embeds.`"
            )

        pooled_logits = logits[torch.arange(batch_size, device=logits.device), last_non_pad_token]

        loss = None
        if labels is not None:
            loss = self.loss_function(logits=logits, labels=labels, pooled_logits=pooled_logits, config=self.config)

        return SequenceClassifierOutputWithPast(
            loss=loss,
            logits=pooled_logits,
            past_key_values=transformer_outputs.past_key_values,
            hidden_states=transformer_outputs.hidden_states,
            attentions=transformer_outputs.attentions,
        )


@auto_docstring
class Starcoder2ForTokenClassification(Starcoder2PreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels
        self.model = Starcoder2Model(config)
        if getattr(config, "classifier_dropout", None) is not None:
            classifier_dropout = config.classifier_dropout
        elif getattr(config, "hidden_dropout", None) is not None:
            classifier_dropout = config.hidden_dropout
        else:
            classifier_dropout = 0.1
        self.dropout = nn.Dropout(classifier_dropout)
        self.score = nn.Linear(config.hidden_size, config.num_labels)

        # Initialize weights and apply final processing
        self.post_init()

    def get_input_embeddings(self):
        return self.model.embed_tokens

    def set_input_embeddings(self, value):
        self.model.embed_tokens = value

    @can_return_tuple
    @auto_docstring
    def forward(
        self,
        input_ids: Optional[torch.LongTensor] = None,
        attention_mask: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        past_key_values: Optional[Cache] = None,
        inputs_embeds: Optional[torch.FloatTensor] = None,
        labels: Optional[torch.LongTensor] = None,
        use_cache: Optional[bool] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
    ) -> TokenClassifierOutput:
        r"""
        labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
            Labels for computing the sequence classification/regression loss. Indices should be in `[0, ...,
            config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If
            `config.num_labels > 1` a classification loss is computed (Cross-Entropy).
        """
        outputs: BaseModelOutputWithPast = self.model(
            input_ids,
            attention_mask=attention_mask,
            position_ids=position_ids,
            past_key_values=past_key_values,
            inputs_embeds=inputs_embeds,
            use_cache=use_cache,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
        )
        sequence_output = outputs.last_hidden_state
        sequence_output = self.dropout(sequence_output)
        logits = self.score(sequence_output)

        loss = None
        if labels is not None:
            loss = self.loss_function(logits, labels, self.config)

        return TokenClassifierOutput(
            loss=loss,
            logits=logits,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
        )


__all__ = [
    "Starcoder2ForCausalLM",
    "Starcoder2Model",
    "Starcoder2PreTrainedModel",
    "Starcoder2ForSequenceClassification",
    "Starcoder2ForTokenClassification",
]
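

# Illustrative usage sketch (not part of the upstream transformers module): it builds a
# tiny, randomly initialised Starcoder2 causal LM and runs a single forward pass. The toy
# hyperparameter values below are assumptions chosen only to keep the example small enough
# to run quickly on CPU; real checkpoints are loaded with `Starcoder2ForCausalLM.from_pretrained`.
if __name__ == "__main__":
    demo_config = Starcoder2Config(
        vocab_size=256,
        hidden_size=64,
        intermediate_size=128,
        num_hidden_layers=2,
        num_attention_heads=4,
        num_key_value_heads=2,
        max_position_embeddings=128,
    )
    demo_model = Starcoder2ForCausalLM(demo_config)
    demo_input_ids = torch.randint(0, demo_config.vocab_size, (1, 16))
    with torch.no_grad():
        demo_output = demo_model(input_ids=demo_input_ids)
    # Logits cover every position of the toy sequence: (batch, seq_len, vocab_size).
    print(demo_output.logits.shape)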