from functools import partial
from typing import Callable, Optional, Tuple, Union

import torch
from torch import nn

from ...activations import ACT2FN
from ...cache_utils import Cache, DynamicCache
from ...generation import GenerationMixin
from ...modeling_attn_mask_utils import AttentionMaskConverter
from ...modeling_flash_attention_utils import FlashAttentionKwargs
from ...modeling_outputs import (
    BaseModelOutputWithPast,
    CausalLMOutputWithPast,
    SequenceClassifierOutputWithPast,
    TokenClassifierOutput,
)
from ...modeling_rope_utils import ROPE_INIT_FUNCTIONS, dynamic_rope_update
from ...modeling_utils import ALL_ATTENTION_FUNCTIONS, PreTrainedModel
from ...processing_utils import Unpack
from ...utils import (
    LossKwargs,
    auto_docstring,
    can_return_tuple,
    is_torch_flex_attn_available,
    logging,
)
from .configuration_phi import PhiConfig


if is_torch_flex_attn_available():
    from torch.nn.attention.flex_attention import BlockMask

    from ...integrations.flex_attention import make_flex_block_causal_mask


logger = logging.get_logger(__name__)


def rotate_half(x):
    """Rotates half the hidden dims of the input."""
    x1 = x[..., : x.shape[-1] // 2]
    x2 = x[..., x.shape[-1] // 2 :]
    return torch.cat((-x2, x1), dim=-1)


def apply_rotary_pos_emb(q, k, cos, sin, position_ids=None, unsqueeze_dim=1):
    """Applies Rotary Position Embedding to the query and key tensors.

    Args:
        q (`torch.Tensor`): The query tensor.
        k (`torch.Tensor`): The key tensor.
        cos (`torch.Tensor`): The cosine part of the rotary embedding.
        sin (`torch.Tensor`): The sine part of the rotary embedding.
        position_ids (`torch.Tensor`, *optional*):
            Deprecated and unused.
        unsqueeze_dim (`int`, *optional*, defaults to 1):
            The 'unsqueeze_dim' argument specifies the dimension along which to unsqueeze cos[position_ids] and
            sin[position_ids] so that they can be properly broadcasted to the dimensions of q and k. For example, note
            that cos[position_ids] and sin[position_ids] have the shape [batch_size, seq_len, head_dim]. Then, if q and
            k have the shape [batch_size, heads, seq_len, head_dim], then setting unsqueeze_dim=1 makes
            cos[position_ids] and sin[position_ids] broadcastable to the shapes of q and k. Similarly, if q and k have
            the shape [batch_size, seq_len, heads, head_dim], then set unsqueeze_dim=2.
    Returns:
        `tuple(torch.Tensor)` comprising of the query and key tensors rotated using the Rotary Position Embedding.
    """
    cos = cos.unsqueeze(unsqueeze_dim)
    sin = sin.unsqueeze(unsqueeze_dim)
    q_embed = (q * cos) + (rotate_half(q) * sin)
    k_embed = (k * cos) + (rotate_half(k) * sin)
    return q_embed, k_embed


def repeat_kv(hidden_states: torch.Tensor, n_rep: int) -> torch.Tensor:
    """
    This is the equivalent of torch.repeat_interleave(x, dim=1, repeats=n_rep). The hidden states go from (batch,
    num_key_value_heads, seqlen, head_dim) to (batch, num_attention_heads, seqlen, head_dim)
    """
    batch, num_key_value_heads, slen, head_dim = hidden_states.shape
    if n_rep == 1:
        return hidden_states
    hidden_states = hidden_states[:, :, None, :, :].expand(batch, num_key_value_heads, n_rep, slen, head_dim)
    return hidden_states.reshape(batch, num_key_value_heads * n_rep, slen, head_dim)


def eager_attention_forward(
    module: nn.Module,
    query: torch.Tensor,
    key: torch.Tensor,
    value: torch.Tensor,
    attention_mask: Optional[torch.Tensor],
    scaling: float,
    dropout: float = 0.0,
    **kwargs,
):
    key_states = repeat_kv(key, module.num_key_value_groups)
    value_states = repeat_kv(value, module.num_key_value_groups)

    attn_weights = torch.matmul(query, key_states.transpose(2, 3)) * scaling
    if attention_mask is not None:
        causal_mask = attention_mask[:, :, :, : key_states.shape[-2]]
        attn_weights = attn_weights + causal_mask

    attn_weights = nn.functional.softmax(attn_weights, dim=-1, dtype=torch.float32).to(query.dtype)
    attn_weights = nn.functional.dropout(attn_weights, p=dropout, training=module.training)
    attn_output = torch.matmul(attn_weights, value_states)
    attn_output = attn_output.transpose(1, 2).contiguous()

    return attn_output, attn_weights


class PhiAttention(nn.Module):
    """Multi-headed attention from 'Attention Is All You Need' paper"""

    def __init__(self, config: PhiConfig, layer_idx: int):
        super().__init__()
        self.config = config
        self.layer_idx = layer_idx
        self.head_dim = getattr(config, "head_dim", config.hidden_size // config.num_attention_heads)
        self.num_key_value_groups = config.num_attention_heads // config.num_key_value_heads
        self.scaling = self.head_dim**-0.5
        self.attention_dropout = config.attention_dropout
        self.is_causal = True
        self.q_proj = nn.Linear(config.hidden_size, config.num_attention_heads * self.head_dim, bias=True)
        self.k_proj = nn.Linear(config.hidden_size, config.num_key_value_heads * self.head_dim, bias=True)
        self.v_proj = nn.Linear(config.hidden_size, config.num_key_value_heads * self.head_dim, bias=True)
        self.dense = nn.Linear(config.num_attention_heads * self.head_dim, config.hidden_size, bias=True)

        self.rotary_ndims = int(self.head_dim * config.partial_rotary_factor)
        self.qk_layernorm = config.qk_layernorm
        if self.qk_layernorm:
            self.q_layernorm = nn.LayerNorm(
                config.hidden_size // config.num_attention_heads, eps=config.layer_norm_eps, elementwise_affine=True
            )
            self.k_layernorm = nn.LayerNorm(
                config.hidden_size // config.num_attention_heads, eps=config.layer_norm_eps, elementwise_affine=True
            )

    def forward(
        self,
        hidden_states: torch.Tensor,
        position_embeddings: Tuple[torch.Tensor, torch.Tensor],
        attention_mask: Optional[torch.Tensor],
        past_key_value: Optional[Cache] = None,
        cache_position: Optional[torch.LongTensor] = None,
        **kwargs,
    ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:
        input_shape = hidden_states.shape[:-1]
        hidden_shape = (*input_shape, -1, self.head_dim)

        query_states = self.q_proj(hidden_states).view(hidden_shape).transpose(1, 2)
        key_states = self.k_proj(hidden_states).view(hidden_shape).transpose(1, 2)
        value_states = self.v_proj(hidden_states).view(hidden_shape).transpose(1, 2)

        if self.qk_layernorm:
            query_states = self.q_layernorm(query_states)
            key_states = self.k_layernorm(key_states)

        cos, sin = position_embeddings
        # Partial rotary embedding: only the first `rotary_ndims` of each head are rotated.
        query_rot, query_pass = (
            query_states[..., : self.rotary_ndims],
            query_states[..., self.rotary_ndims :],
        )
        key_rot, key_pass = (
            key_states[..., : self.rotary_ndims],
            key_states[..., self.rotary_ndims :],
        )
        query_rot, key_rot = apply_rotary_pos_emb(query_rot, key_rot, cos, sin)

        # [batch_size, num_heads, seq_length, head_dim]
        query_states = torch.cat((query_rot, query_pass), dim=-1)
        key_states = torch.cat((key_rot, key_pass), dim=-1)

        if past_key_value is not None:
            # sin and cos are specific to RoPE models; cache_position is needed for the static cache
            cache_kwargs = {"sin": sin, "cos": cos, "cache_position": cache_position}
            key_states, value_states = past_key_value.update(key_states, value_states, self.layer_idx, cache_kwargs)

        attention_interface: Callable = eager_attention_forward
        if self.config._attn_implementation != "eager":
            if self.config._attn_implementation == "sdpa" and kwargs.get("output_attentions", False):
                logger.warning_once(
                    "`torch.nn.functional.scaled_dot_product_attention` does not support `output_attentions=True`."
                    " Falling back to eager attention. This warning can be removed using the argument"
                    ' `attn_implementation="eager"` when loading the model.'
                )
            else:
                attention_interface = ALL_ATTENTION_FUNCTIONS[self.config._attn_implementation]

        attn_output, attn_weights = attention_interface(
            self,
            query_states,
            key_states,
            value_states,
            attention_mask,
            dropout=0.0 if not self.training else self.attention_dropout,
            scaling=self.scaling,
            **kwargs,
        )

        attn_output = attn_output.reshape(*input_shape, -1).contiguous()
        attn_output = self.dense(attn_output)
        return attn_output, attn_weights


class PhiMLP(nn.Module):
    def __init__(self, config):
        super().__init__()
        self.config = config
        self.activation_fn = ACT2FN[config.hidden_act]
        self.fc1 = nn.Linear(config.hidden_size, config.intermediate_size)
        self.fc2 = nn.Linear(config.intermediate_size, config.hidden_size)

    def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
        hidden_states = self.fc1(hidden_states)
        hidden_states = self.activation_fn(hidden_states)
        hidden_states = self.fc2(hidden_states)
        return hidden_states


class PhiDecoderLayer(nn.Module):
    def __init__(self, config: PhiConfig, layer_idx: int):
        super().__init__()
        self.self_attn = PhiAttention(config, layer_idx=layer_idx)
        self.mlp = PhiMLP(config)
        self.input_layernorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
        self.resid_dropout = nn.Dropout(config.resid_pdrop)

    def forward(
        self,
        hidden_states: torch.Tensor,
        attention_mask: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        past_key_value: Optional[Tuple[torch.Tensor]] = None,
        output_attentions: Optional[bool] = False,
        use_cache: Optional[bool] = False,
        cache_position: Optional[torch.LongTensor] = None,
        position_embeddings: Optional[Tuple[torch.Tensor, torch.Tensor]] = None,
        **kwargs,
    ) -> Tuple[torch.FloatTensor, Optional[Tuple[torch.FloatTensor, torch.FloatTensor]]]:
        residual = hidden_states

        hidden_states = self.input_layernorm(hidden_states)

        # Self attention and MLP run in parallel on the same normalized input (Phi-style block).
        attn_outputs, self_attn_weights = self.self_attn(
            hidden_states=hidden_states,
            attention_mask=attention_mask,
            position_ids=position_ids,
            past_key_value=past_key_value,
            output_attentions=output_attentions,
            use_cache=use_cache,
            cache_position=cache_position,
            position_embeddings=position_embeddings,
            **kwargs,
        )
        attn_outputs = self.resid_dropout(attn_outputs)

        feed_forward_hidden_states = self.resid_dropout(self.mlp(hidden_states))
        hidden_states = attn_outputs + feed_forward_hidden_states + residual
        outputs = (hidden_states,)
        if output_attentions:
            outputs += (self_attn_weights,)

        return outputs


class PhiRotaryEmbedding(nn.Module):
    def __init__(self, config: PhiConfig, device=None):
        super().__init__()
        # BC: "rope_type" was originally "type"
        if hasattr(config, "rope_scaling") and config.rope_scaling is not None:
            self.rope_type = config.rope_scaling.get("rope_type", config.rope_scaling.get("type"))
        else:
            self.rope_type = "default"
        self.max_seq_len_cached = config.max_position_embeddings
        self.original_max_seq_len = config.max_position_embeddings

        self.config = config
        self.rope_init_fn = ROPE_INIT_FUNCTIONS[self.rope_type]

        inv_freq, self.attention_scaling = self.rope_init_fn(self.config, device)
        self.register_buffer("inv_freq", inv_freq, persistent=False)
        self.original_inv_freq = self.inv_freq

    @torch.no_grad()
    @dynamic_rope_update
    def forward(self, x, position_ids):
        inv_freq_expanded = self.inv_freq[None, :, None].float().expand(position_ids.shape[0], -1, 1).to(x.device)
        position_ids_expanded = position_ids[:, None, :].float()

        device_type = x.device.type if isinstance(x.device.type, str) and x.device.type != "mps" else "cpu"
        with torch.autocast(device_type=device_type, enabled=False):  # force float32
            freqs = (inv_freq_expanded.float() @ position_ids_expanded.float()).transpose(1, 2)
            emb = torch.cat((freqs, freqs), dim=-1)
            cos = emb.cos() * self.attention_scaling
            sin = emb.sin() * self.attention_scaling

        return cos.to(dtype=x.dtype), sin.to(dtype=x.dtype)


@auto_docstring
class PhiPreTrainedModel(PreTrainedModel):
    config_class = PhiConfig
    base_model_prefix = "model"
    supports_gradient_checkpointing = True
    _no_split_modules = ["PhiDecoderLayer"]
    _skip_keys_device_placement = ["past_key_values"]
    _supports_flash_attn_2 = True
    _supports_sdpa = True
    _supports_flex_attn = True
    _supports_cache_class = True
    _supports_quantized_cache = True
    _supports_static_cache = True
    _supports_attention_backend = True

    def _init_weights(self, module):
        std = self.config.initializer_range
        if isinstance(module, nn.Linear):
            module.weight.data.normal_(mean=0.0, std=std)
            if module.bias is not None:
                module.bias.data.zero_()
        elif isinstance(module, nn.Embedding):
            module.weight.data.normal_(mean=0.0, std=std)
            if module.padding_idx is not None:
                module.weight.data[module.padding_idx].zero_()
        elif isinstance(module, nn.LayerNorm):
            module.weight.data.fill_(1.0)
            module.bias.data.zero_()
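# Illustrative sketch (not part of the upstream file): how the helpers above combine inside
# `PhiAttention`. Only a slice of each head (`rotary_ndims = head_dim * partial_rotary_factor`)
# receives rotary embeddings; the remainder passes through unchanged, and key/value heads are
# replicated with `repeat_kv` for grouped-query attention. All sizes below are hypothetical toys.
def _partial_rotary_sketch():
    batch, seq_len, num_heads, num_kv_heads, head_dim, rotary_ndims = 1, 4, 8, 2, 16, 8

    q = torch.randn(batch, num_heads, seq_len, head_dim)
    k = torch.randn(batch, num_kv_heads, seq_len, head_dim)

    # cos/sin normally come from `PhiRotaryEmbedding` and cover only the rotary slice of each head.
    angles = torch.randn(batch, seq_len, rotary_ndims)
    cos, sin = angles.cos(), angles.sin()

    # Split into the rotated slice and the untouched pass-through slice, then rotate and re-join.
    q_rot, q_pass = q[..., :rotary_ndims], q[..., rotary_ndims:]
    k_rot, k_pass = k[..., :rotary_ndims], k[..., rotary_ndims:]
    q_rot, k_rot = apply_rotary_pos_emb(q_rot, k_rot, cos, sin)
    q = torch.cat((q_rot, q_pass), dim=-1)
    k = torch.cat((k_rot, k_pass), dim=-1)

    # Expand the 2 key/value heads to the 8 query heads before the attention product.
    k = repeat_kv(k, num_heads // num_kv_heads)
    return q.shape, k.shape  # both (1, 8, 4, 16)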
@auto_docstring
class PhiModel(PhiPreTrainedModel):
    def __init__(self, config: PhiConfig):
        super().__init__(config)
        self.padding_idx = config.pad_token_id
        self.vocab_size = config.vocab_size

        self.embed_tokens = nn.Embedding(config.vocab_size, config.hidden_size, self.padding_idx)
        self.layers = nn.ModuleList(
            [PhiDecoderLayer(config, layer_idx) for layer_idx in range(config.num_hidden_layers)]
        )
        self.rotary_emb = PhiRotaryEmbedding(config=config)
        self.gradient_checkpointing = False
        self.embed_dropout = nn.Dropout(config.embd_pdrop)
        self.final_layernorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)

        # Initialize weights and apply final processing
        self.post_init()

    def get_input_embeddings(self):
        return self.embed_tokens

    def set_input_embeddings(self, value):
        self.embed_tokens = value

    @can_return_tuple
    @auto_docstring
    def forward(
        self,
        input_ids: Optional[torch.LongTensor] = None,
        attention_mask: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        past_key_values: Optional[Cache] = None,
        inputs_embeds: Optional[torch.FloatTensor] = None,
        use_cache: Optional[bool] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        cache_position: Optional[torch.LongTensor] = None,
        **flash_attn_kwargs: Unpack[FlashAttentionKwargs],
    ) -> BaseModelOutputWithPast:
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        use_cache = use_cache if use_cache is not None else self.config.use_cache

        if (input_ids is None) ^ (inputs_embeds is not None):
            raise ValueError("You must specify exactly one of input_ids or inputs_embeds")

        if self.gradient_checkpointing and self.training and use_cache:
            logger.warning_once(
                "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`."
            )
            use_cache = False

        if inputs_embeds is None:
            inputs_embeds = self.embed_tokens(input_ids)

        if use_cache and past_key_values is None:
            past_key_values = DynamicCache()

        if cache_position is None:
            past_seen_tokens = past_key_values.get_seq_length() if past_key_values is not None else 0
            cache_position = torch.arange(
                past_seen_tokens, past_seen_tokens + inputs_embeds.shape[1], device=inputs_embeds.device
            )

        if position_ids is None:
            position_ids = cache_position.unsqueeze(0)

        causal_mask = self._update_causal_mask(
            attention_mask, inputs_embeds, cache_position, past_key_values, output_attentions
        )

        inputs_embeds = self.embed_dropout(inputs_embeds)
        hidden_states = inputs_embeds

        # create position embeddings to be shared across the decoder layers
        position_embeddings = self.rotary_emb(hidden_states, position_ids)

        # decoder layers
        all_hidden_states = () if output_hidden_states else None
        all_self_attns = () if output_attentions else None

        for decoder_layer in self.layers[: self.config.num_hidden_layers]:
            if output_hidden_states:
                all_hidden_states += (hidden_states,)

            if self.gradient_checkpointing and self.training:
                layer_outputs = self._gradient_checkpointing_func(
                    partial(decoder_layer.__call__, **flash_attn_kwargs),
                    hidden_states,
                    causal_mask,
                    position_ids,
                    past_key_values,
                    output_attentions,
                    use_cache,
                    cache_position,
                    position_embeddings,
                )
            else:
                layer_outputs = decoder_layer(
                    hidden_states,
                    attention_mask=causal_mask,
                    position_ids=position_ids,
                    past_key_value=past_key_values,
                    output_attentions=output_attentions,
                    use_cache=use_cache,
                    cache_position=cache_position,
                    position_embeddings=position_embeddings,
                    **flash_attn_kwargs,
                )

            hidden_states = layer_outputs[0]

            if output_attentions:
                all_self_attns += (layer_outputs[1],)

        hidden_states = self.final_layernorm(hidden_states)

        # add hidden states from the last decoder layer
        if output_hidden_states:
            all_hidden_states += (hidden_states,)

        return BaseModelOutputWithPast(
            last_hidden_state=hidden_states,
            past_key_values=past_key_values if use_cache else None,
            hidden_states=all_hidden_states,
            attentions=all_self_attns,
        )

    def _update_causal_mask(
        self,
        attention_mask: Union[torch.Tensor, "BlockMask"],
        input_tensor: torch.Tensor,
        cache_position: torch.Tensor,
        past_key_values: Cache,
        output_attentions: bool = False,
    ):
        if self.config._attn_implementation == "flash_attention_2":
            if attention_mask is not None and (attention_mask == 0.0).any():
                return attention_mask
            return None
        if self.config._attn_implementation == "flex_attention":
            if isinstance(attention_mask, torch.Tensor):
                attention_mask = make_flex_block_causal_mask(attention_mask)
            return attention_mask

        # For SDPA, when possible, rely on its `is_causal` argument instead of an explicit mask in
        # order to dispatch to Flash Attention 2. This is not compatible with static/compiled caches.
        past_seen_tokens = past_key_values.get_seq_length() if past_key_values is not None else 0
        using_compilable_cache = past_key_values.is_compileable if past_key_values is not None else False

        if (
            self.config._attn_implementation == "sdpa"
            and not using_compilable_cache
            and not output_attentions
            and AttentionMaskConverter._ignore_causal_mask_sdpa(
                attention_mask,
                inputs_embeds=input_tensor,
                past_key_values_length=past_seen_tokens,
                is_training=self.training,
            )
        ):
            return None

        dtype = input_tensor.dtype
        sequence_length = input_tensor.shape[1]
        if using_compilable_cache:
            target_length = past_key_values.get_max_cache_shape()
        else:
            target_length = (
                attention_mask.shape[-1]
                if isinstance(attention_mask, torch.Tensor)
                else past_seen_tokens + sequence_length + 1
            )

        # In case the provided 2D attention mask needs to be expanded, build the 4D causal mask here.
        causal_mask = self._prepare_4d_causal_attention_mask_with_cache_position(
            attention_mask,
            sequence_length=sequence_length,
            target_length=target_length,
            dtype=dtype,
            cache_position=cache_position,
            batch_size=input_tensor.shape[0],
        )

        if (
            self.config._attn_implementation == "sdpa"
            and attention_mask is not None
            and attention_mask.device.type in ["cuda", "xpu", "npu"]
            and not output_attentions
        ):
            # Attend to all tokens in fully masked rows (e.g. the first rows with left padding), as
            # required by the memory-efficient path of F.scaled_dot_product_attention.
            min_dtype = torch.finfo(dtype).min
            causal_mask = AttentionMaskConverter._unmask_unattended(causal_mask, min_dtype)

        return causal_mask
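    # Worked example (illustrative, not part of the upstream file): for `sequence_length=3`,
    # `target_length=3` and no padding, the helper below produces, per batch entry, a mask whose
    # strict upper triangle holds the dtype minimum (written as -inf here) and whose causal part is 0:
    #
    #     [[  0, -inf, -inf],
    #      [  0,    0, -inf],
    #      [  0,    0,    0]]
    #
    # Columns belonging to padded positions of a 2D `attention_mask` are additionally set to -inf.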
    @staticmethod
    def _prepare_4d_causal_attention_mask_with_cache_position(
        attention_mask: torch.Tensor,
        sequence_length: int,
        target_length: int,
        dtype: torch.dtype,
        cache_position: torch.Tensor,
        batch_size: int,
        **kwargs,
    ):
        """
        Creates a causal 4D mask of shape `(batch_size, 1, query_length, key_value_length)` from a 2D mask of shape
        `(batch_size, key_value_length)`, or if the input `attention_mask` is already 4D, do nothing.

        Args:
            attention_mask (`torch.Tensor`):
                A 2D attention mask of shape `(batch_size, key_value_length)` or a 4D attention mask of shape
                `(batch_size, 1, query_length, key_value_length)`.
            sequence_length (`int`):
                The sequence length being processed.
            target_length (`int`):
                The target length: when generating with static cache, the mask should be as long as the static cache,
                to account for the 0 padding, the part of the cache that is not filled yet.
            dtype (`torch.dtype`):
                The dtype to use for the 4D attention mask.
            cache_position (`torch.Tensor`):
                Indices depicting the position of the input sequence tokens in the sequence.
            batch_size (`torch.Tensor`):
                Batch size.
        """
        if attention_mask is not None and attention_mask.dim() == 4:
            # The mask already comes in inverted 4D form and requires no further inversion or slicing.
            causal_mask = attention_mask
        else:
            min_dtype = torch.finfo(dtype).min
            causal_mask = torch.full(
                (sequence_length, target_length), fill_value=min_dtype, dtype=dtype, device=cache_position.device
            )
            if sequence_length != 1:
                causal_mask = torch.triu(causal_mask, diagonal=1)
            causal_mask *= torch.arange(target_length, device=cache_position.device) > cache_position.reshape(-1, 1)
            causal_mask = causal_mask[None, None, :, :].expand(batch_size, 1, -1, -1)
            if attention_mask is not None:
                causal_mask = causal_mask.clone()  # copy to contiguous memory for in-place edit
                mask_length = attention_mask.shape[-1]
                padding_mask = causal_mask[:, :, :, :mask_length] + attention_mask[:, None, None, :].to(
                    causal_mask.device
                )
                padding_mask = padding_mask == 0
                causal_mask[:, :, :, :mask_length] = causal_mask[:, :, :, :mask_length].masked_fill(
                    padding_mask, min_dtype
                )

        return causal_mask


class KwargsForCausalLM(FlashAttentionKwargs, LossKwargs): ...


@auto_docstring
class PhiForCausalLM(PhiPreTrainedModel, GenerationMixin):
    _tied_weights_keys = ["lm_head.weight"]
    _tp_plan = {"lm_head": "colwise_rep"}
    _pp_plan = {"lm_head": (["hidden_states"], ["logits"])}

    def __init__(self, config):
        super().__init__(config)
        self.model = PhiModel(config)
        self.vocab_size = config.vocab_size
        self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=True)

        # Initialize weights and apply final processing
        self.post_init()

    def get_input_embeddings(self):
        return self.model.embed_tokens

    def set_input_embeddings(self, value):
        self.model.embed_tokens = value

    def get_output_embeddings(self):
        return self.lm_head

    def set_output_embeddings(self, new_embeddings):
        self.lm_head = new_embeddings

    def set_decoder(self, decoder):
        self.model = decoder

    def get_decoder(self):
        return self.model

    @can_return_tuple
    @auto_docstring
    def forward(
        self,
        input_ids: Optional[torch.LongTensor] = None,
        attention_mask: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        past_key_values: Optional[Cache] = None,
        inputs_embeds: Optional[torch.FloatTensor] = None,
        labels: Optional[torch.LongTensor] = None,
        use_cache: Optional[bool] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        cache_position: Optional[torch.LongTensor] = None,
        logits_to_keep: Union[int, torch.Tensor] = 0,
        **kwargs: Unpack[KwargsForCausalLM],
    ) -> CausalLMOutputWithPast:
        r"""
        labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
            Labels for computing the masked language modeling loss. Indices should either be in `[0, ...,
            config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored
            (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.

        Example:

        ```python
        >>> from transformers import AutoTokenizer, PhiForCausalLM

        >>> model = PhiForCausalLM.from_pretrained("meta-phi/Phi-2-7b-hf")
        >>> tokenizer = AutoTokenizer.from_pretrained("meta-phi/Phi-2-7b-hf")

        >>> prompt = "Hey, are you conscious? Can you talk to me?"
        >>> inputs = tokenizer(prompt, return_tensors="pt")

        >>> # Generate
        >>> generate_ids = model.generate(inputs.input_ids, max_length=30)
        >>> tokenizer.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0]
        "Hey, are you conscious? Can you talk to me?\nI'm not conscious, but I can talk to you."
        ```N)	r   rB   r1   r   r   r   rv   r   rs   )r   r   r   lossr   r   r3   r   r)   )rW   rv   r   r   r   r   rf   slicer   loss_functionr   r   r   r3   r   )rn   r   rB   r1   r   r   r   r   rv   r   rs   r   rO   r   r3   Zslice_indicesr   r   r)   r)   r*   r}   p  s:   '
zPhiForCausalLM.forward)NNNNNNNNNNr   )r~   r   r   Z_tied_weights_keysZ_tp_planZ_pp_planr]   r   r   r   r   r   r   r   r   r   r%   r   r   r	   r   r   r   rf   r   r   r   r}   r   r)   r)   ro   r*   r   O  sf    		
r   a  
    The Phi Model transformer with a sequence classification head on top (linear layer).

    [`PhiForSequenceClassification`] uses the last token in order to do the classification, as other causal models
    (e.g. GPT-2) do.

    Since it does classification on the last token, it requires to know the position of the last token. If a
    `pad_token_id` is defined in the configuration, it finds the last token that is not a padding token in each row. If
    no `pad_token_id` is defined, it simply takes the last value in each row of the batch. Since it cannot guess the
    padding tokens when `inputs_embeds` are passed instead of `input_ids`, it does the same (take the last value in
    each row of the batch).
    """
)
class PhiForSequenceClassification(PhiPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels
        self.model = PhiModel(config)
        self.score = nn.Linear(config.hidden_size, self.num_labels, bias=False)

        # Initialize weights and apply final processing
        self.post_init()

    def get_input_embeddings(self):
        return self.model.embed_tokens

    def set_input_embeddings(self, value):
        self.model.embed_tokens = value

    @can_return_tuple
    @auto_docstring
    def forward(
        self,
        input_ids: Optional[torch.LongTensor] = None,
        attention_mask: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        past_key_values: Optional[Cache] = None,
        inputs_embeds: Optional[torch.FloatTensor] = None,
        labels: Optional[torch.LongTensor] = None,
        use_cache: Optional[bool] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
    ) -> SequenceClassifierOutputWithPast:
        r"""
        labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
            Labels for computing the sequence classification/regression loss. Indices should be in `[0, ...,
            config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If
            `config.num_labels > 1` a classification loss is computed (Cross-Entropy).
        """
        transformer_outputs: BaseModelOutputWithPast = self.model(
            input_ids,
            attention_mask=attention_mask,
            position_ids=position_ids,
            past_key_values=past_key_values,
            inputs_embeds=inputs_embeds,
            use_cache=use_cache,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
        )
        hidden_states = transformer_outputs.last_hidden_state
        logits = self.score(hidden_states)

        if input_ids is not None:
            batch_size = input_ids.shape[0]
        else:
            batch_size = inputs_embeds.shape[0]

        if self.config.pad_token_id is None and batch_size != 1:
            raise ValueError("Cannot handle batch sizes > 1 if no padding token is defined.")
        if self.config.pad_token_id is None:
            last_non_pad_token = -1
        elif input_ids is not None:
            # To handle both left- and right-padding, take the rightmost token that is not the pad token.
            non_pad_mask = (input_ids != self.config.pad_token_id).to(logits.device, torch.int32)
            token_indices = torch.arange(input_ids.shape[-1], device=logits.device, dtype=torch.int32)
            last_non_pad_token = (token_indices * non_pad_mask).argmax(-1)
        else:
            last_non_pad_token = -1
            logger.warning_once(
                f"{self.__class__.__name__} will not detect padding tokens in `inputs_embeds`. Results may be "
                "unexpected if using padding tokens in conjunction with `inputs_embeds.`"
            )

        pooled_logits = logits[torch.arange(batch_size, device=logits.device), last_non_pad_token]

        loss = None
        if labels is not None:
            loss = self.loss_function(logits=logits, labels=labels, pooled_logits=pooled_logits, config=self.config)

        return SequenceClassifierOutputWithPast(
            loss=loss,
            logits=pooled_logits,
            past_key_values=transformer_outputs.past_key_values,
            hidden_states=transformer_outputs.hidden_states,
            attentions=transformer_outputs.attentions,
        )


@auto_docstring
class PhiForTokenClassification(PhiPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels
        self.model = PhiModel(config)
        if getattr(config, "classifier_dropout", None) is not None:
            classifier_dropout = config.classifier_dropout
        elif getattr(config, "hidden_dropout", None) is not None:
            classifier_dropout = config.hidden_dropout
        else:
            classifier_dropout = 0.1
        self.dropout = nn.Dropout(classifier_dropout)
        self.score = nn.Linear(config.hidden_size, config.num_labels)

        # Initialize weights and apply final processing
        self.post_init()

    def get_input_embeddings(self):
        return self.model.embed_tokens

    def set_input_embeddings(self, value):
        self.model.embed_tokens = value

    @can_return_tuple
    @auto_docstring
    def forward(
        self,
        input_ids: Optional[torch.LongTensor] = None,
        attention_mask: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        past_key_values: Optional[Cache] = None,
        inputs_embeds: Optional[torch.FloatTensor] = None,
        labels: Optional[torch.LongTensor] = None,
        use_cache: Optional[bool] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
    ) -> TokenClassifierOutput:
        r"""
        labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
            Labels for computing the sequence classification/regression loss. Indices should be in `[0, ...,
            config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If
            `config.num_labels > 1` a classification loss is computed (Cross-Entropy).
        """
        outputs: BaseModelOutputWithPast = self.model(
            input_ids,
            attention_mask=attention_mask,
            position_ids=position_ids,
            past_key_values=past_key_values,
            inputs_embeds=inputs_embeds,
            use_cache=use_cache,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
        )
        sequence_output = outputs.last_hidden_state
        sequence_output = self.dropout(sequence_output)
        logits = self.score(sequence_output)

        loss = None
        if labels is not None:
            loss = self.loss_function(logits, labels, self.config)

        return TokenClassifierOutput(
            loss=loss,
            logits=logits,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
        )


__all__ = [
    "PhiPreTrainedModel",
    "PhiModel",
    "PhiForCausalLM",
    "PhiForSequenceClassification",
    "PhiForTokenClassification",
]