
"""PyTorch Phi-3 model."""

from typing import Callable, Optional, Tuple

import torch
import torch.utils.checkpoint
from torch import nn

from ...activations import ACT2FN
from ...cache_utils import Cache
from ...modeling_flash_attention_utils import FlashAttentionKwargs
from ...modeling_utils import ALL_ATTENTION_FUNCTIONS
from ...processing_utils import Unpack
from ...utils import logging
from ..mistral.modeling_mistral import (
    MistralDecoderLayer,
    MistralForCausalLM,
    MistralForSequenceClassification,
    MistralForTokenClassification,
    MistralPreTrainedModel,
    eager_attention_forward,
    rotate_half,
)
from .configuration_phi3 import Phi3Config


logger = logging.get_logger(__name__)

_CHECKPOINT_FOR_DOC = "microsoft/Phi-3-mini-4k-instruct"
_CONFIG_FOR_DOC = "Phi3Config"


class Phi3MLP(nn.Module):
    def __init__(self, config):
        super().__init__()

        self.config = config
        # A single fused projection produces both the gate and the up states of the gated MLP.
        self.gate_up_proj = nn.Linear(config.hidden_size, 2 * config.intermediate_size, bias=False)
        self.down_proj = nn.Linear(config.intermediate_size, config.hidden_size, bias=False)
        self.activation_fn = ACT2FN[config.hidden_act]

    def forward(self, hidden_states: torch.FloatTensor) -> torch.FloatTensor:
        up_states = self.gate_up_proj(hidden_states)

        gate, up_states = up_states.chunk(2, dim=-1)
        up_states = up_states * self.activation_fn(gate)

        return self.down_proj(up_states)


def apply_rotary_pos_emb(q, k, cos, sin, position_ids=None, unsqueeze_dim=1):
    """Applies Rotary Position Embedding to the query and key tensors.

    Args:
        q (`torch.Tensor`): The query tensor.
        k (`torch.Tensor`): The key tensor.
        cos (`torch.Tensor`): The cosine part of the rotary embedding.
        sin (`torch.Tensor`): The sine part of the rotary embedding.
        position_ids (`torch.Tensor`, *optional*):
            Deprecated and unused.
        unsqueeze_dim (`int`, *optional*, defaults to 1):
            The 'unsqueeze_dim' argument specifies the dimension along which to unsqueeze cos[position_ids] and
            sin[position_ids] so that they can be properly broadcasted to the dimensions of q and k. For example, note
            that cos[position_ids] and sin[position_ids] have the shape [batch_size, seq_len, head_dim]. Then, if q and
            k have the shape [batch_size, heads, seq_len, head_dim], then setting unsqueeze_dim=1 makes
            cos[position_ids] and sin[position_ids] broadcastable to the shapes of q and k. Similarly, if q and k have
            the shape [batch_size, seq_len, heads, head_dim], then set unsqueeze_dim=2.
    Returns:
        `tuple(torch.Tensor)` comprising of the query and key tensors rotated using the Rotary Position Embedding.
    """
    cos = cos.unsqueeze(unsqueeze_dim)
    sin = sin.unsqueeze(unsqueeze_dim)

    # Partial rotary embedding: only the first `rotary_dim` channels of each head are rotated,
    # the remaining channels pass through unchanged.
    rotary_dim = cos.shape[-1]
    q_rot, q_pass = q[..., :rotary_dim], q[..., rotary_dim:]
    k_rot, k_pass = k[..., :rotary_dim], k[..., rotary_dim:]

    q_embed = torch.cat([(q_rot * cos) + (rotate_half(q_rot) * sin), q_pass], dim=-1)
    k_embed = torch.cat([(k_rot * cos) + (rotate_half(k_rot) * sin), k_pass], dim=-1)
    return q_embed, k_embed


class Phi3Attention(nn.Module):
    """Multi-headed attention from 'Attention Is All You Need' paper"""

    def __init__(self, config: Phi3Config, layer_idx: Optional[int] = None):
        super().__init__()
        self.config = config
        self.layer_idx = layer_idx
        self.head_dim = getattr(config, "head_dim", config.hidden_size // config.num_attention_heads)
        self.num_key_value_heads = config.num_key_value_heads
        self.num_key_value_groups = config.num_attention_heads // config.num_key_value_heads
        self.scaling = self.head_dim**-0.5
        self.attention_dropout = config.attention_dropout
        self.is_causal = True

        # Queries, keys and values come out of a single fused projection.
        op_size = config.num_attention_heads * self.head_dim + 2 * (config.num_key_value_heads * self.head_dim)
        self.o_proj = nn.Linear(config.num_attention_heads * self.head_dim, config.hidden_size, bias=False)
        self.qkv_proj = nn.Linear(config.hidden_size, op_size, bias=False)

    def forward(
        self,
        hidden_states: torch.Tensor,
        position_embeddings: Tuple[torch.Tensor, torch.Tensor],
        attention_mask: Optional[torch.Tensor],
        past_key_value: Optional[Cache] = None,
        cache_position: Optional[torch.LongTensor] = None,
        **kwargs: Unpack[FlashAttentionKwargs],
    ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:
        input_shape = hidden_states.shape[:-1]
        hidden_shape = (*input_shape, -1, self.head_dim)

        # Split the fused projection into query, key and value slices.
        qkv = self.qkv_proj(hidden_states)
        query_pos = self.config.num_attention_heads * self.head_dim
        query_states = qkv[..., :query_pos]
        key_states = qkv[..., query_pos : query_pos + self.num_key_value_heads * self.head_dim]
        value_states = qkv[..., query_pos + self.num_key_value_heads * self.head_dim :]

        query_states = query_states.view(hidden_shape).transpose(1, 2)
        key_states = key_states.view(hidden_shape).transpose(1, 2)
        value_states = value_states.view(hidden_shape).transpose(1, 2)

        cos, sin = position_embeddings
        query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin)

        if past_key_value is not None:
            # sin and cos are specific to RoPE models; cache_position needed for the static cache
            cache_kwargs = {"sin": sin, "cos": cos, "cache_position": cache_position}
            key_states, value_states = past_key_value.update(key_states, value_states, self.layer_idx, cache_kwargs)

        attention_interface: Callable = eager_attention_forward
        if self.config._attn_implementation != "eager":
            if self.config._attn_implementation == "sdpa" and kwargs.get("output_attentions", False):
                logger.warning_once(
                    "`torch.nn.functional.scaled_dot_product_attention` does not support `output_attentions=True`."
                    " Falling back to eager attention. This warning can be removed using the argument"
                    ' `attn_implementation="eager"` when loading the model.'
                )
            else:
                attention_interface = ALL_ATTENTION_FUNCTIONS[self.config._attn_implementation]

        attn_output, attn_weights = attention_interface(
            self,
            query_states,
            key_states,
            value_states,
            attention_mask,
            dropout=0.0 if not self.training else self.attention_dropout,
            scaling=self.scaling,
            sliding_window=getattr(self.config, "sliding_window", None),
            **kwargs,
        )

        attn_output = attn_output.reshape(*input_shape, -1).contiguous()
        attn_output = self.o_proj(attn_output)
        return attn_output, attn_weights


class Phi3DecoderLayer(MistralDecoderLayer):
    def __init__(self, config: Phi3Config, layer_idx: int):
        super().__init__(config, layer_idx)
        self.config = config
        self.self_attn = Phi3Attention(config=config, layer_idx=layer_idx)
        self.mlp = Phi3MLP(config)
        self.resid_attn_dropout = nn.Dropout(config.resid_pdrop)
        self.resid_mlp_dropout = nn.Dropout(config.resid_pdrop)

    def forward(
        self,
        hidden_states: torch.Tensor,
        attention_mask: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        past_key_value: Optional[Cache] = None,
        output_attentions: Optional[bool] = False,
        use_cache: Optional[bool] = False,
        cache_position: Optional[torch.LongTensor] = None,
        position_embeddings: Optional[Tuple[torch.Tensor, torch.Tensor]] = None,
        **kwargs: Unpack[FlashAttentionKwargs],
    ) -> Tuple[torch.FloatTensor, Optional[Tuple[torch.FloatTensor, torch.FloatTensor]]]:
        """
        Args:
            hidden_states (`torch.FloatTensor`):
                input to the layer of shape `(batch, seq_len, embed_dim)`
            attention_mask (`torch.FloatTensor`, *optional*): attention mask of size
                `(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values.
            position_ids (`torch.LongTensor` of shape `({0})`, *optional*):
                Indices of positions of each input sequence tokens in the position embeddings. Selected in the range
                `[0, config.n_positions - 1]`. [What are position IDs?](../glossary#position-ids)
            past_key_value (`Cache`, *optional*): cached past key and value projection states
            output_attentions (`bool`, *optional*):
                Whether or not to return the attentions tensors of all attention layers. See `attentions` under
                returned tensors for more detail.
            use_cache (`bool`, *optional*):
                If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding
                (see `past_key_values`).
            cache_position (`torch.LongTensor` of shape `(sequence_length)`, *optional*):
                Indices depicting the position of the input sequence tokens in the sequence
            kwargs (`dict`, *optional*):
                Arbitrary kwargs to be ignored, used for FSDP and other methods that injects code
                into the model
        """
        residual = hidden_states

        hidden_states = self.input_layernorm(hidden_states)

        # Self Attention
        hidden_states, self_attn_weights = self.self_attn(
            hidden_states=hidden_states,
            attention_mask=attention_mask,
            position_ids=position_ids,
            past_key_value=past_key_value,
            output_attentions=output_attentions,
            use_cache=use_cache,
            cache_position=cache_position,
            position_embeddings=position_embeddings,
            **kwargs,
        )
        hidden_states = residual + self.resid_attn_dropout(hidden_states)

        residual = hidden_states
        hidden_states = self.post_attention_layernorm(hidden_states)
        hidden_states = self.mlp(hidden_states)
        hidden_states = residual + self.resid_mlp_dropout(hidden_states)

        outputs = (hidden_states,)
        if output_attentions:
            outputs += (self_attn_weights,)

        return outputs


class Phi3PreTrainedModel(MistralPreTrainedModel):
    _version = "0.0.5"


class Phi3ForCausalLM(MistralForCausalLM):
    def prepare_inputs_for_generation(
        self,
        input_ids,
        past_key_values=None,
        attention_mask=None,
        inputs_embeds=None,
        cache_position=None,
        position_ids=None,
        use_cache=True,
        logits_to_keep=None,
        **kwargs,
    ):
        # Overwritten -- once the prompt grows past `original_max_position_embeddings`, Phi-3 switches from the
        # short to the long rope scaling factors, which invalidates a cache built with the short factors, so the
        # cache is dropped and recomputed at that point.
        if (
            past_key_values
            and self.config.rope_scaling
            and input_ids.shape[1] >= self.config.original_max_position_embeddings + 1
        ):
            past_length = cache_position[0]
            if past_length <= self.config.original_max_position_embeddings:
                past_key_values = None

        model_inputs = super().prepare_inputs_for_generation(
            input_ids=input_ids,
            past_key_values=past_key_values,
            attention_mask=attention_mask,
            inputs_embeds=inputs_embeds,
            cache_position=cache_position,
            position_ids=position_ids,
            use_cache=use_cache,
            logits_to_keep=logits_to_keep,
            **kwargs,
        )
        return model_inputs


class Phi3ForSequenceClassification(MistralForSequenceClassification):
    pass


class Phi3ForTokenClassification(MistralForTokenClassification):
    pass


__all__ = [
    "Phi3PreTrainedModel",
    "Phi3Model",
    "Phi3ForCausalLM",
    "Phi3ForSequenceClassification",
    "Phi3ForTokenClassification",
]
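

# ---------------------------------------------------------------------------
# Illustrative usage sketch (not part of the upstream module). It shows two
# things: (1) `apply_rotary_pos_emb` above rotates only the first `rotary_dim`
# channels of each head and passes the rest through, and (2) the usual way the
# exported classes are reached via the auto classes. The checkpoint name is
# taken from `_CHECKPOINT_FOR_DOC`; the tensor shapes and prompt are assumed
# example values. Kept commented out so importing this module stays
# side-effect free.
#
# q = k = torch.randn(1, 2, 4, 8)   # (batch, num_heads, seq_len, head_dim=8)
# cos = torch.ones(1, 4, 6)         # rotary table covers only 6 of 8 channels
# sin = torch.zeros(1, 4, 6)
# q_out, _ = apply_rotary_pos_emb(q, k, cos, sin)
# assert torch.equal(q_out, q)      # cos=1, sin=0 is the identity rotation; the
#                                   # last 2 channels are passed through as-is
#
# from transformers import AutoModelForCausalLM, AutoTokenizer
#
# tokenizer = AutoTokenizer.from_pretrained("microsoft/Phi-3-mini-4k-instruct")
# model = AutoModelForCausalLM.from_pretrained("microsoft/Phi-3-mini-4k-instruct")
# inputs = tokenizer("Hello, Phi-3!", return_tensors="pt")
# output_ids = model.generate(**inputs, max_new_tokens=16)
# print(tokenizer.decode(output_ids[0], skip_special_tokens=True))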