
"""PyTorch Persimmon model."""

import math
from typing import List, Optional, Tuple, Union

import torch
import torch.utils.checkpoint
from torch import nn

from ...activations import ACT2FN
from ...cache_utils import Cache, DynamicCache
from ...generation import GenerationMixin
from ...modeling_attn_mask_utils import AttentionMaskConverter
from ...modeling_outputs import (
    BaseModelOutputWithPast,
    CausalLMOutputWithPast,
    SequenceClassifierOutputWithPast,
    TokenClassifierOutput,
)
from ...modeling_rope_utils import ROPE_INIT_FUNCTIONS, dynamic_rope_update
from ...modeling_utils import PreTrainedModel
from ...utils import auto_docstring, can_return_tuple, is_torch_flex_attn_available, logging
from .configuration_persimmon import PersimmonConfig


if is_torch_flex_attn_available():
    from torch.nn.attention.flex_attention import BlockMask

    from ...integrations.flex_attention import make_flex_block_causal_mask


logger = logging.get_logger(__name__)


class PersimmonRotaryEmbedding(nn.Module):
    def __init__(self, config: PersimmonConfig, device=None):
        super().__init__()
        # BC: "rope_type" was originally "type"
        if hasattr(config, "rope_scaling") and config.rope_scaling is not None:
            self.rope_type = config.rope_scaling.get("rope_type", config.rope_scaling.get("type"))
        else:
            self.rope_type = "default"
        self.max_seq_len_cached = config.max_position_embeddings
        self.original_max_seq_len = config.max_position_embeddings

        self.config = config
        self.rope_init_fn = ROPE_INIT_FUNCTIONS[self.rope_type]

        inv_freq, self.attention_scaling = self.rope_init_fn(self.config, device)
        self.register_buffer("inv_freq", inv_freq, persistent=False)
        self.original_inv_freq = self.inv_freq

    @torch.no_grad()
    @dynamic_rope_update
    def forward(self, x, position_ids):
        inv_freq_expanded = self.inv_freq[None, :, None].float().expand(position_ids.shape[0], -1, 1).to(x.device)
        position_ids_expanded = position_ids[:, None, :].float()

        device_type = x.device.type if isinstance(x.device.type, str) and x.device.type != "mps" else "cpu"
        with torch.autocast(device_type=device_type, enabled=False):  # force float32
            freqs = (inv_freq_expanded.float() @ position_ids_expanded.float()).transpose(1, 2)
            emb = torch.cat((freqs, freqs), dim=-1)
            cos = emb.cos() * self.attention_scaling
            sin = emb.sin() * self.attention_scaling

        return cos.to(dtype=x.dtype), sin.to(dtype=x.dtype)


def rotate_half(x):
    """Rotates half the hidden dims of the input."""
    x1 = x[..., : x.shape[-1] // 2]
    x2 = x[..., x.shape[-1] // 2 :]
    return torch.cat((-x2, x1), dim=-1)
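# Illustrative sketch (comments only, not part of the upstream file): the rotary module above turns
# position ids into per-position cos/sin tables that `apply_rotary_pos_emb` below consumes. The input
# tensor is only read for its dtype/device; shapes shown here are indicative assumptions.
# >>> import torch
# >>> from transformers import PersimmonConfig
# >>> rope = PersimmonRotaryEmbedding(PersimmonConfig())
# >>> x = torch.randn(1, 7, 64)                      # dummy activations
# >>> cos, sin = rope(x, torch.arange(7)[None, :])   # position_ids of shape (batch, seq_len)
# >>> cos.shape[:2]                                  # torch.Size([1, 7]); last dim is the rotary width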
def apply_rotary_pos_emb(q, k, cos, sin, position_ids=None, unsqueeze_dim=1):
    """Applies Rotary Position Embedding to the query and key tensors.

    Args:
        q (`torch.Tensor`): The query tensor.
        k (`torch.Tensor`): The key tensor.
        cos (`torch.Tensor`): The cosine part of the rotary embedding.
        sin (`torch.Tensor`): The sine part of the rotary embedding.
        position_ids (`torch.Tensor`, *optional*):
            Deprecated and unused.
        unsqueeze_dim (`int`, *optional*, defaults to 1):
            The 'unsqueeze_dim' argument specifies the dimension along which to unsqueeze cos[position_ids] and
            sin[position_ids] so that they can be properly broadcasted to the dimensions of q and k. For example, note
            that cos[position_ids] and sin[position_ids] have the shape [batch_size, seq_len, head_dim]. Then, if q and
            k have the shape [batch_size, heads, seq_len, head_dim], then setting unsqueeze_dim=1 makes
            cos[position_ids] and sin[position_ids] broadcastable to the shapes of q and k. Similarly, if q and k have
            the shape [batch_size, seq_len, heads, head_dim], then set unsqueeze_dim=2.
    Returns:
        `tuple(torch.Tensor)` comprising of the query and key tensors rotated using the Rotary Position Embedding.
    """
    cos = cos.unsqueeze(unsqueeze_dim)
    sin = sin.unsqueeze(unsqueeze_dim)
    q_embed = (q * cos) + (rotate_half(q) * sin)
    k_embed = (k * cos) + (rotate_half(k) * sin)
    return q_embed, k_embed


class PersimmonMLP(nn.Module):
    def __init__(self, config):
        super().__init__()
        self.dense_h_to_4h = nn.Linear(config.hidden_size, config.intermediate_size)
        self.dense_4h_to_h = nn.Linear(config.intermediate_size, config.hidden_size)
        self.act = ACT2FN[config.hidden_act]

    def forward(self, hidden_states):
        hidden_states = self.dense_h_to_4h(hidden_states)
        hidden_states = self.act(hidden_states)
        hidden_states = self.dense_4h_to_h(hidden_states)
        return hidden_states


class PersimmonAttention(nn.Module):
    """Multi-headed attention from 'Attention Is All You Need' paper"""

    def __init__(self, config: PersimmonConfig, layer_idx: Optional[int] = None):
        super().__init__()
        self.config = config
        self.layer_idx = layer_idx
        if layer_idx is None:
            logger.warning_once(
                f"Instantiating {self.__class__.__name__} without passing a `layer_idx` is not recommended and will "
                "lead to errors during the forward call if caching is used. Please make sure to provide a `layer_idx` "
                "when creating this class."
            )

        self.hidden_size = config.hidden_size
        self.num_heads = config.num_attention_heads
        self.head_dim = self.hidden_size // self.num_heads
        self.rope_theta = config.rope_theta
        self.rotary_ndims = int(self.head_dim * config.partial_rotary_factor)
        self.is_causal = True

        if (self.head_dim * self.num_heads) != self.hidden_size:
            raise ValueError(
                f"hidden_size must be divisible by num_heads (got `hidden_size`: {self.hidden_size}"
                f" and `num_heads`: {self.num_heads})."
            )
        self.query_key_value = nn.Linear(self.hidden_size, 3 * self.hidden_size, bias=True)
        self.dense = nn.Linear(self.num_heads * self.head_dim, self.hidden_size, bias=True)
        self.qk_layernorm = config.qk_layernorm

        if self.qk_layernorm:
            self.q_layernorm = nn.LayerNorm(
                config.hidden_size // self.num_heads, eps=config.layer_norm_eps, elementwise_affine=True
            )
            self.k_layernorm = nn.LayerNorm(
                config.hidden_size // self.num_heads, eps=config.layer_norm_eps, elementwise_affine=True
            )
        self.attention_dropout = nn.Dropout(config.attention_dropout)
        self.rotary_emb = PersimmonRotaryEmbedding(config=self.config)

    def _split_heads(self, fused_qkv: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
        """
        Split the last dimension into (num_heads, head_dim) without making any copies, results share same memory
        storage as `fused_qkv`

        Args:
            fused_qkv (`torch.tensor`): [batch_size, seq_length, num_heads * 3 * head_dim]

        Returns:
            query: [batch_size, seq_length, num_heads, head_dim] key: [batch_size, seq_length, num_heads, head_dim]
            value: [batch_size, seq_length, num_heads, head_dim]
        """
        batch_size, seq_length, three_times_hidden_size = fused_qkv.shape
        fused_qkv = fused_qkv.view(batch_size, seq_length, self.num_heads, 3, self.head_dim)
        return fused_qkv[..., 0, :], fused_qkv[..., 1, :], fused_qkv[..., 2, :]

    def forward(
        self,
        hidden_states: torch.Tensor,
        attention_mask: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        past_key_value: Optional[Cache] = None,
        output_attentions: bool = False,
        use_cache: bool = False,
        cache_position: Optional[torch.LongTensor] = None,
        position_embeddings: Optional[Tuple[torch.Tensor, torch.Tensor]] = None,
    ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:
        bsz, q_len, _ = hidden_states.size()

        # [batch_size, seq_length, 3 x hidden_size]
        fused_qkv = self.query_key_value(hidden_states)

        # 3 x [batch_size, seq_length, num_heads, head_dim]
        (query_states, key_states, value_states) = self._split_heads(fused_qkv)

        if self.qk_layernorm:
            query_states = self.q_layernorm(query_states)
            key_states = self.k_layernorm(key_states)

        # [batch_size, seq_length, num_heads, head_dim] -> [batch_size, num_heads, seq_length, head_dim]
        query_states = query_states.transpose(1, 2)
        value_states = value_states.transpose(1, 2)
        key_states = key_states.transpose(1, 2)

        cos, sin = position_embeddings
        # Partial rotary embedding: only the first `rotary_ndims` channels of each head are rotated
        query_rot, query_pass = (
            query_states[..., : self.rotary_ndims],
            query_states[..., self.rotary_ndims :],
        )
        key_rot, key_pass = (
            key_states[..., : self.rotary_ndims],
            key_states[..., self.rotary_ndims :],
        )
        query_rot, key_rot = apply_rotary_pos_emb(query_rot, key_rot, cos, sin)

        # [batch_size, num_heads, seq_length, head_dim]
        query_states = torch.cat((query_rot, query_pass), dim=-1)
        key_states = torch.cat((key_rot, key_pass), dim=-1)

        if past_key_value is not None:
            # Specific to RoPE models with partial rotation
            cache_kwargs = {
                "sin": sin,
                "cos": cos,
                "partial_rotation_size": self.rotary_ndims,
                "cache_position": cache_position,
            }
            key_states, value_states = past_key_value.update(key_states, value_states, self.layer_idx, cache_kwargs)

        attn_weights = torch.matmul(query_states, key_states.transpose(2, 3)) / math.sqrt(self.head_dim)

        if attention_mask is not None:  # no matter the length, we just slice it
            causal_mask = attention_mask[:, :, :, : key_states.shape[-2]]
            attn_weights = attn_weights + causal_mask

        # upcast attention to fp32
        attn_weights = nn.functional.softmax(attn_weights, dtype=torch.float32, dim=-1).to(value_states.dtype)
        attn_weights = self.attention_dropout(attn_weights)

        attn_output = torch.matmul(attn_weights, value_states)

        if attn_output.size() != (bsz, self.num_heads, q_len, self.head_dim):
            raise ValueError(
                f"`attn_output` should be of size {(bsz, self.num_heads, q_len, self.head_dim)}, but is"
                f" {attn_output.size()}"
            )

        attn_output = attn_output.transpose(1, 2).contiguous()
        attn_output = attn_output.reshape(bsz, q_len, self.hidden_size)

        attn_output = self.dense(attn_output)

        if not output_attentions:
            attn_weights = None

        return attn_output, attn_weights, past_key_value
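# Illustrative sketch (comments only, not part of the upstream file): PersimmonAttention projects to a
# fused QKV tensor and rotates only the first `rotary_ndims` channels of each head. The config values
# below are small assumed numbers chosen to keep the shapes readable.
# >>> cfg = PersimmonConfig(hidden_size=128, num_attention_heads=2, partial_rotary_factor=0.5)
# >>> attn = PersimmonAttention(cfg, layer_idx=0)
# >>> fused = attn.query_key_value(torch.randn(1, 5, 128))   # (1, 5, 3 * 128)
# >>> q, k, v = attn._split_heads(fused)                     # each (1, 5, 2, 64)
# >>> attn.rotary_ndims                                      # 32: only half of each 64-dim head is rotated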
class PersimmonDecoderLayer(nn.Module):
    def __init__(self, config: PersimmonConfig, layer_idx: int):
        super().__init__()
        self.hidden_size = config.hidden_size
        self.self_attn = PersimmonAttention(config=config, layer_idx=layer_idx)
        self.mlp = PersimmonMLP(config)
        self.input_layernorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
        self.post_attention_layernorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
        self.dropout = nn.Dropout(config.hidden_dropout)

    def forward(
        self,
        hidden_states: torch.Tensor,
        attention_mask: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        past_key_value: Optional[Tuple[torch.Tensor]] = None,
        output_attentions: Optional[bool] = False,
        use_cache: Optional[bool] = False,
        cache_position: Optional[torch.LongTensor] = None,
        position_embeddings: Optional[Tuple[torch.Tensor, torch.Tensor]] = None,
    ) -> Tuple[torch.FloatTensor, Optional[Tuple[torch.FloatTensor, torch.FloatTensor]]]:
        """
        Args:
            hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)`
            attention_mask (`torch.FloatTensor`, *optional*): attention mask of size
                `(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values.
            position_ids (`torch.LongTensor` of shape `({0})`, *optional*):
                Indices of positions of each input sequence tokens in the position embeddings. Selected in the range
                `[0, config.n_positions - 1]`.
                [What are position IDs?](../glossary#position-ids)
            past_key_value (`Tuple(torch.FloatTensor)`, *optional*):
                cached past key and value projection states
            output_attentions (`bool`, *optional*):
                Whether or not to return the attentions tensors of all attention layers. See `attentions` under
                returned tensors for more detail.
            use_cache (`bool`, *optional*):
                If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding
                (see `past_key_values`).
            cache_position (`torch.LongTensor` of shape `(sequence_length)`, *optional*):
                Indices depicting the position of the input sequence tokens in the sequence
            position_embeddings (`Tuple[torch.FloatTensor, torch.FloatTensor]`, *optional*):
                Tuple containing the cosine and sine positional embeddings of shape `(batch_size, seq_len, head_dim)`,
                with `head_dim` being the embedding dimension of each attention head.
        """
        residual = hidden_states

        hidden_states = self.input_layernorm(hidden_states)

        # Self Attention
        hidden_states, self_attn_weights, present_key_value = self.self_attn(
            hidden_states=hidden_states,
            attention_mask=attention_mask,
            position_ids=position_ids,
            past_key_value=past_key_value,
            output_attentions=output_attentions,
            use_cache=use_cache,
            cache_position=cache_position,
            position_embeddings=position_embeddings,
        )
        hidden_states = residual + hidden_states

        # Fully Connected
        residual = hidden_states
        hidden_states = self.post_attention_layernorm(hidden_states)
        hidden_states = self.mlp(hidden_states)

        hidden_states = self.dropout(hidden_states)
        hidden_states = hidden_states + residual

        outputs = (hidden_states,)

        if output_attentions:
            outputs += (self_attn_weights,)

        if use_cache:
            outputs += (present_key_value,)

        return outputs


@auto_docstring
class PersimmonPreTrainedModel(PreTrainedModel):
    config_class = PersimmonConfig
    base_model_prefix = "model"
    supports_gradient_checkpointing = True
    _no_split_modules = ["PersimmonDecoderLayer"]
    _skip_keys_device_placement = "past_key_values"
    _supports_cache_class = True
    _supports_quantized_cache = True
    _supports_static_cache = True

    def _init_weights(self, module):
        std = self.config.initializer_range
        if isinstance(module, nn.Linear):
            module.weight.data.normal_(mean=0.0, std=std)
            if module.bias is not None:
                module.bias.data.zero_()
        elif isinstance(module, nn.Embedding):
            module.weight.data.normal_(mean=0.0, std=std)
            if module.padding_idx is not None:
                module.weight.data[module.padding_idx].zero_()
        elif isinstance(module, nn.LayerNorm):
            module.weight.data.fill_(1.0)
            module.bias.data.zero_()
@auto_docstring
class PersimmonModel(PersimmonPreTrainedModel):
    """
    Transformer decoder consisting of *config.num_hidden_layers* layers. Each layer is a [`PersimmonDecoderLayer`]

    Args:
        config: PersimmonConfig
    """

    def __init__(self, config: PersimmonConfig):
        super().__init__(config)
        self.padding_idx = config.pad_token_id
        self.vocab_size = config.vocab_size

        self.embed_tokens = nn.Embedding(config.vocab_size, config.hidden_size, self.padding_idx)
        self.layers = nn.ModuleList(
            [PersimmonDecoderLayer(config, layer_idx) for layer_idx in range(config.num_hidden_layers)]
        )
        self.final_layernorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
        self.rotary_emb = PersimmonRotaryEmbedding(config=config)
        self.gradient_checkpointing = False
        # Initialize weights and apply final processing
        self.post_init()

    def get_input_embeddings(self):
        return self.embed_tokens

    def set_input_embeddings(self, value):
        self.embed_tokens = value

    @can_return_tuple
    @auto_docstring
    def forward(
        self,
        input_ids: Optional[torch.LongTensor] = None,
        attention_mask: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        past_key_values: Optional[List[torch.FloatTensor]] = None,
        inputs_embeds: Optional[torch.FloatTensor] = None,
        use_cache: Optional[bool] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        cache_position: Optional[torch.LongTensor] = None,
    ) -> BaseModelOutputWithPast:
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        use_cache = use_cache if use_cache is not None else self.config.use_cache

        if (input_ids is None) ^ (inputs_embeds is not None):
            raise ValueError("You must specify exactly one of input_ids or inputs_embeds")

        if self.gradient_checkpointing and self.training and use_cache:
            logger.warning_once(
                "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`."
            )
            use_cache = False

        return_legacy_cache = False
        if use_cache and not isinstance(past_key_values, Cache):
            return_legacy_cache = True
            if past_key_values is None:
                past_key_values = DynamicCache()
            else:
                past_key_values = DynamicCache.from_legacy_cache(past_key_values)
                logger.warning_once(
                    "We detected that you are passing `past_key_values` as a tuple of tuples. This is deprecated and "
                    "will be removed in v4.47. Please convert your cache or use an appropriate `Cache` class "
                    "(https://huggingface.co/docs/transformers/kv_cache#legacy-cache-format)"
                )

        if inputs_embeds is None:
            inputs_embeds = self.embed_tokens(input_ids)

        if cache_position is None:
            past_seen_tokens = past_key_values.get_seq_length() if past_key_values is not None else 0
            cache_position = torch.arange(
                past_seen_tokens, past_seen_tokens + inputs_embeds.shape[1], device=inputs_embeds.device
            )
        if position_ids is None:
            position_ids = cache_position.unsqueeze(0)

        causal_mask = self._update_causal_mask(
            attention_mask, inputs_embeds, cache_position, past_key_values, output_attentions
        )

        hidden_states = inputs_embeds

        # create position embeddings to be shared across the decoder layers
        position_embeddings = self.rotary_emb(hidden_states, position_ids)

        # decoder layers
        all_hidden_states = () if output_hidden_states else None
        all_self_attns = () if output_attentions else None
        next_decoder_cache = None

        for decoder_layer in self.layers:
            if output_hidden_states:
                all_hidden_states += (hidden_states,)

            if self.gradient_checkpointing and self.training:
                layer_outputs = self._gradient_checkpointing_func(
                    decoder_layer.__call__,
                    hidden_states,
                    causal_mask,
                    position_ids,
                    past_key_values,
                    output_attentions,
                    use_cache,
                    cache_position,
                    position_embeddings,
                )
            else:
                layer_outputs = decoder_layer(
                    hidden_states,
                    attention_mask=causal_mask,
                    position_ids=position_ids,
                    past_key_value=past_key_values,
                    output_attentions=output_attentions,
                    use_cache=use_cache,
                    cache_position=cache_position,
                    position_embeddings=position_embeddings,
                )

            hidden_states = layer_outputs[0]

            if use_cache:
                next_decoder_cache = layer_outputs[2 if output_attentions else 1]

            if output_attentions:
                all_self_attns += (layer_outputs[1],)

        hidden_states = self.final_layernorm(hidden_states)

        # add hidden states from the last decoder layer
        if output_hidden_states:
            all_hidden_states += (hidden_states,)

        next_cache = next_decoder_cache if use_cache else None
        if return_legacy_cache:
            next_cache = next_cache.to_legacy_cache()

        return BaseModelOutputWithPast(
            last_hidden_state=hidden_states,
            past_key_values=next_cache,
            hidden_states=all_hidden_states,
            attentions=all_self_attns,
        )

    def _update_causal_mask(
        self,
        attention_mask: Union[torch.Tensor, "BlockMask"],
        input_tensor: torch.Tensor,
        cache_position: torch.Tensor,
        past_key_values: Cache,
        output_attentions: bool = False,
    ):
        if self.config._attn_implementation == "flash_attention_2":
            if attention_mask is not None and (attention_mask == 0.0).any():
                return attention_mask
            return None
        if self.config._attn_implementation == "flex_attention":
            if isinstance(attention_mask, torch.Tensor):
                attention_mask = make_flex_block_causal_mask(attention_mask)
            return attention_mask

        past_seen_tokens = past_key_values.get_seq_length() if past_key_values is not None else 0
        using_compilable_cache = past_key_values.is_compileable if past_key_values is not None else False

        # When possible, rely on SDPA's `is_causal` argument instead of materializing an `attn_mask`
        if (
            self.config._attn_implementation == "sdpa"
            and not using_compilable_cache
            and not output_attentions
            and AttentionMaskConverter._ignore_causal_mask_sdpa(
                attention_mask,
                inputs_embeds=input_tensor,
                past_key_values_length=past_seen_tokens,
                is_training=self.training,
            )
        ):
            return None

        dtype = input_tensor.dtype
        sequence_length = input_tensor.shape[1]
        if using_compilable_cache:
            target_length = past_key_values.get_max_cache_shape()
        else:
            target_length = (
                attention_mask.shape[-1]
                if isinstance(attention_mask, torch.Tensor)
                else past_seen_tokens + sequence_length + 1
            )

        # In case the provided `attention_mask` is 2D, we generate a causal mask here (4D).
        causal_mask = self._prepare_4d_causal_attention_mask_with_cache_position(
            attention_mask,
            sequence_length=sequence_length,
            target_length=target_length,
            dtype=dtype,
            cache_position=cache_position,
            batch_size=input_tensor.shape[0],
        )

        if (
            self.config._attn_implementation == "sdpa"
            and attention_mask is not None
            and attention_mask.device.type in ["cuda", "xpu", "npu"]
            and not output_attentions
        ):
            # Attend to all tokens in fully masked rows, as required by the SDPA memory-efficient attention path
            min_dtype = torch.finfo(dtype).min
            causal_mask = AttentionMaskConverter._unmask_unattended(causal_mask, min_dtype)

        return causal_mask

    @staticmethod
    def _prepare_4d_causal_attention_mask_with_cache_position(
        attention_mask: torch.Tensor,
        sequence_length: int,
        target_length: int,
        dtype: torch.dtype,
        cache_position: torch.Tensor,
        batch_size: int,
        **kwargs,
    ):
        """
        Creates a causal 4D mask of shape `(batch_size, 1, query_length, key_value_length)` from a 2D mask of shape
        `(batch_size, key_value_length)`, or if the input `attention_mask` is already 4D, do nothing.

        Args:
            attention_mask (`torch.Tensor`):
                A 2D attention mask of shape `(batch_size, key_value_length)` or a 4D attention mask of shape
                `(batch_size, 1, query_length, key_value_length)`.
            sequence_length (`int`):
                The sequence length being processed.
            target_length (`int`):
                The target length: when generating with static cache, the mask should be as long as the static cache,
                to account for the 0 padding, the part of the cache that is not filled yet.
            dtype (`torch.dtype`):
                The dtype to use for the 4D attention mask.
            cache_position (`torch.Tensor`):
                Indices depicting the position of the input sequence tokens in the sequence.
            batch_size (`torch.Tensor`):
                Batch size.
        """
        if attention_mask is not None and attention_mask.dim() == 4:
            # The mask already comes in inverted 4D form and requires no inversion or slicing.
            causal_mask = attention_mask
        else:
            min_dtype = torch.finfo(dtype).min
            causal_mask = torch.full(
                (sequence_length, target_length), fill_value=min_dtype, dtype=dtype, device=cache_position.device
            )
            if sequence_length != 1:
                causal_mask = torch.triu(causal_mask, diagonal=1)
            causal_mask *= torch.arange(target_length, device=cache_position.device) > cache_position.reshape(-1, 1)
            causal_mask = causal_mask[None, None, :, :].expand(batch_size, 1, -1, -1)
            if attention_mask is not None:
                causal_mask = causal_mask.clone()  # copy to contiguous memory for in-place edit
                mask_length = attention_mask.shape[-1]
                padding_mask = causal_mask[:, :, :, :mask_length] + attention_mask[:, None, None, :].to(
                    causal_mask.device
                )
                padding_mask = padding_mask == 0
                causal_mask[:, :, :, :mask_length] = causal_mask[:, :, :, :mask_length].masked_fill(
                    padding_mask, min_dtype
                )

        return causal_mask
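# Illustrative sketch (comments only, not part of the upstream file): running the bare decoder stack
# above to obtain per-token hidden states; the checkpoint name mirrors the doc example further below.
# >>> from transformers import AutoTokenizer, PersimmonModel
# >>> tokenizer = AutoTokenizer.from_pretrained("adept/persimmon-8b-base")
# >>> model = PersimmonModel.from_pretrained("adept/persimmon-8b-base")
# >>> inputs = tokenizer("Hello", return_tensors="pt")
# >>> outputs = model(**inputs)
# >>> outputs.last_hidden_state.shape   # (batch_size, sequence_length, config.hidden_size)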
class PersimmonForCausalLM(PersimmonPreTrainedModel, GenerationMixin):
    _tied_weights_keys = ["lm_head.weight"]

    def __init__(self, config):
        super().__init__(config)
        self.model = PersimmonModel(config)
        self.vocab_size = config.vocab_size
        self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False)

        # Initialize weights and apply final processing
        self.post_init()

    def get_input_embeddings(self):
        return self.model.embed_tokens

    def set_input_embeddings(self, value):
        self.model.embed_tokens = value

    def get_output_embeddings(self):
        return self.lm_head

    def set_output_embeddings(self, new_embeddings):
        self.lm_head = new_embeddings

    def set_decoder(self, decoder):
        self.model = decoder

    def get_decoder(self):
        return self.model

    @can_return_tuple
    @auto_docstring
    def forward(
        self,
        input_ids: Optional[torch.LongTensor] = None,
        attention_mask: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        past_key_values: Optional[List[torch.FloatTensor]] = None,
        inputs_embeds: Optional[torch.FloatTensor] = None,
        labels: Optional[torch.LongTensor] = None,
        use_cache: Optional[bool] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        cache_position: Optional[torch.LongTensor] = None,
        logits_to_keep: Union[int, torch.Tensor] = 0,
        **kwargs,
    ) -> CausalLMOutputWithPast:
        r"""
        labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
            Labels for computing the masked language modeling loss. Indices should either be in `[0, ...,
            config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored
            (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.

        Example:

        ```python
        >>> from transformers import AutoTokenizer, PersimmonForCausalLM

        >>> model = PersimmonForCausalLM.from_pretrained("adept/persimmon-8b-base")
        >>> tokenizer = AutoTokenizer.from_pretrained("adept/persimmon-8b-base")

        >>> prompt = "human: Hey, what should I eat for dinner?"
        >>> inputs = tokenizer(prompt, return_tensors="pt")

        >>> # Generate
        >>> generate_ids = model.generate(inputs.input_ids, max_length=30)
        >>> tokenizer.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0]
        'human: Hey, what should I eat for dinner?\n\ncat: 🐱\n\nhuman: 😐\n\n'
        ```"""
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )

        outputs: BaseModelOutputWithPast = self.model(
            input_ids=input_ids,
            attention_mask=attention_mask,
            position_ids=position_ids,
            past_key_values=past_key_values,
            inputs_embeds=inputs_embeds,
            use_cache=use_cache,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            cache_position=cache_position,
        )

        hidden_states = outputs.last_hidden_state
        # Only compute necessary logits, and do not upcast them to float if we are not computing the loss
        slice_indices = slice(-logits_to_keep, None) if isinstance(logits_to_keep, int) else logits_to_keep
        logits = self.lm_head(hidden_states[:, slice_indices, :])

        loss = None
        if labels is not None:
            loss = self.loss_function(
                logits,
                labels,
                vocab_size=self.config.vocab_size,
                **kwargs,
            )

        return CausalLMOutputWithPast(
            loss=loss,
            logits=logits,
            past_key_values=outputs.past_key_values,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
        )
@auto_docstring(
    custom_intro="""
    The Persimmon transformer with a sequence classification head on top (linear layer).

    [`PersimmonForSequenceClassification`] uses the last token in order to do the classification, as other causal
    models (e.g. GPT-2) do.

    Since it does classification on the last token, it requires to know the position of the last token. If a
    `pad_token_id` is defined in the configuration, it finds the last token that is not a padding token in each row. If
    no `pad_token_id` is defined, it simply takes the last value in each row of the batch. Since it cannot guess the
    padding tokens when `inputs_embeds` are passed instead of `input_ids`, it does the same (take the last value in
    each row of the batch).
    """
)
class PersimmonForSequenceClassification(PersimmonPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels
        self.model = PersimmonModel(config)
        self.score = nn.Linear(config.hidden_size, self.num_labels, bias=False)

        # Initialize weights and apply final processing
        self.post_init()

    def get_input_embeddings(self):
        return self.model.embed_tokens

    def set_input_embeddings(self, value):
        self.model.embed_tokens = value

    @can_return_tuple
    @auto_docstring
    def forward(
        self,
        input_ids: Optional[torch.LongTensor] = None,
        attention_mask: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        past_key_values: Optional[Cache] = None,
        inputs_embeds: Optional[torch.FloatTensor] = None,
        labels: Optional[torch.LongTensor] = None,
        use_cache: Optional[bool] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
    ) -> SequenceClassifierOutputWithPast:
        r"""
        labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
            Labels for computing the sequence classification/regression loss. Indices should be in `[0, ...,
            config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If
            `config.num_labels > 1` a classification loss is computed (Cross-Entropy).
        """
        transformer_outputs: BaseModelOutputWithPast = self.model(
            input_ids,
            attention_mask=attention_mask,
            position_ids=position_ids,
            past_key_values=past_key_values,
            inputs_embeds=inputs_embeds,
            use_cache=use_cache,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
        )
        hidden_states = transformer_outputs.last_hidden_state
        logits = self.score(hidden_states)

        if input_ids is not None:
            batch_size = input_ids.shape[0]
        else:
            batch_size = inputs_embeds.shape[0]

        if self.config.pad_token_id is None and batch_size != 1:
            raise ValueError("Cannot handle batch sizes > 1 if no padding token is defined.")
        if self.config.pad_token_id is None:
            last_non_pad_token = -1
        elif input_ids is not None:
            # To handle both left- and right-padding, take the rightmost token that is not equal to pad_token_id
            non_pad_mask = (input_ids != self.config.pad_token_id).to(logits.device, torch.int32)
            token_indices = torch.arange(input_ids.shape[-1], device=logits.device, dtype=torch.int32)
            last_non_pad_token = (token_indices * non_pad_mask).argmax(-1)
        else:
            last_non_pad_token = -1
            logger.warning_once(
                f"{self.__class__.__name__} will not detect padding tokens in `inputs_embeds`. Results may be "
                "unexpected if using padding tokens in conjunction with `inputs_embeds.`"
            )

        pooled_logits = logits[torch.arange(batch_size, device=logits.device), last_non_pad_token]

        loss = None
        if labels is not None:
            loss = self.loss_function(logits=logits, labels=labels, pooled_logits=pooled_logits, config=self.config)

        return SequenceClassifierOutputWithPast(
            loss=loss,
            logits=pooled_logits,
            past_key_values=transformer_outputs.past_key_values,
            hidden_states=transformer_outputs.hidden_states,
            attentions=transformer_outputs.attentions,
        )


@auto_docstring
class PersimmonForTokenClassification(PersimmonPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels
        self.model = PersimmonModel(config)
        if getattr(config, "classifier_dropout", None) is not None:
            classifier_dropout = config.classifier_dropout
        elif getattr(config, "hidden_dropout", None) is not None:
            classifier_dropout = config.hidden_dropout
        else:
            classifier_dropout = 0.1
        self.dropout = nn.Dropout(classifier_dropout)
        self.score = nn.Linear(config.hidden_size, config.num_labels)

        # Initialize weights and apply final processing
        self.post_init()

    def get_input_embeddings(self):
        return self.model.embed_tokens

    def set_input_embeddings(self, value):
        self.model.embed_tokens = value

    @can_return_tuple
    @auto_docstring
    def forward(
        self,
        input_ids: Optional[torch.LongTensor] = None,
        attention_mask: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        past_key_values: Optional[Cache] = None,
        inputs_embeds: Optional[torch.FloatTensor] = None,
        labels: Optional[torch.LongTensor] = None,
        use_cache: Optional[bool] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
    ) -> TokenClassifierOutput:
        r"""
        labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
            Labels for computing the sequence classification/regression loss. Indices should be in `[0, ...,
            config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If
            `config.num_labels > 1` a classification loss is computed (Cross-Entropy).
        """
        outputs: BaseModelOutputWithPast = self.model(
            input_ids,
            attention_mask=attention_mask,
            position_ids=position_ids,
            past_key_values=past_key_values,
            inputs_embeds=inputs_embeds,
            use_cache=use_cache,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
        )
        sequence_output = outputs.last_hidden_state
        sequence_output = self.dropout(sequence_output)
        logits = self.score(sequence_output)

        loss = None
        if labels is not None:
            loss = self.loss_function(logits, labels, self.config)

        return TokenClassifierOutput(
            loss=loss,
            logits=logits,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
        )


__all__ = [
    "PersimmonForCausalLM",
    "PersimmonModel",
    "PersimmonPreTrainedModel",
    "PersimmonForSequenceClassification",
    "PersimmonForTokenClassification",
]
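# Illustrative sketch (comments only, not part of the upstream file): sequence classification pools the
# logits at the last non-padding position, as described in the class docstring above. The checkpoint
# name and label count are assumptions.
# >>> from transformers import AutoTokenizer, PersimmonForSequenceClassification
# >>> tokenizer = AutoTokenizer.from_pretrained("adept/persimmon-8b-base")
# >>> model = PersimmonForSequenceClassification.from_pretrained("adept/persimmon-8b-base", num_labels=2)
# >>> inputs = tokenizer("A short review.", return_tensors="pt")
# >>> model(**inputs).logits.shape   # (batch_size, num_labels): one score vector per sequence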