
import math
import warnings
from dataclasses import dataclass
from typing import Any, Callable, Optional, Tuple, Union

import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from torch.nn.init import _calculate_fan_in_and_fan_out

from ...activations import ACT2FN
from ...modeling_attn_mask_utils import _prepare_4d_attention_mask
from ...modeling_layers import GradientCheckpointingLayer
from ...modeling_outputs import BaseModelOutput, BaseModelOutputWithPooling, ImageClassifierOutput
from ...modeling_utils import ALL_ATTENTION_FUNCTIONS, PreTrainedModel
from ...utils import ModelOutput, auto_docstring, can_return_tuple, logging
from .configuration_siglip2 import Siglip2Config, Siglip2TextConfig, Siglip2VisionConfig


logger = logging.get_logger(__name__)

@dataclass
class Siglip2VisionOutput(ModelOutput):
    r"""
    Base class for vision model's outputs that also contains image embeddings of the pooling of the last hidden states.

    Args:
        image_embeds (`torch.FloatTensor` of shape `(batch_size, output_dim)`, *optional*, returned when model is initialized with `with_projection=True`):
            The image embeddings obtained by applying the projection layer to the pooler_output.
        last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`):
            Sequence of hidden-states at the output of the last layer of the model.
        hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
            Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, +
            one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`.

            Hidden-states of the model at the output of each layer plus the optional initial embedding outputs.
        attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
            Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
            sequence_length)`.

            Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
            heads.
    """

    image_embeds: Optional[torch.FloatTensor] = None
    last_hidden_state: Optional[torch.FloatTensor] = None
    hidden_states: Optional[Tuple[torch.FloatTensor, ...]] = None
    attentions: Optional[Tuple[torch.FloatTensor, ...]] = None

@dataclass
class Siglip2TextOutput(ModelOutput):
    r"""
    Base class for text model's outputs that also contains a pooling of the last hidden states.

    Args:
        text_embeds (`torch.FloatTensor` of shape `(batch_size, output_dim)`, *optional*, returned when model is initialized with `with_projection=True`):
            The text embeddings obtained by applying the projection layer to the pooler_output.
        last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`):
            Sequence of hidden-states at the output of the last layer of the model.
        hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
            Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, +
            one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`.

            Hidden-states of the model at the output of each layer plus the optional initial embedding outputs.
        attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
            Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
            sequence_length)`.

            Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
            heads.
    """

    text_embeds: Optional[torch.FloatTensor] = None
    last_hidden_state: Optional[torch.FloatTensor] = None
    hidden_states: Optional[Tuple[torch.FloatTensor, ...]] = None
    attentions: Optional[Tuple[torch.FloatTensor, ...]] = None

@dataclass
class Siglip2Output(ModelOutput):
    r"""
    Args:
        loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `return_loss` is `True`):
            Contrastive loss for image-text similarity.
        logits_per_image (`torch.FloatTensor` of shape `(image_batch_size, text_batch_size)`):
            The scaled dot product scores between `image_embeds` and `text_embeds`. This represents the image-text
            similarity scores.
        logits_per_text (`torch.FloatTensor` of shape `(text_batch_size, image_batch_size)`):
            The scaled dot product scores between `text_embeds` and `image_embeds`. This represents the text-image
            similarity scores.
        text_embeds (`torch.FloatTensor` of shape `(batch_size, output_dim)`):
            The text embeddings obtained by applying the projection layer to the pooled output of [`Siglip2TextModel`].
        image_embeds (`torch.FloatTensor` of shape `(batch_size, output_dim)`):
            The image embeddings obtained by applying the projection layer to the pooled output of [`Siglip2VisionModel`].
        text_model_output (`BaseModelOutputWithPooling`):
            The output of the [`Siglip2TextModel`].
        vision_model_output (`BaseModelOutputWithPooling`):
            The output of the [`Siglip2VisionModel`].
    """

    loss: Optional[torch.FloatTensor] = None
    logits_per_image: Optional[torch.FloatTensor] = None
    logits_per_text: Optional[torch.FloatTensor] = None
    text_embeds: Optional[torch.FloatTensor] = None
    image_embeds: Optional[torch.FloatTensor] = None
    text_model_output: BaseModelOutputWithPooling = None
    vision_model_output: BaseModelOutputWithPooling = None

    def to_tuple(self) -> Tuple[Any]:
        return tuple(
            self[k] if k not in ["text_model_output", "vision_model_output"] else getattr(self, k).to_tuple()
            for k in self.keys()
        )

class Siglip2VisionEmbeddings(nn.Module):
    def __init__(self, config: Siglip2VisionConfig):
        super().__init__()
        self.config = config
        self.embed_dim = config.hidden_size
        self.patch_size = config.patch_size

        self.patch_embedding = nn.Linear(
            in_features=config.num_channels * self.patch_size * self.patch_size,
            out_features=self.embed_dim,
        )

        self.num_patches = config.num_patches
        self.position_embedding_size = int(self.num_patches**0.5)
        self.position_embedding = nn.Embedding(self.num_patches, self.embed_dim)

    @staticmethod
    def resize_positional_embeddings(
        positional_embeddings: torch.Tensor,
        spatial_shapes: torch.LongTensor,
        max_length: int,
    ) -> torch.Tensor:
        """
        Resize positional embeddings to image-specific size and pad to a fixed size.

        Args:
            positional_embeddings (`torch.Tensor`):
                Position embeddings of shape (height, width, embed_dim)
            spatial_shapes (`torch.LongTensor`):
                Spatial shapes of shape (batch_size, 2) to resize the positional embeddings to
            max_length (`int`):
                Maximum length of the positional embeddings to pad resized positional embeddings to

        Returns:
            `torch.Tensor`: Embeddings of shape (batch_size, max_length, embed_dim)
        """
        batch_size = spatial_shapes.shape[0]
        embed_dim = positional_embeddings.shape[-1]
        source_dtype = positional_embeddings.dtype

        resulted_positional_embeddings = torch.empty(
            (batch_size, max_length, embed_dim),
            device=positional_embeddings.device,
            dtype=source_dtype,
        )

        # (height, width, embed_dim) -> (1, embed_dim, height, width) for interpolation
        positional_embeddings = positional_embeddings.permute(2, 0, 1).unsqueeze(0)

        # Upcast to float32 on CPU because antialiased interpolation is not supported for half precision there
        if positional_embeddings.device.type == "cpu":
            positional_embeddings = positional_embeddings.to(torch.float32)

        for i in range(batch_size):
            # (1, dim, height, width) -> (1, dim, target_height, target_width)
            height, width = spatial_shapes[i]
            resized_embeddings = F.interpolate(
                positional_embeddings,
                size=(height, width),
                mode="bilinear",
                align_corners=False,
                antialias=True,
            )

            # (1, dim, target_height, target_width) -> (target_height * target_width, dim)
            resized_embeddings = resized_embeddings.reshape(embed_dim, height * width).transpose(0, 1)
            resized_embeddings = resized_embeddings.to(source_dtype)

            resulted_positional_embeddings[i, : height * width] = resized_embeddings
            resulted_positional_embeddings[i, height * width :] = resized_embeddings[0]

        return resulted_positional_embeddings

    def forward(self, pixel_values: torch.FloatTensor, spatial_shapes: torch.LongTensor) -> torch.Tensor:
        """
        Args:
            pixel_values (`torch.FloatTensor`):
                Pixel values of shape (batch_size, max_num_patches, num_channels * patch_size * patch_size)
            spatial_shapes (`List[Tuple[int, int]]`):
                Spatial shapes of shape (batch_size, 2) to resize the positional embeddings to
        """
        # Apply patch embeddings to the already patchified pixel values
        target_dtype = self.patch_embedding.weight.dtype
        patch_embeds = self.patch_embedding(pixel_values.to(dtype=target_dtype))

        # Resize positional embeddings to each image's grid and pad to max_length
        positional_embeddings = self.position_embedding.weight.reshape(
            self.position_embedding_size, self.position_embedding_size, -1
        )
        resized_positional_embeddings = self.resize_positional_embeddings(
            positional_embeddings, spatial_shapes, max_length=pixel_values.shape[1]
        )

        embeddings = patch_embeds + resized_positional_embeddings
        return embeddings

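# Illustrative sketch, not part of the original module: rough shapes expected by
# `Siglip2VisionEmbeddings`. The config values below (patch_size=16, num_channels=3,
# hidden_size=768, num_patches=256) are hypothetical stand-ins; real values come from
# `Siglip2VisionConfig`. The processor pre-patchifies each image into a padded
# (batch, max_num_patches, C * P * P) tensor plus a per-image (height, width) patch grid.
def _example_vision_embedding_shapes():
    class _Cfg:  # hypothetical stand-in for Siglip2VisionConfig
        hidden_size = 768
        patch_size = 16
        num_channels = 3
        num_patches = 256

    embeddings = Siglip2VisionEmbeddings(_Cfg())
    pixel_values = torch.randn(2, 256, 3 * 16 * 16)     # (batch, max_num_patches, C*P*P)
    spatial_shapes = torch.tensor([[16, 16], [8, 12]])  # per-image (height, width) in patches
    out = embeddings(pixel_values, spatial_shapes)
    assert out.shape == (2, 256, 768)
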
def eager_attention_forward(
    module: nn.Module,
    query: torch.Tensor,
    key: torch.Tensor,
    value: torch.Tensor,
    attention_mask: Optional[torch.Tensor],
    scaling: float,
    dropout: float = 0.0,
    **kwargs,
):
    attn_weights = torch.matmul(query, key.transpose(2, 3)) * scaling
    if attention_mask is not None:
        attn_weights = attn_weights + attention_mask

    attn_weights = nn.functional.softmax(attn_weights, dim=-1, dtype=torch.float32).to(query.dtype)
    attn_weights = nn.functional.dropout(attn_weights, p=dropout, training=module.training)

    attn_output = torch.matmul(attn_weights, value)
    attn_output = attn_output.transpose(1, 2).contiguous()

    return attn_output, attn_weights


class Siglip2Attention(nn.Module):
    """Multi-headed attention from 'Attention Is All You Need' paper"""

    def __init__(self, config: Union[Siglip2VisionConfig, Siglip2TextConfig]):
        super().__init__()
        self.config = config
        self.embed_dim = config.hidden_size
        self.num_heads = config.num_attention_heads
        self.head_dim = self.embed_dim // self.num_heads
        if self.head_dim * self.num_heads != self.embed_dim:
            raise ValueError(
                f"embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim} and `num_heads`:"
                f" {self.num_heads})."
            )
        self.scale = self.head_dim**-0.5
        self.dropout = config.attention_dropout
        self.is_causal = False

        self.k_proj = nn.Linear(self.embed_dim, self.embed_dim)
        self.v_proj = nn.Linear(self.embed_dim, self.embed_dim)
        self.q_proj = nn.Linear(self.embed_dim, self.embed_dim)
        self.out_proj = nn.Linear(self.embed_dim, self.embed_dim)

    def forward(
        self,
        hidden_states: torch.Tensor,
        attention_mask: Optional[torch.Tensor] = None,
        output_attentions: Optional[bool] = False,
    ) -> Tuple[torch.Tensor, Optional[torch.Tensor]]:
        """Input shape: Batch x Time x Channel"""

        batch_size, seq_length, embed_dim = hidden_states.shape

        queries = self.q_proj(hidden_states)
        keys = self.k_proj(hidden_states)
        values = self.v_proj(hidden_states)

        queries = queries.view(batch_size, seq_length, self.num_heads, self.head_dim).transpose(1, 2)
        keys = keys.view(batch_size, seq_length, self.num_heads, self.head_dim).transpose(1, 2)
        values = values.view(batch_size, seq_length, self.num_heads, self.head_dim).transpose(1, 2)

        attention_interface: Callable = eager_attention_forward
        if self.config._attn_implementation != "eager":
            if self.config._attn_implementation == "sdpa" and output_attentions:
                logger.warning_once(
                    "`torch.nn.functional.scaled_dot_product_attention` does not support `output_attentions=True`. "
                    "Falling back to eager attention. This warning can be removed using the argument "
                    '`attn_implementation="eager"` when loading the model.'
                )
            else:
                attention_interface = ALL_ATTENTION_FUNCTIONS[self.config._attn_implementation]

        attn_output, attn_weights = attention_interface(
            self,
            queries,
            keys,
            values,
            attention_mask,
            is_causal=self.is_causal,
            scaling=self.scale,
            dropout=0.0 if not self.training else self.dropout,
        )

        attn_output = attn_output.reshape(batch_size, seq_length, embed_dim).contiguous()
        attn_output = self.out_proj(attn_output)

        if not output_attentions:
            attn_weights = None

        return attn_output, attn_weights


class Siglip2MLP(nn.Module):
    def __init__(self, config):
        super().__init__()
        self.config = config
        self.activation_fn = ACT2FN[config.hidden_act]
        self.fc1 = nn.Linear(config.hidden_size, config.intermediate_size)
        self.fc2 = nn.Linear(config.intermediate_size, config.hidden_size)

    def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
        hidden_states = self.fc1(hidden_states)
        hidden_states = self.activation_fn(hidden_states)
        hidden_states = self.fc2(hidden_states)
        return hidden_states

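# Illustrative sketch, not part of the original module: the tensor layout
# `eager_attention_forward` above expects, assuming 2 heads of size 4. Query/key/value come
# in as (batch, num_heads, seq_len, head_dim); the output is returned transposed back to
# (batch, seq_len, num_heads, head_dim) so the caller can flatten the last two dimensions.
def _example_eager_attention_shapes():
    module = nn.Module()  # only `.training` is read inside eager_attention_forward
    q = torch.randn(1, 2, 5, 4)
    k = torch.randn(1, 2, 5, 4)
    v = torch.randn(1, 2, 5, 4)
    out, weights = eager_attention_forward(module, q, k, v, attention_mask=None, scaling=4**-0.5)
    assert out.shape == (1, 5, 2, 4)
    assert weights.shape == (1, 2, 5, 5)
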
class Siglip2EncoderLayer(GradientCheckpointingLayer):
    def __init__(self, config: Union[Siglip2VisionConfig, Siglip2TextConfig]):
        super().__init__()
        self.embed_dim = config.hidden_size
        self.layer_norm1 = nn.LayerNorm(self.embed_dim, eps=config.layer_norm_eps)
        self.self_attn = Siglip2Attention(config)
        self.layer_norm2 = nn.LayerNorm(self.embed_dim, eps=config.layer_norm_eps)
        self.mlp = Siglip2MLP(config)

    def forward(
        self,
        hidden_states: torch.Tensor,
        attention_mask: torch.Tensor,
        output_attentions: Optional[bool] = False,
    ) -> Tuple[torch.FloatTensor]:
        """
        Args:
            hidden_states (`torch.FloatTensor`):
                Input to the layer of shape `(batch, seq_len, embed_dim)`.
            attention_mask (`torch.FloatTensor`):
                Attention mask of shape `(batch, 1, q_len, k_v_seq_len)` where padding elements are indicated by very large negative values.
            output_attentions (`bool`, *optional*, defaults to `False`):
                Whether or not to return the attentions tensors of all attention layers. See `attentions` under
                returned tensors for more detail.
        """
        residual = hidden_states

        hidden_states = self.layer_norm1(hidden_states)
        hidden_states, attn_weights = self.self_attn(
            hidden_states=hidden_states,
            attention_mask=attention_mask,
            output_attentions=output_attentions,
        )
        hidden_states = residual + hidden_states

        residual = hidden_states
        hidden_states = self.layer_norm2(hidden_states)
        hidden_states = self.mlp(hidden_states)
        hidden_states = residual + hidden_states

        outputs = (hidden_states,)

        if output_attentions:
            outputs += (attn_weights,)

        return outputs

class Siglip2Encoder(nn.Module):
    """
    Transformer encoder consisting of `config.num_hidden_layers` self attention layers. Each layer is a
    [`Siglip2EncoderLayer`].

    Args:
        config: Siglip2Config
    """

    def __init__(self, config: Siglip2Config):
        super().__init__()
        self.config = config
        self.layers = nn.ModuleList([Siglip2EncoderLayer(config) for _ in range(config.num_hidden_layers)])
        self.gradient_checkpointing = False

    @can_return_tuple
    def forward(
        self,
        inputs_embeds,
        attention_mask: Optional[torch.Tensor] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
    ) -> BaseModelOutput:
        r"""
        Args:
            inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`):
                Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation.
                This is useful if you want more control over how to convert `input_ids` indices into associated vectors
                than the model's internal embedding lookup matrix.
            attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
                Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:

                - 1 for tokens that are **not masked**,
                - 0 for tokens that are **masked**.

                [What are attention masks?](../glossary#attention-mask)
            output_attentions (`bool`, *optional*):
                Whether or not to return the attentions tensors of all attention layers. See `attentions` under
                returned tensors for more detail.
            output_hidden_states (`bool`, *optional*):
                Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors
                for more detail.
            return_dict (`bool`, *optional*):
                Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
        """
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )

        encoder_states = () if output_hidden_states else None
        all_attentions = () if output_attentions else None

        hidden_states = inputs_embeds
        for encoder_layer in self.layers:
            if output_hidden_states:
                encoder_states = encoder_states + (hidden_states,)

            layer_outputs = encoder_layer(
                hidden_states,
                attention_mask,
                output_attentions=output_attentions,
            )

            hidden_states = layer_outputs[0]

            if output_attentions:
                all_attentions = all_attentions + (layer_outputs[1],)

        if output_hidden_states:
            encoder_states = encoder_states + (hidden_states,)

        return BaseModelOutput(
            last_hidden_state=hidden_states, hidden_states=encoder_states, attentions=all_attentions
        )

class Siglip2VisionTransformer(nn.Module):
    def __init__(self, config: Siglip2VisionConfig):
        super().__init__()
        self.config = config
        embed_dim = config.hidden_size

        self.embeddings = Siglip2VisionEmbeddings(config)
        self.encoder = Siglip2Encoder(config)
        self.post_layernorm = nn.LayerNorm(embed_dim, eps=config.layer_norm_eps)
        self.use_head = True if not hasattr(config, "vision_use_head") else config.vision_use_head
        if self.use_head:
            self.head = Siglip2MultiheadAttentionPoolingHead(config)
        self._use_flash_attention_2 = config._attn_implementation == "flash_attention_2"

    @can_return_tuple
    @auto_docstring
    def forward(
        self,
        pixel_values: torch.FloatTensor,
        attention_mask: torch.Tensor,
        spatial_shapes: torch.LongTensor,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
    ) -> BaseModelOutputWithPooling:
        r"""
        spatial_shapes (`torch.LongTensor` of shape `(batch_size, 2)`):
            Tensor containing the spatial dimensions (height, width) of the input images.
        """
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )

        hidden_states = self.embeddings(pixel_values, spatial_shapes)

        if attention_mask is not None and not self._use_flash_attention_2:
            # [batch_size, seq_len] -> [batch_size, 1, tgt_seq_len, src_seq_len]
            encoder_attention_mask = _prepare_4d_attention_mask(attention_mask, hidden_states.dtype)
        else:
            encoder_attention_mask = attention_mask

        encoder_outputs = self.encoder(
            inputs_embeds=hidden_states,
            attention_mask=encoder_attention_mask,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
        )

        last_hidden_state = encoder_outputs.last_hidden_state
        last_hidden_state = self.post_layernorm(last_hidden_state)

        pooler_output = self.head(last_hidden_state, attention_mask) if self.use_head else None

        return BaseModelOutputWithPooling(
            last_hidden_state=last_hidden_state,
            pooler_output=pooler_output,
            hidden_states=encoder_outputs.hidden_states,
            attentions=encoder_outputs.attentions,
        )


class Siglip2TextEmbeddings(nn.Module):
    def __init__(self, config: Siglip2TextConfig):
        super().__init__()
        embed_dim = config.hidden_size

        self.token_embedding = nn.Embedding(config.vocab_size, embed_dim)
        self.position_embedding = nn.Embedding(config.max_position_embeddings, embed_dim)

        # position_ids (1, len position emb) is contiguous in memory and exported when serialized
        self.register_buffer(
            "position_ids", torch.arange(config.max_position_embeddings).expand((1, -1)), persistent=False
        )

    def forward(
        self,
        input_ids: Optional[torch.LongTensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        inputs_embeds: Optional[torch.FloatTensor] = None,
    ) -> torch.Tensor:
        seq_length = input_ids.shape[-1] if input_ids is not None else inputs_embeds.shape[-2]
        max_position_embedding = self.position_embedding.weight.shape[0]

        if seq_length > max_position_embedding:
            raise ValueError(
                f"Sequence length must be less than max_position_embeddings (got `sequence length`: "
                f"{seq_length} and max_position_embeddings: {max_position_embedding}"
            )

        if position_ids is None:
            position_ids = self.position_ids[:, :seq_length]

        if inputs_embeds is None:
            inputs_embeds = self.token_embedding(input_ids)

        position_embeddings = self.position_embedding(position_ids)
        embeddings = inputs_embeds + position_embeddings

        return embeddings


def _trunc_normal_(tensor, mean, std, a, b):
    # Method based on the truncated normal sampling used by timm / PyTorch's trunc_normal_
    def norm_cdf(x):
        # Computes standard normal cumulative distribution function
        return (1.0 + math.erf(x / math.sqrt(2.0))) / 2.0

    if (mean < a - 2 * std) or (mean > b + 2 * std):
        warnings.warn(
            "mean is more than 2 std from [a, b] in nn.init.trunc_normal_. "
            "The distribution of values may be incorrect.",
            stacklevel=2,
        )

    # Get upper and lower cdf values
    l = norm_cdf((a - mean) / std)
    u = norm_cdf((b - mean) / std)

    # Uniformly fill tensor with values from [l, u], then translate to [2l-1, 2u-1]
    tensor.uniform_(2 * l - 1, 2 * u - 1)

    # Use inverse cdf transform for the normal distribution to get truncated standard normal
    tensor.erfinv_()

    # Transform to proper mean, std
    tensor.mul_(std * math.sqrt(2.0))
    tensor.add_(mean)

    # Clamp to ensure it's in the proper range
    tensor.clamp_(min=a, max=b)

def trunc_normal_tf_(
    tensor: torch.Tensor, mean: float = 0.0, std: float = 1.0, a: float = -2.0, b: float = 2.0
) -> torch.Tensor:
    """Fills the input Tensor with values drawn from a truncated
    normal distribution. The values are effectively drawn from the
    normal distribution :math:`\mathcal{N}(\text{mean}, \text{std}^2)`
    with values outside :math:`[a, b]` redrawn until they are within
    the bounds. The method used for generating the random values works
    best when :math:`a \leq \text{mean} \leq b`.

    NOTE: this 'tf' variant behaves closer to Tensorflow / JAX impl where the
    bounds [a, b] are applied when sampling the normal distribution with mean=0, std=1.0
    and the result is subsequently scaled and shifted by the mean and std args.

    Args:
        tensor: an n-dimensional `torch.Tensor`
        mean: the mean of the normal distribution
        std: the standard deviation of the normal distribution
        a: the minimum cutoff value
        b: the maximum cutoff value
    """
    with torch.no_grad():
        _trunc_normal_(tensor, 0, 1.0, a, b)
        tensor.mul_(std).add_(mean)


def variance_scaling_(tensor, scale=1.0, mode="fan_in", distribution="normal"):
    fan_in, fan_out = _calculate_fan_in_and_fan_out(tensor)
    if mode == "fan_in":
        denom = fan_in
    elif mode == "fan_out":
        denom = fan_out
    elif mode == "fan_avg":
        denom = (fan_in + fan_out) / 2

    variance = scale / denom

    if distribution == "truncated_normal":
        # constant is stddev of standard normal truncated to (-2, 2)
        trunc_normal_tf_(tensor, std=math.sqrt(variance) / 0.87962566103423978)
    elif distribution == "normal":
        with torch.no_grad():
            tensor.normal_(std=math.sqrt(variance))
    elif distribution == "uniform":
        bound = math.sqrt(3 * variance)
        with torch.no_grad():
            tensor.uniform_(-bound, bound)
    else:
        raise ValueError(f"invalid distribution {distribution}")


def lecun_normal_(tensor):
    variance_scaling_(tensor, mode="fan_in", distribution="truncated_normal")


def default_flax_embed_init(tensor):
    variance_scaling_(tensor, mode="fan_in", distribution="normal")


class Siglip2TextTransformer(nn.Module):
    def __init__(self, config: Siglip2TextConfig):
        super().__init__()
        self.config = config
        embed_dim = config.hidden_size
        self.embeddings = Siglip2TextEmbeddings(config)
        self.encoder = Siglip2Encoder(config)
        self.final_layer_norm = nn.LayerNorm(embed_dim, eps=config.layer_norm_eps)

        self.head = nn.Linear(embed_dim, config.projection_size)
        self._use_flash_attention_2 = config._attn_implementation == "flash_attention_2"

    @can_return_tuple
    @auto_docstring
    def forward(
        self,
        input_ids: Optional[torch.Tensor] = None,
        attention_mask: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.Tensor] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
    ) -> BaseModelOutputWithPooling:
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )

        if input_ids is None:
            raise ValueError("You have to specify input_ids")

        input_shape = input_ids.size()
        input_ids = input_ids.view(-1, input_shape[-1])

        hidden_states = self.embeddings(input_ids=input_ids, position_ids=position_ids)

        # note: Siglip2's text model does not use a causal mask
        # expand attention_mask: [batch_size, seq_len] -> [batch_size, 1, tgt_seq_len, src_seq_len]
        if attention_mask is not None and not self._use_flash_attention_2:
            attention_mask = _prepare_4d_attention_mask(attention_mask, hidden_states.dtype)

        encoder_outputs = self.encoder(
            inputs_embeds=hidden_states,
            attention_mask=attention_mask,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
        )

        last_hidden_state = encoder_outputs.last_hidden_state
        last_hidden_state = self.final_layer_norm(last_hidden_state)

        # The model uses the last token's hidden state, assuming the EOS token is last
        pooled_output = last_hidden_state[:, -1, :]
        pooled_output = self.head(pooled_output)

        return BaseModelOutputWithPooling(
            last_hidden_state=last_hidden_state,
            pooler_output=pooled_output,
            hidden_states=encoder_outputs.hidden_states,
            attentions=encoder_outputs.attentions,
        )


@auto_docstring
class Siglip2PreTrainedModel(PreTrainedModel):
    config_class = Siglip2Config
    base_model_prefix = "siglip2"
    supports_gradient_checkpointing = True

    _no_split_modules = [
        "Siglip2TextEmbeddings",
        "Siglip2EncoderLayer",
        "Siglip2VisionEmbeddings",
        "Siglip2MultiheadAttentionPoolingHead",
    ]
    _supports_flash_attn_2 = True
    _supports_sdpa = True

    def _init_weights(self, module):
        """Initialize the weights"""
        if isinstance(module, Siglip2VisionEmbeddings):
            width = (
                self.config.vision_config.hidden_size
                if isinstance(self.config, Siglip2Config)
                else self.config.hidden_size
            )
            nn.init.normal_(module.position_embedding.weight, std=1 / np.sqrt(width))
        elif isinstance(module, nn.Embedding):
            default_flax_embed_init(module.weight)
        elif isinstance(module, Siglip2Attention):
            nn.init.xavier_uniform_(module.q_proj.weight)
            nn.init.xavier_uniform_(module.k_proj.weight)
            nn.init.xavier_uniform_(module.v_proj.weight)
            nn.init.xavier_uniform_(module.out_proj.weight)
            nn.init.zeros_(module.q_proj.bias)
            nn.init.zeros_(module.k_proj.bias)
            nn.init.zeros_(module.v_proj.bias)
            nn.init.zeros_(module.out_proj.bias)
        elif isinstance(module, Siglip2MLP):
            nn.init.xavier_uniform_(module.fc1.weight)
            nn.init.xavier_uniform_(module.fc2.weight)
            nn.init.normal_(module.fc1.bias, std=1e-6)
            nn.init.normal_(module.fc2.bias, std=1e-6)
        elif isinstance(module, Siglip2MultiheadAttentionPoolingHead):
            nn.init.xavier_uniform_(module.probe.data)
            nn.init.xavier_uniform_(module.attention.in_proj_weight.data)
            nn.init.zeros_(module.attention.in_proj_bias.data)
        elif isinstance(module, Siglip2Model):
            logit_scale_init = torch.log(torch.tensor(1.0))
            module.logit_scale.data.fill_(logit_scale_init)
            module.logit_bias.data.zero_()
        elif isinstance(module, Siglip2ForImageClassification):
            nn.init.normal_(
                module.classifier.weight,
                std=self.config.vision_config.hidden_size**-0.5 * self.config.initializer_factor,
            )
        elif isinstance(module, (nn.Linear, nn.Conv2d)):
            lecun_normal_(module.weight)
            if module.bias is not None:
                nn.init.zeros_(module.bias)
        elif isinstance(module, nn.LayerNorm):
            module.bias.data.zero_()
            module.weight.data.fill_(1.0)

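# Illustrative sketch, not part of the original module: how the initializer helpers above
# behave on dummy layers, mirroring what `_init_weights` does for generic `nn.Linear` and
# `nn.Embedding` modules. The layer sizes are arbitrary placeholders.
def _example_initializers():
    linear = nn.Linear(64, 32)
    lecun_normal_(linear.weight)                # fan_in-scaled truncated normal
    nn.init.zeros_(linear.bias)

    embedding = nn.Embedding(100, 64)
    default_flax_embed_init(embedding.weight)   # fan_in-scaled plain normal
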
@auto_docstring(
    custom_intro="""
    The text model from Siglip2 without any head or projection on top.
    """
)
class Siglip2TextModel(Siglip2PreTrainedModel):
    config_class = Siglip2TextConfig

    def __init__(self, config: Siglip2TextConfig):
        super().__init__(config)
        self.text_model = Siglip2TextTransformer(config)
        # Initialize weights and apply final processing
        self.post_init()

    def get_input_embeddings(self) -> nn.Module:
        return self.text_model.embeddings.token_embedding

    def set_input_embeddings(self, value):
        self.text_model.embeddings.token_embedding = value

    @can_return_tuple
    @auto_docstring
    def forward(
        self,
        input_ids: Optional[torch.Tensor] = None,
        attention_mask: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.Tensor] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
    ) -> BaseModelOutputWithPooling:
        r"""
        Examples:

        ```python
        >>> from transformers import AutoTokenizer, Siglip2TextModel

        >>> model = Siglip2TextModel.from_pretrained("google/siglip2-base-patch16-224")
        >>> tokenizer = AutoTokenizer.from_pretrained("google/siglip2-base-patch16-224")

        >>> # important: make sure to set padding="max_length" as that's how the model was trained
        >>> inputs = tokenizer(["a photo of a cat", "a photo of a dog"], padding="max_length", return_tensors="pt")

        >>> outputs = model(**inputs)
        >>> last_hidden_state = outputs.last_hidden_state
        >>> pooled_output = outputs.pooler_output  # pooled (EOS token) states
        ```"""
        return self.text_model(
            input_ids=input_ids,
            attention_mask=attention_mask,
            position_ids=position_ids,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
        )


class Siglip2MultiheadAttentionPoolingHead(nn.Module):
    """Multihead Attention Pooling."""

    def __init__(self, config: Siglip2VisionConfig):
        super().__init__()

        self.probe = nn.Parameter(torch.randn(1, 1, config.hidden_size))
        self.attention = torch.nn.MultiheadAttention(config.hidden_size, config.num_attention_heads, batch_first=True)
        self.layernorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
        self.mlp = Siglip2MLP(config)
        self.num_heads = config.num_attention_heads

    def forward(self, hidden_state: torch.Tensor, attention_mask: Optional[torch.Tensor] = None) -> torch.Tensor:
        batch_size = hidden_state.shape[0]
        probe = self.probe.repeat(batch_size, 1, 1)

        if attention_mask is not None:
            target_len, source_len = probe.shape[1], hidden_state.shape[1]
            attention_mask = _prepare_4d_attention_mask(attention_mask, hidden_state.dtype, target_len)
            attention_mask = attention_mask.repeat(1, self.num_heads, target_len, 1)
            attention_mask = attention_mask.reshape(-1, target_len, source_len)

        hidden_state = self.attention(probe, hidden_state, hidden_state, attn_mask=attention_mask)[0]

        residual = hidden_state
        hidden_state = self.layernorm(hidden_state)
        hidden_state = residual + self.mlp(hidden_state)

        return hidden_state[:, 0]

@auto_docstring(
    custom_intro="""
    The vision model from Siglip2 without any head or projection on top.
    """
)
class Siglip2VisionModel(Siglip2PreTrainedModel):
    config_class = Siglip2VisionConfig
    main_input_name = "pixel_values"

    def __init__(self, config: Siglip2VisionConfig):
        super().__init__(config)

        self.vision_model = Siglip2VisionTransformer(config)

        # Initialize weights and apply final processing
        self.post_init()

    def get_input_embeddings(self) -> nn.Module:
        return self.vision_model.embeddings.patch_embedding

    @can_return_tuple
    @auto_docstring
    def forward(
        self,
        pixel_values: torch.FloatTensor,
        pixel_attention_mask: torch.Tensor,
        spatial_shapes: torch.LongTensor,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
    ) -> BaseModelOutputWithPooling:
        r"""
        pixel_attention_mask (`torch.Tensor` of shape `(batch_size, image_size, image_size)`, *optional*):
            Mask to avoid performing attention on padding pixel indices.
        spatial_shapes (`torch.LongTensor` of shape `(batch_size, 2)`):
            Tensor containing the spatial dimensions (height, width) of the input images.

        Examples:

        ```python
        >>> from PIL import Image
        >>> import requests
        >>> from transformers import AutoProcessor, Siglip2VisionModel

        >>> model = Siglip2VisionModel.from_pretrained("google/siglip2-base-patch16-224")
        >>> processor = AutoProcessor.from_pretrained("google/siglip2-base-patch16-224")

        >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
        >>> image = Image.open(requests.get(url, stream=True).raw)

        >>> inputs = processor(images=image, return_tensors="pt")

        >>> outputs = model(**inputs)
        >>> last_hidden_state = outputs.last_hidden_state
        >>> pooled_output = outputs.pooler_output  # pooled features
        ```"""
        return self.vision_model(
            pixel_values=pixel_values,
            attention_mask=pixel_attention_mask,
            spatial_shapes=spatial_shapes,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
        )


@auto_docstring
class Siglip2Model(Siglip2PreTrainedModel):
    config_class = Siglip2Config

    def __init__(self, config: Siglip2Config):
        super().__init__(config)

        if not isinstance(config.text_config, Siglip2TextConfig):
            raise TypeError(
                "config.text_config is expected to be of type Siglip2TextConfig but is of type"
                f" {type(config.text_config)}."
            )

        if not isinstance(config.vision_config, Siglip2VisionConfig):
            raise TypeError(
                "config.vision_config is expected to be of type Siglip2VisionConfig but is of type"
                f" {type(config.vision_config)}."
            )

        text_config = config.text_config
        vision_config = config.vision_config

        # First, initialize the text and vision models with the proper attention implementation
        text_model = Siglip2TextModel._from_config(text_config)
        vision_model = Siglip2VisionModel._from_config(vision_config)

        # Second, keep only the inner transformers (for backward compatibility)
        self.text_model = text_model.text_model
        self.vision_model = vision_model.vision_model

        self.logit_scale = nn.Parameter(torch.randn(1))
        self.logit_bias = nn.Parameter(torch.randn(1))

        # Initialize weights and apply final processing
        self.post_init()

    @auto_docstring
    def get_text_features(
        self,
        input_ids: Optional[torch.Tensor] = None,
        attention_mask: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.Tensor] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
    ) -> torch.FloatTensor:
        r"""
        Returns:
            text_features (`torch.FloatTensor` of shape `(batch_size, output_dim)`): The text embeddings obtained by
            applying the projection layer to the pooled output of [`Siglip2TextModel`].

        Examples:

        ```python
        >>> from transformers import AutoTokenizer, AutoModel
        >>> import torch

        >>> model = AutoModel.from_pretrained("google/siglip2-base-patch16-224")
        >>> tokenizer = AutoTokenizer.from_pretrained("google/siglip2-base-patch16-224")

        >>> # important: make sure to set padding="max_length" as that's how the model was trained
        >>> inputs = tokenizer(["a photo of a cat", "a photo of a dog"], padding="max_length", return_tensors="pt")
        >>> with torch.no_grad():
        ...     text_features = model.get_text_features(**inputs)
        ```"""
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )

        text_outputs: BaseModelOutputWithPooling = self.text_model(
            input_ids=input_ids,
            attention_mask=attention_mask,
            position_ids=position_ids,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
        )

        pooled_output = text_outputs.pooler_output

        return pooled_output

    @auto_docstring
    def get_image_features(
        self,
        pixel_values: Optional[torch.FloatTensor] = None,
        pixel_attention_mask: Optional[torch.Tensor] = None,
        spatial_shapes: Optional[torch.LongTensor] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
    ) -> torch.FloatTensor:
        r"""
        pixel_attention_mask (`torch.Tensor` of shape `(batch_size, image_size, image_size)`, *optional*):
            Mask to avoid performing attention on padding pixel indices.
        spatial_shapes (`torch.LongTensor` of shape `(batch_size, 2)`):
            Tensor containing the spatial dimensions (height, width) of the input images.

        Returns:
            image_features (`torch.FloatTensor` of shape `(batch_size, output_dim)`): The image embeddings obtained by
            applying the projection layer to the pooled output of [`Siglip2VisionModel`].

        Examples:

        ```python
        >>> from PIL import Image
        >>> import requests
        >>> from transformers import AutoProcessor, AutoModel
        >>> import torch

        >>> model = AutoModel.from_pretrained("google/siglip2-base-patch16-224")
        >>> processor = AutoProcessor.from_pretrained("google/siglip2-base-patch16-224")

        >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
        >>> image = Image.open(requests.get(url, stream=True).raw)

        >>> inputs = processor(images=image, return_tensors="pt")

        >>> with torch.no_grad():
        ...     image_features = model.get_image_features(**inputs)
        ```"""
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )

        vision_outputs: BaseModelOutputWithPooling = self.vision_model(
            pixel_values=pixel_values,
            attention_mask=pixel_attention_mask,
            spatial_shapes=spatial_shapes,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
        )

        pooled_output = vision_outputs.pooler_output

        return pooled_output

    @can_return_tuple
    @auto_docstring
    def forward(
        self,
        input_ids: Optional[torch.LongTensor] = None,
        pixel_values: Optional[torch.FloatTensor] = None,
        pixel_attention_mask: Optional[torch.Tensor] = None,
        spatial_shapes: Optional[torch.LongTensor] = None,
        attention_mask: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        return_loss: Optional[bool] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
    ) -> Siglip2Output:
        r"""
        pixel_attention_mask (`torch.Tensor` of shape `(batch_size, image_size, image_size)`, *optional*):
            Mask to avoid performing attention on padding pixel indices.
        spatial_shapes (`torch.LongTensor` of shape `(batch_size, 2)`):
            Tensor containing the spatial dimensions (height, width) of the input images.
        return_loss (`bool`, *optional*):
            Whether or not to return the contrastive loss.

        Examples:

        ```python
        >>> from PIL import Image
        >>> import requests
        >>> from transformers import AutoProcessor, AutoModel
        >>> import torch

        >>> model = AutoModel.from_pretrained("google/siglip2-base-patch16-224")
        >>> processor = AutoProcessor.from_pretrained("google/siglip2-base-patch16-224")

        >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
        >>> image = Image.open(requests.get(url, stream=True).raw)

        >>> texts = ["a photo of 2 cats", "a photo of 2 dogs"]
        >>> # important: we pass `padding=max_length` since the model was trained with this
        >>> inputs = processor(text=texts, images=image, padding="max_length", return_tensors="pt")

        >>> with torch.no_grad():
        ...     outputs = model(**inputs)

        >>> logits_per_image = outputs.logits_per_image
        >>> probs = torch.sigmoid(logits_per_image)  # these are the probabilities
        >>> print(f"{probs[0][0]:.1%} that image 0 is '{texts[0]}'")
        31.9% that image 0 is 'a photo of 2 cats'
        ```"""
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )

        vision_outputs: BaseModelOutputWithPooling = self.vision_model(
            pixel_values=pixel_values,
            attention_mask=pixel_attention_mask,
            spatial_shapes=spatial_shapes,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
        )

        text_outputs: BaseModelOutputWithPooling = self.text_model(
            input_ids=input_ids,
            attention_mask=attention_mask,
            position_ids=position_ids,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
        )

        image_embeds = vision_outputs.pooler_output
        text_embeds = text_outputs.pooler_output

        # normalized features
        image_embeds = image_embeds / image_embeds.norm(p=2, dim=-1, keepdim=True)
        text_embeds = text_embeds / text_embeds.norm(p=2, dim=-1, keepdim=True)

        # cosine similarity as logits
        logits_per_text = torch.matmul(text_embeds, image_embeds.t().to(text_embeds.device))

        logit_scale, logit_bias = self.logit_scale.to(text_embeds.device), self.logit_bias.to(text_embeds.device)
        logits_per_text = logits_per_text * logit_scale.exp() + logit_bias

        logits_per_image = logits_per_text.t()

        loss = None
        if return_loss:
            # pairwise sigmoid loss: +1 labels on the diagonal, -1 elsewhere
            eye = torch.eye(logits_per_text.size(0), device=logits_per_text.device)
            m1_diag1 = -torch.ones_like(logits_per_text) + 2 * eye
            loglik = torch.nn.functional.logsigmoid(m1_diag1 * logits_per_text)
            nll = -torch.sum(loglik, dim=-1)
            loss = nll.mean()

        return Siglip2Output(
            loss=loss,
            logits_per_image=logits_per_image,
            logits_per_text=logits_per_text,
            text_embeds=text_embeds,
            image_embeds=image_embeds,
            text_model_output=text_outputs,
            vision_model_output=vision_outputs,
        )

    the patch tokens) e.g. for ImageNet.
    c                      ^  \ rS rSrSrS\SS4U 4S jjr\\      SS\	\
R                     S\	\
R                     S\	\
R                     S	\	\
R                     S
\	\   S\	\   S\4S jj5       5       rSrU =r$ )rr  i  r   rN   r=   Nc                   > [         TU ]  U5        UR                  U l        [        R	                  UR
                  5      nUR                  U l        UR                  S:  a5  [        R                  " UR
                  R                  UR                  5      O[        R                  " 5       U l        U R                  5         g )Nr   )rR   rS   
num_labelsr  r  ra  r  rW   rX   rT   Identityrs  r  )rD   rN   r  ra   s      r0   rS   &Siglip2ForImageClassification.__init__  s      ++ *66v7K7KL(55 OUN_N_bcNcBIIf**668I8IJikititiv 	
 	r/   r  rd   labelsr   r   c                 4   Ub  UOU R                   R                  nUb  UOU R                   R                  nU R                  UUUUUS9nUR                  nUbL  US   R                  UR                  5      n	[        R                  " X-  SS9[        R                  " U	SS9-  nO[        R                  " USS9nU R                  U5      n
SnUGb  UR                  U
R                  5      nU R                   R                  c  U R                  S:X  a  SU R                   l        OoU R                  S:  aN  UR                  [        R                  :X  d  UR                  [        R                  :X  a  SU R                   l        OSU R                   l        U R                   R                  S:X  aI  [!        5       nU R                  S:X  a&  U" U
R#                  5       UR#                  5       5      nOU" X5      nOU R                   R                  S:X  a=  [%        5       nU" U
R'                  S	U R                  5      UR'                  S	5      5      nO,U R                   R                  S:X  a  [)        5       nU" X5      n[+        UU
UR,                  UR.                  S
9$ )a  
        pixel_attention_mask (`torch.Tensor` of shape `(batch_size, image_size, image_size)`, *optional*):
            Mask to avoid performing attention on padding pixel indices.
        spatial_shapes (`torch.LongTensor` of shape `(batch_size, 2)`):
            Tensor containing the spatial dimensions (height, width) of the input images.
        labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
            Labels for computing the image classification/regression loss. Indices should be in `[0, ...,
            config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If
            `config.num_labels > 1` a classification loss is computed (Cross-Entropy).

        Examples:

        ```python
        >>> from transformers import AutoImageProcessor, Siglip2ForImageClassification
        >>> import torch
        >>> from PIL import Image
        >>> import requests

        >>> torch.manual_seed(3)  # doctest: +IGNORE_RESULT
        >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
        >>> image = Image.open(requests.get(url, stream=True).raw)

        >>> # note: we are loading a `Siglip2Model` from the hub here,
        >>> # so the head will be randomly initialized, hence the predictions will be random if seed is not set above.
        >>> image_processor = AutoImageProcessor.from_pretrained("google/siglip2-base-patch16-224")
        >>> model = Siglip2ForImageClassification.from_pretrained("google/siglip2-base-patch16-224")

        >>> inputs = image_processor(images=image, return_tensors="pt")
        >>> outputs = model(**inputs)
        >>> logits = outputs.logits
        >>> # model predicts one of the two classes
        >>> predicted_class_idx = logits.argmax(-1).item()
        >>> print("Predicted class:", model.config.id2label[predicted_class_idx])
        Predicted class: LABEL_1
        ```"""
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )

        outputs: BaseModelOutputWithPooling = self.vision_model(
            pixel_values,
            attention_mask=pixel_attention_mask,
            spatial_shapes=spatial_shapes,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
        )

        sequence_output = outputs.last_hidden_state

        # average pool the patch tokens, masking out padded patches when a mask is provided
        if pixel_attention_mask is not None:
            pool_mask = pixel_attention_mask[..., None].to(sequence_output.device)
            sequence_output = torch.sum(sequence_output * pool_mask, dim=1) / torch.sum(pool_mask, dim=1)
        else:
            sequence_output = torch.mean(sequence_output, dim=1)

        # apply classifier
        logits = self.classifier(sequence_output)

        loss = None
        if labels is not None:
            # move labels to correct device to enable model parallelism
            labels = labels.to(logits.device)
            if self.config.problem_type is None:
                if self.num_labels == 1:
                    self.config.problem_type = "regression"
                elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
                    self.config.problem_type = "single_label_classification"
                else:
                    self.config.problem_type = "multi_label_classification"

            if self.config.problem_type == "regression":
                loss_fct = MSELoss()
                if self.num_labels == 1:
                    loss = loss_fct(logits.squeeze(), labels.squeeze())
                else:
                    loss = loss_fct(logits, labels)
            elif self.config.problem_type == "single_label_classification":
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
            elif self.config.problem_type == "multi_label_classification":
                loss_fct = BCEWithLogitsLoss()
                loss = loss_fct(logits, labels)

        return ImageClassifierOutput(
            loss=loss,
            logits=logits,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
        )


__all__ = [
    "Siglip2Model",
    "Siglip2PreTrainedModel",
    "Siglip2TextModel",
    "Siglip2VisionModel",
    "Siglip2ForImageClassification",
]