
    fTh                     *   S r SSKJr  SSKJrJrJrJrJr  SSK	r	SSK
r	SSK	Jr  SSKJr  SSKJr  SS	KJr  SS
KJr  SSKJr  SSKJrJr  SSKJrJr  SSKJr  SSKJrJ r J!r!J"r"  SSK#J$r$  SSK%J&r&J'r'  \"RP                  " \)5      r*\ " S S\5      5       r+\ " S S\5      5       r, " S S\RZ                  5      r. SCS\RZ                  S\	R^                  S\	R^                  S\	R^                  S\\	R^                     S\0S \04S! jjr1 " S" S#\RZ                  5      r2 " S$ S%\RZ                  5      r3 " S& S'\RZ                  5      r4 " S( S)\RZ                  5      r5 " S* S+\RZ                  5      r6S,\	R^                  S-\7S.\	R^                  4S/ jr8 " S0 S1\RZ                  5      r9 " S2 S3\RZ                  5      r:\  " S4 S5\5      5       r;\ " S6S79 " S8 S9\;5      5       r<\ " S:S79 " S; S<\;5      5       r= " S= S>\\5      r>\ " S?S79 " S@ SA\;\5      5       r?/ SBQr@g)DzPyTorch Idefics3 model.    )	dataclass)CallableListOptionalTupleUnionN)nn   )ACT2FN)DynamicCache)GenerationMixin)_prepare_4d_attention_mask)FlashAttentionKwargs)BaseModelOutputModelOutput)ALL_ATTENTION_FUNCTIONSPreTrainedModel)Unpack)
LossKwargsauto_docstringcan_return_tuplelogging   )	AutoModel   )Idefics3ConfigIdefics3VisionConfigc                       \ rS rSr% SrSr\\R                     \	S'   Sr
\\\\R                           \	S'   Sr\\\R                        \	S'   Sr\\\R                        \	S'   Sr\\\R                        \	S'   S	rg)
Idefics3BaseModelOutputWithPast(   aP	  
Base class for Idefics3 model's outputs that may also contain a past key/values (to speed up sequential decoding).
Args:
    last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`):
        Sequence of hidden-states at the output of the last layer of the model.
        If `past_key_values` is used only the last hidden-state of the sequences of shape `(batch_size, 1,
        hidden_size)` is output.
    past_key_values (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`):
        Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of shape
        `(batch_size, num_heads, sequence_length, embed_size_per_head)`) and optionally if
        `config.is_encoder_decoder=True` 2 additional tensors of shape `(batch_size, num_heads,
        encoder_sequence_length, embed_size_per_head)`.
        Contains pre-computed hidden-states (key and values in the self-attention blocks and optionally if
        `config.is_encoder_decoder=True` in the cross-attention blocks) that can be used (see `past_key_values`
        input) to speed up sequential decoding.
    hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
        Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, +
        one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`.
        Hidden-states of the model at the output of each layer plus the optional initial embedding outputs.
    attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
        Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
        sequence_length)`.
        Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
        heads.
    image_hidden_states (`tuple(torch.FloatTensor)`, *optional*):
        Tuple of `torch.FloatTensor` (one for the output of the image embeddings, `(batch_size, num_images,
        sequence_length, hidden_size)`.
        image_hidden_states of the model produced by the vision encoder
Nlast_hidden_statepast_key_valueshidden_states
attentionsimage_hidden_states )__name__
__module____qualname____firstlineno____doc__r!   r   torchFloatTensor__annotations__r"   r   r#   r$   r%   __static_attributes__r&       f/var/www/auris/envauris/lib/python3.13/site-packages/transformers/models/idefics3/modeling_idefics3.pyr   r   (   s    < 6:x 1 129AEOXeE%*;*;$<=>E8<M8E%"3"345<59Ju00129>B%(9(9":;Br0   r   c                   "   \ rS rSr% SrSr\\R                     \	S'   Sr
\\R                     \	S'   Sr\\\R                        \	S'   Sr\\\R                        \	S'   Sr\\\R                        \	S'   Sr\\\R                        \	S	'   S
rg)Idefics3CausalLMOutputWithPastO   aK  
Base class for Idefics causal language model (or autoregressive) outputs.

Args:
    loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided):
        Language modeling loss (for next-token prediction).
    logits (`torch.FloatTensor` of shape `(batch_size, sequence_length, config.vocab_size)`):
        Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).
    past_key_values (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`):
        Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of shape
        `(batch_size, num_heads, sequence_length, embed_size_per_head)`)
        Contains pre-computed hidden-states (key and values in the self-attention blocks) that can be used (see
        `past_key_values` input) to speed up sequential decoding.
    hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
        Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, +
        one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`.
        Hidden-states of the model at the output of each layer plus the optional initial embedding outputs.
    attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
        Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
        sequence_length)`.
        Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
        heads.
    image_hidden_states (`tuple(torch.FloatTensor)`, *optional*):
        Tuple of `torch.FloatTensor` (one for the output of the image embeddings, `(batch_size, num_images,
        sequence_length, hidden_size)`.
        image_hidden_states of the model produced by the vision encoder
Nlosslogitsr"   r#   r$   r%   r&   )r'   r(   r)   r*   r+   r5   r   r,   r-   r.   r6   r"   r   r#   r   r$   r%   r/   r&   r0   r1   r3   r3   O   s    8 )-D(5$$
%,*.FHU&&'.9=OXd5#4#456=8<M8E%"3"345<59Ju00129>B%(9(9":;Br0   r3   c                      ^  \ rS rSrSrS\4U 4S jjrS\R                  S\R                  S\R                  4S jrS	rU =r$ )
Idefics3VisionEmbeddingsv   a4  
This is a modified version of `siglip.modelign_siglip.SiglipVisionEmbeddings` to enable images of variable
resolution.

The modifications are adapted from [Patch n' Pack: NaViT, a Vision Transformer for any Aspect Ratio and Resolution](https://arxiv.org/abs/2307.06304)
which allows treating images in their native aspect ratio and without the need to resize them to the same
fixed size. In particular, we start from the original pre-trained SigLIP model
(which uses images of fixed-size square images) and adapt it by training on images of variable resolutions.
configc                   > [         TU ]  5         UR                  U l        UR                  U l        UR
                  U l        [        R                  " UR                  U R                  U R
                  U R
                  SS9U l	        U R                  U R
                  -  U l
        U R                  S-  U l        U R                  U l        [        R                  " U R                  U R                  5      U l        g )Nvalid)in_channelsout_channelskernel_sizestridepaddingr   )super__init__hidden_size	embed_dim
image_size
patch_sizer	   Conv2dnum_channelspatch_embeddingnum_patches_per_sidenum_patchesnum_positions	Embeddingposition_embeddingselfr:   	__class__s     r1   rC   !Idefics3VisionEmbeddings.__init__   s    ++ ++ ++!yy++?? 
 %)OOt$F!44a7!--"$,,t/A/A4>>"Rr0   pixel_valuespatch_attention_maskreturnc                    UR                   u  p4pVU R                  U5      nUR                  S5      R                  SS5      nXPR                  -  X`R                  -  p[
        R                  " SU R                  -  SSU R                  -  5      n[
        R                  " X9U
-  4SS9n[        U5       H  u  pUS S 2S4   R                  5       nUS   R                  5       n[
        R                  " SSSU-  5      n[
        R                  " SSSU-  5      n[
        R                  " UUSS9n[
        R                  " UUSS9nUS S 2S 4   U R                  -  U-   R                  5       nUX   UR                  S	5      R                  5       '   M     UR                  U R                  R                   R"                  5      nXR                  U5      -   nU$ )
Nr   r         ?r   )size
fill_valueg!?T)right)shaperJ   flatten	transposerG   r,   arangerK   full	enumeratesum	bucketizeviewcputorO   weightdevice)rQ   rT   rU   
batch_size_max_im_hmax_im_wpatch_embeds
embeddingsmax_nb_patches_hmax_nb_patches_w
boundariesposition_ids	batch_idxp_attn_masknb_patches_hnb_patches_wfractional_coords_hfractional_coords_wbucket_coords_hbucket_coords_wpos_idss                         r1   forward Idefics3VisionEmbeddings.forward   s   ,8,>,>)
x++L9!))!,66q!<
-5-H(VeVeJe*\\!d&?&?"?a$JcJcFcd
zz
GW4W'Xefg&/0D&E"I&q!t,002L&q>--/L"',,q(A<L"M"',,q(A<L"M#oo.A:UYZO#oo.A:UYZO&q$w/$2K2KKo]ffhGBIL#K$4$4R$8$<$<$>? 'F $t'>'>'E'E'L'LM"9"9,"GG
r0   )rE   rF   rL   rK   rM   rJ   rG   rO   )r'   r(   r)   r*   r+   r   rC   r,   r-   
BoolTensorTensorr}   r/   __classcell__rR   s   @r1   r8   r8   v   sI    S3 S&E$5$5 UM]M] bgbnbn  r0   r8   modulequerykeyvalueattention_maskscalingdropoutc                    [         R                  " XR                  SS5      5      U-  nUb  X-   n[        R                  R                  US[         R                  S9R                  UR                  5      n[        R                  R                  XU R                  S9n[         R                  " X5      n	U	R                  SS5      R                  5       n	X4$ )Nr\   )dimdtype)ptrainingr   r   )r,   matmulr_   r	   
functionalsoftmaxfloat32rg   r   r   r   
contiguous)
r   r   r   r   r   r   r   kwargsattn_weightsattn_outputs
             r1   eager_attention_forwardr      s     <<}}R'<=GL!#4==((2U]](SVVW\WbWbcL==((6??([L,,|3K''1-88:K$$r0   c                      ^  \ rS rSrSrU 4S jr  S
S\R                  S\\R                     S\\	   S\
\R                  \\R                     4   4S jjrS	rU =r$ )Idefics3VisionAttention   z=Multi-headed attention from 'Attention Is All You Need' paperc                    > [         TU ]  5         Xl        UR                  U l        UR
                  U l        U R                  U R                  -  U l        U R                  U R                  -  U R                  :w  a&  [        SU R                   SU R                   S35      eU R                  S-  U l	        UR                  U l        [        R                  " U R                  U R                  5      U l        [        R                  " U R                  U R                  5      U l        [        R                  " U R                  U R                  5      U l        [        R                  " U R                  U R                  5      U l        SU l        g )Nz;embed_dim must be divisible by num_heads (got `embed_dim`: z and `num_heads`: z).g      F)rB   rC   r:   rD   rE   num_attention_heads	num_headshead_dim
ValueErrorscaleattention_dropoutr   r	   Lineark_projv_projq_projout_proj	is_causalrP   s     r1   rC    Idefics3VisionAttention.__init__   s   ++33$..8==4>>)T^^;MdnnM] ^NN#2'  ]]D(
//ii?ii?ii?		$..$..A r0   r#   r   output_attentionsrV   c                    UR                   u  pEnU R                  U5      nU R                  U5      nU R                  U5      n	UR	                  XEU R
                  U R                  5      R                  SS5      nUR	                  XEU R
                  U R                  5      R                  SS5      nU	R	                  XEU R
                  U R                  5      R                  SS5      n	[        n
U R                  R                  S:w  aT  U R                  R                  S:X  a  U(       a  [        R                  S5        O[        U R                  R                     n
U
" U UUU	UU R                  U R                  U R                   (       d  SOU R"                  S9u  pUR%                  XEU5      R'                  5       nU R)                  U5      nU(       d  SnX4$ )	z#Input shape: Batch x Time x Channelr   r   eagersdpaz`torch.nn.functional.scaled_dot_product_attention` does not support `output_attentions=True`. Falling back to eager attention. This warning can be removed using the argument `attn_implementation="eager"` when loading the model.        )r   r   r   N)r]   r   r   r   re   r   r   r_   r   r:   _attn_implementationloggerwarning_oncer   r   r   r   r   reshaper   r   )rQ   r#   r   r   rj   
seq_lengthrE   querieskeysvaluesattention_interfacer   r   s                r1   r}   Idefics3VisionAttention.forward   s    -:,?,?)
	++m,{{=)]+,,zt~~t}}U__`acdeyyOYYZ[]^_ZT^^T]]S]]^_abc(?;;++w6{{//69>O##L
 '>dkk>^>^&_#$7nnJJ#}}C$,,	%
! "))*)LWWYmmK0 L((r0   )r:   r   rE   r   r   r   r   r   r   r   r   NF)r'   r(   r)   r*   r+   rC   r,   r   r   boolr   r}   r/   r   r   s   @r1   r   r      sk    G2 26,1	-)||-) !.-) $D>	-)
 
u||Xell33	4-) -)r0   r   c                   b   ^  \ rS rSrU 4S jrS\R                  S\R                  4S jrSrU =r	$ )Idefics3VisionMLPi  c                   > [         TU ]  5         Xl        [        UR                     U l        [        R                  " UR                  UR                  5      U l
        [        R                  " UR                  UR                  5      U l        g N)rB   rC   r:   r   
hidden_actactivation_fnr	   r   rD   intermediate_sizefc1fc2rP   s     r1   rC   Idefics3VisionMLP.__init__  sb    #F$5$5699V//1I1IJ99V55v7I7IJr0   r#   rV   c                 l    U R                  U5      nU R                  U5      nU R                  U5      nU$ r   )r   r   r   )rQ   r#   s     r1   r}   Idefics3VisionMLP.forward  s4    /**=9/r0   )r   r:   r   r   )
r'   r(   r)   r*   rC   r,   r   r}   r/   r   r   s   @r1   r   r     s)    KU\\ ell  r0   r   c                   .   ^  \ rS rSrU 4S jrS rSrU =r$ )Idefics3SimpleMLPi#  c                    > [         TU ]  5         UR                  R                  UR                  S-  -  nUR
                  R                  n[        R                  " X#SS9U l        g )Nr   Fbias)	rB   rC   vision_configrD   scale_factortext_configr	   r   proj)rQ   r:   
input_sizeoutput_sizerR   s       r1   rC   Idefics3SimpleMLP.__init__$  sR    ))559L9La9OP
((44IIjEB	r0   c                 $    U R                  U5      $ r   r   )rQ   xs     r1   r}   Idefics3SimpleMLP.forward*  s    yy|r0   r   )r'   r(   r)   r*   rC   r}   r/   r   r   s   @r1   r   r   #  s    C r0   r   c            
          ^  \ rS rSrS\4U 4S jjr S
S\R                  S\R                  S\\	   S\
\R                     4S jjrS	rU =r$ )Idefics3EncoderLayeri/  r:   c                 <  > [         TU ]  5         UR                  U l        [	        U5      U l        [        R                  " U R                  UR                  S9U l	        [        U5      U l        [        R                  " U R                  UR                  S9U l        g )Neps)rB   rC   rD   rE   r   	self_attnr	   	LayerNormlayer_norm_epslayer_norm1r   mlplayer_norm2rP   s     r1   rC   Idefics3EncoderLayer.__init__0  sm    ++08<<F<Q<QR$V,<<F<Q<QRr0   r#   r   r   rV   c                     UnU R                  U5      nU R                  UUUS9u  pXA-   nUnU R                  U5      nU R                  U5      nXA-   nU4nU(       a  Xe4-  nU$ )a  
Args:
    hidden_states (`torch.FloatTensor`):
        Input to the layer of shape `(batch, seq_len, embed_dim)`.
    attention_mask (`torch.FloatTensor`):
        Attention mask of shape `(batch, 1, q_len, k_v_seq_len)` where padding elements are indicated by very large negative values.
    output_attentions (`bool`, *optional*, defaults to `False`):
        Whether or not to return the attentions tensors of all attention layers. See `attentions` under
        returned tensors for more detail.
)r#   r   r   )r   r   r   r   )rQ   r#   r   r   residualr   outputss          r1   r}   Idefics3EncoderLayer.forward9  s      !((7&*nn')/ '5 '
#
 !0 ((7/ 0 "&Gr0   )rE   r   r   r   r   )F)r'   r(   r)   r*   r   rC   r,   r   r   r   r   r-   r}   r/   r   r   s   @r1   r   r   /  s^    S3 S -2	$||$ $ $D>	$
 
u  	!$ $r0   r   c                      ^  \ rS rSrSrS\4U 4S jjr    SS\\R                     S\\
   S\\
   S\\
   S	\\\4   4
S
 jjrSrU =r$ )Idefics3Encoderia  z
Transformer encoder consisting of `config.num_hidden_layers` self attention layers. Each layer is a
[`Idefics3EncoderLayer`].

Args:
    config: Idefics3Config
r:   c                    > [         TU ]  5         Xl        [        R                  " [        UR                  5       Vs/ s H  n[        U5      PM     sn5      U l        SU l	        g s  snf r   )
rB   rC   r:   r	   
ModuleListrangenum_hidden_layersr   layersgradient_checkpointing)rQ   r:   rk   rR   s      r1   rC   Idefics3Encoder.__init__j  sT    mm5QWQiQiKj$kKja%9&%AKj$kl&+# %ls   A&r   r   output_hidden_statesreturn_dictrV   c                 2   Ub  UOU R                   R                  nUb  UOU R                   R                  nUb  UOU R                   R                  nU(       a  SOSnU(       a  SOSnUnU R                   Hn  n	U(       a  Xh4-   nU R
                  (       a0  U R                  (       a  U R                  U	R                  UUU5      n
OU	" UUUS9n
U
S   nU(       d  Mf  XzS   4-   nMp     U(       a  Xh4-   nU(       d  [        S XU4 5       5      $ [        XUS9$ )a  
Args:
    inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`):
        Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation.
        This is useful if you want more control over how to convert `input_ids` indices into associated vectors
        than the model's internal embedding lookup matrix.
    attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
        Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:

        - 1 for tokens that are **not masked**,
        - 0 for tokens that are **masked**.

        [What are attention masks?](../glossary#attention-mask)
    output_attentions (`bool`, *optional*):
        Whether or not to return the attentions tensors of all attention layers. See `attentions` under
        returned tensors for more detail.
    output_hidden_states (`bool`, *optional*):
        Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors
        for more detail.
    return_dict (`bool`, *optional*):
        Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
Nr&   )r   r   r   c              3   .   #    U  H  oc  M  Uv   M     g 7fr   r&   ).0vs     r1   	<genexpr>*Idefics3Encoder.forward.<locals>.<genexpr>  s     e$Sq$Ss   	r!   r#   r$   )r:   r   r   use_return_dictr   r   r   _gradient_checkpointing_func__call__tupler   )rQ   inputs_embedsr   r   r   r   encoder_statesall_attentionsr#   encoder_layerlayer_outputss              r1   r}   Idefics3Encoder.forwardq  s)   < 2C1N-TXT_T_TqTq$8$D $++JjJj 	 &1%<k$++B]B]30d%![[M#!/2B!B**t}} $ A A!**!"%	! !.!"&7! *!,M  !/3C2E!E) ),  +.>>Ne]N$Seee+Vd
 	
r0   )r:   r   r   NNNN)r'   r(   r)   r*   r+   r   rC   r   r,   r   r   r   r   r   r}   r/   r   r   s   @r1   r   r   a  s    ,~ , 26,0/3&*E
 !.E
 $D>	E

 'tnE
 d^E
 
uo%	&E
 E
r0   r   r#   n_reprV   c                     U R                   u  p#pEUS:X  a  U $ U SS2SS2SSS2SS24   R                  X#XU5      n U R                  X#U-  XE5      $ )z
This is the equivalent of torch.repeat_interleave(x, dim=1, repeats=n_rep). The hidden states go from (batch,
num_key_value_heads, seqlen, head_dim) to (batch, num_attention_heads, seqlen, head_dim)
r   N)r]   expandr   )r#   r  batchnum_key_value_headsslenr   s         r1   	repeat_kvr
    s_    
 2?1D1D.Ez!!Qa"23::5W\dlmM  e(CTTTr0   c                   8   ^  \ rS rSrSU 4S jjrS rS rSrU =r$ )Idefics3RMSNormi  c                    > [         TU ]  5         [        R                  " [        R
                  " U5      5      U l        X l        g)z.
Idefics3RMSNorm is equivalent to T5LayerNorm
N)rB   rC   r	   	Parameterr,   onesrh   variance_epsilon)rQ   rD   r   rR   s      r1   rC   Idefics3RMSNorm.__init__  s/     	ll5::k#:; #r0   c                    UR                   nUR                  [        R                  5      nUR	                  S5      R                  SSS9nU[        R                  " X0R                  -   5      -  nU R                  UR                  U5      -  $ )Nr   r\   T)keepdim)	r   rg   r,   r   powmeanrsqrtr  rh   )rQ   r#   input_dtypevariances       r1   r}   Idefics3RMSNorm.forward  sw    #))%((7 $$Q',,R,>%H?T?T4T(UU{{]--k:::r0   c                 ^    [        U R                  R                  5       SU R                   3$ )Nz, eps=)r   rh   r]   r  rQ   s    r1   
extra_reprIdefics3RMSNorm.extra_repr  s*    ))*+6$2G2G1HIIr0   )r  rh   )gư>)	r'   r(   r)   r*   rC   r}   r  r/   r   r   s   @r1   r  r    s    $;J Jr0   r  c                   8   ^  \ rS rSrU 4S jrSS jrS rSrU =r$ )Idefics3Connectori  c                 d   > [         TU ]  5         UR                  U l        [        U5      U l        g r   )rB   rC   r   r   modality_projectionrP   s     r1   rC   Idefics3Connector.__init__  s)    "//#4V#< r0   c                    UR                  5       u  p4n[        US-  5      =pgUR                  X6Xu5      nUR                  X6[        Xr-  5      XR-  5      nUR                  SSSS5      nUR	                  U[        Xr-  5      [        Xb-  5      XRS-  -  5      nUR                  SSSS5      nUR	                  U[        XBS-  -  5      XRS-  -  5      nU$ )Ng      ?r   r   r   r
   )rY   intre   permuter   )rQ   r   r   bszseqrE   heightwidths           r1   pixel_shuffleIdefics3Connector.pixel_shuffle  s    ffh)S#X&FF31FF3E$8 99;STIIaAq!IIc3u34c&:O6PR[mn_nRopIIaAq!IIc3sAo67TUo9VWr0   c                 ^    U R                  XR                  5      nU R                  U5      nU$ r   )r*  r   r!  )rQ   r%   s     r1   r}   Idefics3Connector.forward  s2    "001DFWFWX"667JK""r0   )r!  r   )r   )	r'   r(   r)   r*   rC   r*  r}   r/   r   r   s   @r1   r  r    s    =
	# #r0   r  c                   F    \ rS rSr\rSrSrSS/rSr	Sr
SrSrSrSrS rSrg	)
Idefics3PreTrainedModeli  modelTr   Idefics3DecoderLayerr"   c                    [        U R                  SU R                  R                  5       R                  5      n[	        U[
        R                  [
        R                  45      (       aW  UR                  R                  R                  SUS9  UR                  b%  UR                  R                  R                  5         g g [	        U[
        R                  5      (       ad  UR                  R                  R                  SUS9  UR                  b2  UR                  R                  UR                     R                  5         g g [	        U[
        R                  5      (       aJ  UR                  R                  R!                  S5        UR                  R                  R                  5         g [	        U["        5      (       a&  UR                  R                  R!                  S5        g g )Ninitializer_ranger   )r  stdrX   )getattrr:   get_text_configr3  
isinstancer	   r   rH   rh   datanormal_r   zero_rN   padding_idxr   fill_r  )rQ   r   r4  s      r1   _init_weights%Idefics3PreTrainedModel._init_weights  sM   dkk#68S8S8U8g8ghfryy"))455MM&&CS&9{{&  &&( '--MM&&CS&9!!-""6#5#56<<> .--MM$$S)KK""$00MM$$S) 1r0   r&   N)r'   r(   r)   r*   r   config_classbase_model_prefixsupports_gradient_checkpointing_no_split_modules_skip_keys_device_placement_supports_flash_attn_2_supports_sdpa_supports_flex_attn_supports_cache_class_supports_attention_backendr=  r/   r&   r0   r1   r/  r/    sG    !L&*#24JK"3!N "&*r0   r/  zO
    The Idefics3 Vision Transformer Model outputting raw image embedding.
    custom_introc                      ^  \ rS rSr\rSrSrSrS\4U 4S jjr	S r
S r    SS\\R                     S\\   S	\\   S
\\   S\\\4   4
S jjrSrU =r$ )Idefics3VisionTransformeri  Tr:   c                   > [         TU ]  U5        UR                  n[        U5      U l        [        U5      U l        UR                  U l        [        R                  " X!R                  S9U l        UR                  S:H  U l        g )Nr   flash_attention_2)rB   rC   rD   r8   ro   r   encoderrG   r	   r   r   post_layernormr   _use_flash_attention_2)rQ   r:   rE   rR   s      r1   rC   "Idefics3VisionTransformer.__init__  sk     &&	26:&v. ++ ll9:O:OP&,&A&AEX&X#r0   c                     U R                   $ r   ro   r  s    r1   get_input_embeddings.Idefics3VisionTransformer.get_input_embeddings'  s    r0   c                     Xl         g r   rT  rQ   r   s     r1   set_input_embeddings.Idefics3VisionTransformer.set_input_embeddings+  s    r0   rU   r   r   r   rV   c                 *   Ub  UOU R                   R                  nUb  UOU R                   R                  nUb  UOU R                   R                  nUR	                  S5      nUcq  U R
                  n[        R                  " UUR	                  S5      U-  UR	                  S5      U-  45      nUR                  [        R                  UR                  S9nU R                  XS9nUR                  US5      n[        R                  " U) 5      (       d  S nO&U R                  (       d  [        X(R                   5      nU R#                  UUUUUS9n	U	S   n
U R%                  U
5      n
U(       d	  U
4U	SS  -   $ ['        U
U	R(                  U	R*                  S	9$ )
Nr   r   r
   r   ri   rT   rU   r\   )r   r   r   r   r   r   r   )r:   r   r   r   rY   rG   r,   r  rg   r   ri   ro   re   anyrQ  r   r   rO  rP  r   r#   r$   )rQ   rT   rU   r   r   r   rj   rG   r#   encoder_outputsr!   s              r1   r}   !Idefics3VisionTransformer.forward.  s    2C1N-TXT_T_TqTq$8$D $++JjJj 	 &1%<k$++B]B]!&&q)
'J#(:: %%a(J6 %%a(J6$  $8#:#:T`TgTg#:#h \m388RH yy..//#' ,,#=>RTgTg#h ,,'//!5# ' 
 ,A. //0AB%'/!"*===/)77&11
 	
r0   )rQ  ro   rO  rG   rP  r  )r'   r(   r)   r*   r   r?  rE  _supports_flash_attention_2rF  rC   rU  rY  r   r,   r   r   r   r   r   r}   r/   r   r   s   @r1   rL  rL    s     (LN"&Y3 Y  <@,0/3&*7
 'u'7'787
 $D>	7

 'tn7
 d^7
 
uo%	&7
 7
r0   rL  zZ
    Idefics3 model consisting of a SIGLIP vision encoder and Llama3 language decoder
    c            #       |  ^  \ rS rSrS\4U 4S jjrS rS rS rS r	S\
R                  S	\\
R                     S
\\
R                     4S jrSS\
R                  S\
R                  4S jjr\\" SS9             SS\\
R                     S\\
R                     S\\
R                     S\\\
R                        S	\\
R                     S\\
R                     S\\
R(                     S
\\
R                     S\\   S\\   S\\   S\\
R                     S\\   S\\   S\\\4   4S jj5       5       rSrU =r$ )Idefics3Modelih  r:   c                   > [         TU ]  U5        U R                  R                  R                  U l        U R                  R                  R                  U l        [        R                  UR                  5      U l
        [        U5      U l        [        R                  " UR                  5      U l        [!        UR                  R"                  UR                  R$                  -  S-  UR&                  S-  -  5      U l        U R                  R*                  U l        UR                  R,                  S:H  U l        U R1                  5         g )Nr   rN  )rB   rC   r:   r   pad_token_idr;  
vocab_sizerL  _from_configr   vision_modelr  	connectorr   from_config
text_modelr$  rF   rG   r   image_seq_lenimage_token_idr   rQ  	post_initrP   s     r1   rC   Idefics3Model.__init__n  s     ;;22??++11<<5BB6CWCWX*62#//0B0BC ""--1E1E1P1PPUVV[a[n[npq[qr
 #kk88&,&8&8&M&MQd&d#r0   c                    ^ U4S jmS nU R                  5       R                  U5      U l        T" U R                  5      R                  U5      U l        g)a  
Enables the gradients for the input embeddings.

This is useful for lora when using gradient checkpointing.
c.f. https://github.com/huggingface/peft/issues/1402#issuecomment-1913675032

Override to set output.requires_grad = True for both the decoder's and vision model's embeddings.
c                    > [        [        U R                  5       5      5      S:X  a  U $ T" [        U R                  5       5      S   5      $ )Nr   )lenlistchildren)r   get_lowest_modules    r1   ru  CIdefics3Model.enable_input_require_grads.<locals>.get_lowest_module  s?    4)*+q0 )foo.?)@)CDDr0   c                 &    UR                  S5        g NTrequires_grad_r   inputoutputs      r1   make_inputs_require_gradsKIdefics3Model.enable_input_require_grads.<locals>.make_inputs_require_grads      !!$'r0   N)rU  register_forward_hook_text_require_grads_hookrh  _vision_require_grads_hook)rQ   r~  ru  s     @r1   enable_input_require_grads(Idefics3Model.enable_input_require_grads  sN    	E	( )-(A(A(C(Y(YZs(t%*;D<M<M*N*d*d%+
'r0   c                 l    U R                   R                  5         U R                  R                  5         g r   r  remover  r  s    r1   disable_input_require_grads)Idefics3Model.disable_input_require_grads  &    %%,,.''..0r0   c                 6    U R                   R                  5       $ r   )rk  rU  r  s    r1   rU  "Idefics3Model.get_input_embeddings  s    3355r0   c                 :    U R                   R                  U5        g r   )rk  rY  rX  s     r1   rY  "Idefics3Model.set_input_embeddings  s    ,,U3r0   	input_idsr   r%   c                     XR                   :H  nUR                  5       nUR                  UR                  UR                  5      nX5U'   U$ )a2  
This method aims at merging the token embeddings with the image hidden states into one single sequence of vectors that are fed to the transformer LM.
The merging happens as follows:
- The text token sequence is: `tok_1 tok_2 tok_3 <fake_token_around_image> <image> <image> ... <image> <fake_token_around_image> tok_4`.
- We get the image hidden states for the image through the vision encoder and that hidden state, after a pixel shuffle operation, is then projected into the text embedding space.
We thus have a sequence of image hidden states of size (1, image_seq_len, hidden_dim), where 1 is for batch_size of 1 image and hidden_dim is the hidden_dim of the LM transformer.
- The merging happens so that we obtain the following sequence: `vector_tok_1 vector_tok_2 vector_tok_3 vector_fake_tok_around_image {sequence of image_seq_len image hidden states} vector_fake_toke_around_image vector_tok_4`. That sequence is fed to the LM.
- To fit the format of that sequence, `input_ids`, `input_embeds`, `attention_mask` are all 3 adapted to insert the image hidden states.
)rm  clonerg   ri   r   )rQ   r  r   r%   special_image_token_masknew_inputs_embedss         r1   inputs_mergerIdefics3Model.inputs_merger  sP     $-0C0C#C )//1144]5I5I=K^K^_6I23  r0   rT   pixel_attention_maskc                    UR                   u  p4pVnUR                  U R                  S9nUR                  " X4-  /UR                   SS Q76 nUR                   SS R	                  5       nUS:H  R                  SS9U:g  n	X   R                  5       nUc_  [        R                  " UR                  S5      UR                  S5      UR                  S	5      4[        R                  UR                  S
9nO4UR                  " X4-  /UR                   SS Q76 nX)   R                  5       nU R                  R                  R                  n
UR                  SXS9nUR                  SXS9nUR                  SS9S:  R                  5       nU R!                  XS9nUR"                    U R%                  UR"                  5      nUR                  SUR                   S   5      nU$ )az  
Encodes images into continuous embeddings that can be forwarded to the language model.

Args:
    pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, image_size, image_size)`):
        The tensors corresponding to the input images.
    pixel_attention_mask (`torch.LongTensor`, *optional*):
        The attention mask indicating padded regions in the image.
)r   r   Nr   r   )r\   r   )r   r   r
   )rY   r   ri   )	dimensionrY   step)r\   r   r]  r\   )r]   rg   r   re   numelrc   r   r,   r  rY   r   ri   r:   r   rG   unfoldrh  r!   ri  )rQ   rT   r  rj   
num_imagesrI   r(  r)  nb_values_per_imagereal_images_indsrG   patches_subgridrU   r%   s                 r1   get_image_features Idefics3Model.get_image_features  s    ?K>P>P;
e#TZZ8#(()@Z<CUCUVWVXCYZ +004::<(C/444FJ]]#5@@B  '#(::"''*L,=,=a,@,BSBSTUBVWjj#**$  $8#<#<Z=T#vWkWqWqrsrtWu#v #7#I#T#T#V [[..99
.55
5d)001:0_ / 3 3 3 AA EKKM #//\/u-- #nn-@-R-RS166r;N;T;TUW;XY""r0   a  
        Inputs fed to the model can have an arbitrary number of images. To account for this, pixel_values fed to
        the model have image padding -> (batch_size, max_num_images, 3, max_heights, max_widths) where
        max_num_images is the maximum number of images among the batch_size samples in the batch.
        Padding images are not needed beyond padding the pixel_values at the entrance of the model.
        For efficiency, we only pass through the vision_model's forward the real images by
        discarding the padding images i.e. pixel_values of size (image_batch_size, 3, height, width) where
        image_batch_size would be 7 when num_images_per_sample=[1, 3, 1, 2] and max_num_images would be 3.
        rI  r   rs   r"   	use_cacher   r   cache_positionr   r   rV   c                 :   U
b  U
OU R                   R                  n
Ub  UOU R                   R                  nU	b  U	OU R                   R                  n	Ub  UOU R                   R                  nU R
                  (       a9  U R                  R                  (       a  U	(       a  [        R                  S5        Sn	Ub  UR                  u  nnOUb  UR                  u  nnnO[        S5      eSnU	(       a  Uc
  [        5       nUR                  5       nUb  Uc  US:X  a  [        S5      eUc9  U R                  R                  5       " U5      R                  U R                   5      nUb  Ub  [        S5      eUb  U R#                  Xg5      nO'Ub$  UR                  U R$                  UR                   S9nUS:X  a  Ub  Ub  U R'                  UUUS9nU R                  " SUUUUU	U
UUS	S
.	UD6n[)        UR*                  UR,                  UR.                  UR0                  US9$ )aT  
pixel_attention_mask (`torch.Tensor` of shape `(batch_size, image_size, image_size)`, *optional*):
    Mask to avoid performing attention on padding pixel indices.
image_hidden_states (`torch.FloatTensor` of shape `(batch_size, num_channels, image_size, image_size)`):
    The hidden states of the image encoder after modality projection.
zZ`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`...Fz5You have to specify either input_ids or inputs_embedsr   zWWhen first calling the model, if input_embeds are passed, input_ids should not be None.zMYou cannot specify both pixel_values and image_hidden_states at the same timer\  )r  r   r%   T)	r   r   rs   r"   r  r   r   r  r   )r!   r"   r#   r$   r%   r&   )r:   r   r   r  r   r   rk  r   r   r   r]   r   r   get_seq_lengthrU  rg   ri   r  r   r  r   r!   r"   r#   r$   )rQ   r  r   rs   r"   r   rT   r  r%   r  r   r   r  r   r   rj   r   rk   past_seen_tokensr   s                       r1   r}   Idefics3Model.forward  s?   F 2C1N-TXT_T_TqTq$8$D $++JjJj 	 "+!6IDKK<Q<Q	%0%<k$++B]B]==T__CC	l I  %.__"J
&(5(;(;%J
ATUU&"...==?$):?OST?Tvww  OO@@B9MPPQUQ\Q\]M #(;(Glmm%"&"9"9,"] ,"5"8"8tzzR[RbRb"8"cq ]%>CVCb !..#+$7 / M // 
')%+/!5)
 
 /%77#33!//)) 3
 	
r0   )
r  rQ  r  ri  rl  rm  r;  rk  rh  rf  r   )NNNNNNNNNNNNN)r'   r(   r)   r*   r   rC   r  r  rU  rY  r,   
LongTensorr   r   r  r-   r  r   r   r   r   r   r   r   r   r   r   r}   r/   r   r   s   @r1   rc  rc  h  s   ~ &
61
64!##!  -! &ell3	!.+#u/@/@ +#X]XhXh +#Z 
 151537=A5948;?;?$(,0/359&*\
E,,-\
 !.\
 u//0	\

 "$u'8'8"9:\
   1 12\
 u001\
 'u'7'78\
 &e&7&78\
 D>\
 $D>\
 'tn\
 !!1!12\
 d^\
 -.\
  
u55	6!\

 \
r0   rc  c                       \ rS rSrSrg)KwargsForCausalLMiW  r&   N)r'   r(   r)   r*   r/   r&   r0   r1   r  r  W  s    3r0   r  z
    The Idefics3 Model with a language modeling head. It is made up a SigLIP vision encoder, with a language modeling head on top.
    c            '       R  ^  \ rS rSrS/rU 4S jrS rS rS rS r	S r
S	 r\\               SS
\\R                      S\\R"                     S\\R                      S\\\R&                        S\\R&                     S\\R&                     S\\R(                     S\\R&                     S\\R                      S\\   S\\   S\\   S\\R                      S\\   S\\\R"                  4   S\\   S\\\4   4"S jj5       5       r        S U 4S jjrU 4S jrSrU =r $ )! Idefics3ForConditionalGenerationiZ  zlm_head.weightc                 V  > [         TU ]  U5        [        U5      U l        U R                  R
                  U l        [        R                  " UR                  R                  UR                  R                  SS9U l        UR                  R                  U l
        U R                  5         g )NFr   )rB   rC   rc  r0  r:   rm  r	   r   r   rD   rf  lm_headrn  rP   s     r1   rC   )Idefics3ForConditionalGeneration.__init__c  sz     "6*
"kk88yy!3!3!?!?ASASA^A^ejk ,,77 	r0   c                     S nU R                  5       R                  U5      U l        U R                  R                  R                  5       R                  U5      U l        g)z
Enables the gradients for the input embeddings. This is useful for fine-tuning adapter weights while keeping
the model weights fixed.
c                 &    UR                  S5        g rx  ry  r{  s      r1   r~  ^Idefics3ForConditionalGeneration.enable_input_require_grads.<locals>.make_inputs_require_gradsu  r  r0   N)rU  r  r  r0  rh  r  )rQ   r~  s     r1   r  ;Idefics3ForConditionalGeneration.enable_input_require_gradso  sO    	( )-(A(A(C(Y(YZs(t%*.***A*A*V*V*X*n*n%+
'r0   c                 l    U R                   R                  5         U R                  R                  5         g r   r  r  s    r1   r  <Idefics3ForConditionalGeneration.disable_input_require_grads~  r  r0   c                 J    U R                   R                  R                  5       $ r   )r0  rk  rU  r  s    r1   rU  5Idefics3ForConditionalGeneration.get_input_embeddings  s    zz$$99;;r0   c                 N    U R                   R                  R                  U5        g r   )r0  rk  rY  rX  s     r1   rY  5Idefics3ForConditionalGeneration.set_input_embeddings  s    

2259r0   c                     U R                   $ r   r  r  s    r1   get_output_embeddings6Idefics3ForConditionalGeneration.get_output_embeddings  s    ||r0   c                     Xl         g r   r  )rQ   new_embeddingss     r1   set_output_embeddings6Idefics3ForConditionalGeneration.set_output_embeddings  s    %r0   r  r   rs   r"   r   rT   r  r%   labelsr  r   r   r  r   logits_to_keepr   rV   c                 H   Ub  UOU R                   R                  nUb  UOU R                   R                  nUb  UOU R                   R                  nU R                  " SUUUUUUUUU
UUUSS.UD6nUS   n[        U[        5      (       a  [        U* S5      OUnU R                  USS2USS24   5      nSnU	b3  U R                  " SUXR                   R                  R                  S.UD6n[        UUUR                  UR                  UR                  UR                   S9$ )an  
pixel_attention_mask (`torch.Tensor` of shape `(batch_size, image_size, image_size)`, *optional*):
    Mask to avoid performing attention on padding pixel indices.
image_hidden_states (`torch.FloatTensor` of shape `(batch_size, num_channels, image_size, image_size)`):
    The hidden states of the image encoder after modality projection.
labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
    Labels for computing the masked language modeling loss. Indices should either be in `[0, ...,
    config.vocab_size]` or `model.image_token_id` (where `model` is your instance of `Idefics3ForConditionalGeneration`).
    Tokens with indices set to `model.image_token_id` are ignored (masked), the loss is only
    computed for the tokens with labels in `[0, ..., config.vocab_size]`.

Example:

```python
>>> import requests
>>> import torch
>>> from PIL import Image
>>> from io import BytesIO

>>> from transformers import AutoProcessor, AutoModelForVision2Seq
>>> from transformers.image_utils import load_image

>>> # Note that passing the image urls (instead of the actual pil images) to the processor is also possible
>>> image1 = load_image("https://cdn.britannica.com/61/93061-050-99147DCE/Statue-of-Liberty-Island-New-York-Bay.jpg")
>>> image2 = load_image("https://cdn.britannica.com/59/94459-050-DBA42467/Skyline-Chicago.jpg")
>>> image3 = load_image("https://cdn.britannica.com/68/170868-050-8DDE8263/Golden-Gate-Bridge-San-Francisco.jpg")

>>> processor = AutoProcessor.from_pretrained("HuggingFaceM4/Idefics3-8B-Llama3")
>>> model = AutoModelForVision2Seq.from_pretrained("HuggingFaceM4/Idefics3-8B-Llama3", torch_dtype=torch.bfloat16, device_map="auto")

>>> # Create inputs
>>> messages = [
...     {
...         "role": "user",
...         "content": [
...             {"type": "image"},
...             {"type": "text", "text": "In this image, we can see the city of New York, and more specifically the Statue of Liberty."},
...             {"type": "image"},
...             {"type": "text", "text": "What can we see in this image?"},
...         ]
...     },
...     {
...         "role": "user",
...         "content": [
...             {"type": "image"},
...             {"type": "text", "text": "In which city is that bridge located?"},
...         ]
...     }
... ]

>>> prompts = [processor.apply_chat_template([message], add_generation_prompt=True) for message in messages]
>>> images = [[image1, image2], [image3]]
>>> inputs = processor(text=prompts, images=images, padding=True, return_tensors="pt").to(model.device)

>>> # Generate
>>> generated_ids = model.generate(**inputs, max_new_tokens=256)
>>> generated_texts = processor.batch_decode(generated_ids, skip_special_tokens=True)

>>> print(generated_texts[0])
Assistant: There are buildings, trees, lights, and water visible in this image.

>>> print(generated_texts[1])
Assistant: The bridge is in San Francisco.
```NT)r  r   rs   r"   r   rT   r  r%   r  r   r   r  r   r   )r6   r  rf  )r5   r6   r"   r#   r$   r%   r&   )r:   r   r   r   r0  r7  r$  slicer  loss_functionr   rf  r3   r"   r#   r$   r%   )rQ   r  r   rs   r"   r   rT   r  r%   r  r  r   r   r  r   r  r   r   r#   slice_indicesr6   r5   s                         r1   r}   (Idefics3ForConditionalGeneration.forward  sR   j 2C1N-TXT_T_TqTq$8$D $++JjJj 	 &1%<k$++B]B] ** 
)%+'%!5 3/!5)
 
"  
8B>SV8W8W~ot4]kmA}a,?@A%% f9P9P9[9[_eD .#33!//)) ' ; ;
 	
r0   c
                 v   > [         TU ]  " U4UUUUUUUU	S.U
D6nUb  US   S:X  a  XS'   Ub
  S US'   S US'   U$ )N)r"   r   r   r  rT   r  r%   r  r   r  rT   r  )rB   prepare_inputs_for_generation)rQ   r  r"   r   r   r  rT   r  r%   r  r   model_inputsrR   s               r1   r  >Idefics3ForConditionalGeneration.prepare_inputs_for_generation  s{      w<
+)')%!5 3)
 
 $):a)?(1%*+/L(37L/0r0   c                 N   > [         TU ]  " SUUUS.UD6nUR                  US'   U$ )N)r   model_kwargsis_encoder_decoderr%   r&   )rB   #_update_model_kwargs_for_generationr%   )rQ   r   r  r  r   rR   s        r1   r  DIdefics3ForConditionalGeneration._update_model_kwargs_for_generation=  sC    wB 
%1
 	
 /6.I.I*+r0   )r  r  rm  r  r0  rf  )NNNNNNNNNNNNNNr   )NNNNNNNN)!r'   r(   r)   r*   _tied_weights_keysrC   r  r  rU  rY  r  r  r   r   r   r,   r  r   r   r-   r   r   r   r$  r   r  r   r3   r}   r  r  r/   r   r   s   @r1   r  r  Z  s    ++	
1
<:&  151537=A5948;?;?-1$(,0/359&*34!}
E,,-}
 !.}
 u//0	}

 "$u'8'8"9:}
   1 12}
 u001}
 'u'7'78}
 &e&7&78}
 ))*}
 D>}
 $D>}
 'tn}
 !!1!12}
 d^}
  c5<</0!}
" *+#}
$ 
u44	5%}
  }
F ! &R	 	r0   r  )r  r/  rc  rL  )r   )Ar+   dataclassesr   typingr   r   r   r   r   r,   torch.utils.checkpointr	   activationsr   cache_utilsr   
generationr   modeling_attn_mask_utilsr   modeling_flash_attention_utilsr   modeling_outputsr   r   modeling_utilsr   r   processing_utilsr   utilsr   r   r   r   autor   configuration_idefics3r   r   
get_loggerr'   r   r   r3   Moduler8   r   floatr   r   r   r   r   r   r$  r
  r  r  r/  rL  rc  r  r  __all__r&   r0   r1   <module>r     sU    ! 9 9    ! ' ) B B < F & J J  H 
		H	% #Ck #C #CL "C[ "C "CL7ryy 7D %II%<<% 
% <<	%
 U\\*% % %0G)bii G)V		 		 .299 .dU
bii U
r	UU\\ 	U# 	U%,, 	UJbii J(#		 #. *o * *< 
O
 7 O

O
d 
g
+ g

g
T ?,j > 
g'> g
gT xr0   