
from typing import Dict, Iterable, List, Optional, Tuple, Union

import numpy as np

from ...activations import ACT2FN
from ...configuration_utils import PretrainedConfig
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_patch_output_size, select_best_resolution
from ...image_transforms import PaddingMode, convert_to_rgb, pad, resize, to_channel_dimension_format
from ...image_utils import (
    ChannelDimension,
    ImageInput,
    PILImageResampling,
    get_image_size,
    infer_channel_dimension_format,
    is_scaled_image,
    make_flat_list_of_images,
    to_numpy_array,
    valid_images,
    validate_preprocess_arguments,
)
from ...modeling_flash_attention_utils import FlashAttentionKwargs
from ...modeling_utils import PreTrainedModel
from ...processing_utils import ProcessingKwargs, ProcessorMixin, Unpack
from ...tokenization_utils import PreTokenizedInput, TextInput
from ...utils import LossKwargs, TensorType, auto_docstring, can_return_tuple, logging
from ...utils.import_utils import is_torch_available
from ..auto import CONFIG_MAPPING, AutoConfig, AutoTokenizer
from ..llama.configuration_llama import LlamaConfig
from ..llama.modeling_llama import (
    LlamaDecoderLayer,
    LlamaForCausalLM,
    LlamaMLP,
    LlamaModel,
    LlamaPreTrainedModel,
    LlamaRMSNorm,
)
from ..llava.modeling_llava import (
    LlavaCausalLMOutputWithPast,
    LlavaForConditionalGeneration,
    LlavaModel,
    LlavaModelOutputWithPast,
)
from ..llava_next.image_processing_llava_next import divide_to_patches


logger = logging.get_logger(__name__)

if is_torch_available():
    import torch
    from torch import nn


def sequential_experts_gemm(token_states, expert_weights, tokens_per_expert):
    """
Compute the matrix multiplication (GEMM) for each expert sequentially. This approach is computationally inefficient, especially when dealing with a large number of experts.

Args:
    token_states (torch.Tensor): Input tensor of shape (num_tokens, in_features).
    expert_weights (torch.Tensor): Weight tensor of shape (num_experts, in_features, out_features).
    tokens_per_expert (torch.Tensor): Number of tokens assigned to each expert.

Returns:
    torch.Tensor: Output tensor of shape (num_tokens, out_features).
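
Example (a minimal sketch with illustrative shapes; tokens are assumed to be pre-sorted by expert):

    >>> token_states = torch.randn(6, 16)         # 6 tokens with hidden size 16, grouped by expert
    >>> expert_weights = torch.randn(2, 16, 32)   # 2 experts, each mapping 16 -> 32 features
    >>> tokens_per_expert = torch.tensor([4, 2])  # first 4 tokens go to expert 0, last 2 to expert 1
    >>> sequential_experts_gemm(token_states, expert_weights, tokens_per_expert).shape
    torch.Size([6, 32])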
    """
    num_tokens = token_states.shape[0]
    out_features = expert_weights.shape[-1]
    output = torch.zeros(num_tokens, out_features, dtype=token_states.dtype, device=token_states.device)

    cumsum_num_tokens = torch.cumsum(tokens_per_expert, dim=0)
    # Prepend a zero so that the slice [cumsum[i], cumsum[i + 1]) selects the tokens routed to expert i.
    zero_tensor = torch.zeros(1, dtype=torch.long, device=cumsum_num_tokens.device)
    cumsum_num_tokens = torch.cat((zero_tensor, cumsum_num_tokens))

    for expert_num in range(expert_weights.shape[0]):
        start = cumsum_num_tokens[expert_num]
        end = cumsum_num_tokens[expert_num + 1]
        tokens = token_states[start:end]

        out = torch.matmul(tokens, expert_weights[expert_num])
        output[start:end] = out
    return output


class AriaTextConfig(LlamaConfig):
    r"""
This class handles the configuration for the text component of the Aria model.
Instantiating a configuration with the defaults will yield a similar configuration to that of the model of the Aria
[rhymes-ai/Aria](https://huggingface.co/rhymes-ai/Aria) architecture.
This class extends the LlamaConfig to include additional parameters specific to the Mixture of Experts (MoE) architecture.

Args:
    vocab_size (`int`, *optional*, defaults to 32000):
        Vocabulary size of the LLaMA model. Defines the number of different tokens that can be represented by the
        `inputs_ids` passed when calling [`LlamaModel`]
    hidden_size (`int`, *optional*, defaults to 4096):
        Dimension of the hidden representations.
    intermediate_size (`int`, *optional*, defaults to 4096):
        The size of the MLP representations.
    num_hidden_layers (`int`, *optional*, defaults to 32):
        Number of hidden layers in the Transformer decoder.
    num_attention_heads (`int`, *optional*, defaults to 32):
        Number of attention heads for each attention layer in the Transformer decoder.
    num_key_value_heads (`int`, *optional*):
        This is the number of key_value heads that should be used to implement Grouped Query Attention. If
        `num_key_value_heads=num_attention_heads`, the model will use Multi Head Attention (MHA), if
        `num_key_value_heads=1` the model will use Multi Query Attention (MQA) otherwise GQA is used. When
        converting a multi-head checkpoint to a GQA checkpoint, each group key and value head should be constructed
        by meanpooling all the original heads within that group. For more details checkout [this
        paper](https://arxiv.org/pdf/2305.13245.pdf). If it is not specified, will default to
        `num_attention_heads`.
    hidden_act (`str` or `function`, *optional*, defaults to `"silu"`):
        The non-linear activation function (function or string) in the decoder.
    max_position_embeddings (`int`, *optional*, defaults to 2048):
        The maximum sequence length that this model might ever be used with. Llama 1 supports up to 2048 tokens,
        Llama 2 up to 4096, CodeLlama up to 16384.
    initializer_range (`float`, *optional*, defaults to 0.02):
        The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
    rms_norm_eps (`float`, *optional*, defaults to 1e-06):
        The epsilon used by the rms normalization layers.
    use_cache (`bool`, *optional*, defaults to `True`):
        Whether or not the model should return the last key/values attentions (not used by all models). Only
        relevant if `config.is_decoder=True`.
    pad_token_id (`int`, *optional*, defaults to 2):
        Padding token id.
    bos_token_id (`int`, *optional*, defaults to 1):
        Beginning of stream token id.
    eos_token_id (`int`, *optional*, defaults to 2):
        End of stream token id.
    pretraining_tp (`int`, *optional*, defaults to 1):
        Experimental feature. Tensor parallelism rank used during pretraining. Please refer to [this
        document](https://huggingface.co/docs/transformers/main/perf_train_gpu_many#tensor-parallelism) to
        understand more about it. This value is necessary to ensure exact reproducibility of the pretraining
        results. Please refer to [this issue](https://github.com/pytorch/pytorch/issues/76232).
    tie_word_embeddings (`bool`, *optional*, defaults to `False`):
        Whether to tie weight embeddings
    rope_theta (`float`, *optional*, defaults to 10000.0):
        The base period of the RoPE embeddings.
    rope_scaling (`Dict`, *optional*):
        Dictionary containing the scaling configuration for the RoPE embeddings. NOTE: if you apply new rope type
        and you expect the model to work on longer `max_position_embeddings`, we recommend you to update this value
        accordingly.
        Expected contents:
            `rope_type` (`str`):
                The sub-variant of RoPE to use. Can be one of ['default', 'linear', 'dynamic', 'yarn', 'longrope',
                'llama3'], with 'default' being the original RoPE implementation.
            `factor` (`float`, *optional*):
                Used with all rope types except 'default'. The scaling factor to apply to the RoPE embeddings. In
                most scaling types, a `factor` of x will enable the model to handle sequences of length x *
                original maximum pre-trained length.
            `original_max_position_embeddings` (`int`, *optional*):
                Used with 'dynamic', 'longrope' and 'llama3'. The original max position embeddings used during
                pretraining.
            `attention_factor` (`float`, *optional*):
                Used with 'yarn' and 'longrope'. The scaling factor to be applied on the attention
                computation. If unspecified, it defaults to value recommended by the implementation, using the
                `factor` field to infer the suggested value.
            `beta_fast` (`float`, *optional*):
                Only used with 'yarn'. Parameter to set the boundary for extrapolation (only) in the linear
                ramp function. If unspecified, it defaults to 32.
            `beta_slow` (`float`, *optional*):
                Only used with 'yarn'. Parameter to set the boundary for interpolation (only) in the linear
                ramp function. If unspecified, it defaults to 1.
            `short_factor` (`List[float]`, *optional*):
                Only used with 'longrope'. The scaling factor to be applied to short contexts (<
                `original_max_position_embeddings`). Must be a list of numbers with the same length as the hidden
                size divided by the number of attention heads divided by 2
            `long_factor` (`List[float]`, *optional*):
                Only used with 'longrope'. The scaling factor to be applied to long contexts (<
                `original_max_position_embeddings`). Must be a list of numbers with the same length as the hidden
                size divided by the number of attention heads divided by 2
            `low_freq_factor` (`float`, *optional*):
                Only used with 'llama3'. Scaling factor applied to low frequency components of the RoPE
            `high_freq_factor` (`float`, *optional*):
                Only used with 'llama3'. Scaling factor applied to high frequency components of the RoPE
    attention_bias (`bool`, *optional*, defaults to `False`):
        Whether to use a bias in the query, key, value and output projection layers during self-attention.
    attention_dropout (`float`, *optional*, defaults to 0.0):
        The dropout ratio for the attention probabilities.
    mlp_bias (`bool`, *optional*, defaults to `False`):
        Whether to use a bias in up_proj, down_proj and gate_proj layers in the MLP layers.
    head_dim (`int`, *optional*):
        The attention head dimension. If None, it will default to hidden_size // num_heads
    moe_num_experts (`int`, *optional*, defaults to 8):
        The number of experts in the MoE layer.
    moe_topk (`int`, *optional*, defaults to 2):
        The number of top experts to route to for each token.
    moe_num_shared_experts (`int`, *optional*, defaults to 2):
        The number of shared experts.
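
Example (a minimal construction sketch; the values shown are the defaults):

    >>> from transformers import AriaTextConfig
    >>> config = AriaTextConfig(moe_num_experts=8, moe_topk=2)
    >>> config.moe_num_shared_experts
    2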
    """

    model_type = "aria_text"
    base_config_key = "text_config"

    def __init__(
        self,
        intermediate_size: int = 4096,
        moe_num_experts: int = 8,
        moe_topk: int = 2,
        moe_num_shared_experts: int = 2,
        pad_token_id=2,
        **super_kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **super_kwargs)
        self.intermediate_size = intermediate_size
        self.moe_num_experts = moe_num_experts
        self.moe_topk = moe_topk
        self.moe_num_shared_experts = moe_num_shared_experts


class AriaConfig(PretrainedConfig):
    r"""
This class handles the configuration for both vision and text components of the Aria model,
as well as additional parameters for image token handling and projector mapping.
Instantiating a configuration with the defaults will yield a similar configuration to that of the model of the Aria
[rhymes-ai/Aria](https://huggingface.co/rhymes-ai/Aria) architecture.

Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
documentation from [`PretrainedConfig`] for more information.

Args:
    vision_config (`AriaVisionConfig` or `dict`, *optional*):
        Configuration for the vision component.
    vision_feature_layer (`int`, *optional*, defaults to -1):
        The index of the layer to select the vision feature.
    text_config (`AriaTextConfig` or `dict`, *optional*):
        Configuration for the text component.
    projector_patch_to_query_dict (`dict`, *optional*):
        Mapping of patch sizes to query dimensions.
    image_token_index (`int`, *optional*, defaults to 9):
        Index used to represent image tokens.
    initializer_range (`float`, *optional*, defaults to 0.02):
        The standard deviation of the truncated normal initializer for initializing all weight matrices.

Attributes:
    model_type (`str`):
        Type of the model, set to `"aria"`.
    image_token_index (`int`):
        Index used to represent image tokens.
    projector_patch_to_query_dict (`dict`):
        Mapping of patch sizes to query dimensions.
    vision_config (`AriaVisionConfig`):
        Configuration for the vision component.
    text_config (`AriaTextConfig`):
        Configuration for the text component.
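
Example (a minimal construction sketch using the default sub-configurations):

    >>> from transformers import AriaConfig
    >>> config = AriaConfig()
    >>> config.text_config.model_type
    'aria_text'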
    """

    model_type = "aria"
    attribute_map = {"image_token_id": "image_token_index"}
    sub_configs = {"text_config": AriaTextConfig, "vision_config": AutoConfig}

    def __init__(
        self,
        vision_config=None,
        vision_feature_layer: int = -1,
        text_config: AriaTextConfig = None,
        projector_patch_to_query_dict: Optional[Dict] = None,
        image_token_index: int = 9,
        initializer_range: float = 0.02,
        **kwargs,
    ):
        self.image_token_index = image_token_index

        # Convert the keys and values of projector_patch_to_query_dict to integers so that the mapping
        # stays consistent even if it was provided with string keys.
        if projector_patch_to_query_dict is None:
            projector_patch_to_query_dict = {1225: 128, 4900: 256}
        self.projector_patch_to_query_dict = {int(k): int(v) for k, v in projector_patch_to_query_dict.items()}
        self.max_value_projector_patch_to_query_dict = max(self.projector_patch_to_query_dict.values())
        self.vision_feature_layer = vision_feature_layer

        if isinstance(vision_config, dict):
            vision_config["model_type"] = "idefics3_vision"
            vision_config = CONFIG_MAPPING[vision_config["model_type"]](**vision_config)
        elif vision_config is None:
            vision_config = CONFIG_MAPPING["idefics3_vision"]()

        self.vision_config = vision_config
        self.initializer_range = initializer_range

        if isinstance(text_config, dict) and "model_type" in text_config:
            text_config = AriaTextConfig(**text_config)
        elif text_config is None:
            text_config = AriaTextConfig()

        self.text_config = text_config

        super().__init__(**kwargs)


class AriaTextRMSNorm(LlamaRMSNorm):
    pass


class AriaProjectorMLP(nn.Module):
    """
Feed-Forward Network module for the Aria Projector.

Args:
    in_features (`int`):
        Input embedding dimension.
    hidden_features (`int`):
        Hidden dimension of the feed-forward network.
    output_dim (`int`):
        Output dimension.
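
Example (illustrative shapes only):

    >>> mlp = AriaProjectorMLP(in_features=32, hidden_features=64, output_dim=16)
    >>> mlp(torch.randn(2, 10, 32)).shape
    torch.Size([2, 10, 16])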
    """

    def __init__(self, in_features, hidden_features, output_dim):
        super().__init__()
        self.linear_in = nn.Linear(in_features, hidden_features, bias=False)
        self.linear_out = nn.Linear(hidden_features, output_dim, bias=False)
        self.act = ACT2FN["gelu_new"]

    def forward(self, hidden_states):
        hidden_states = self.act(self.linear_in(hidden_states))
        hidden_states = self.linear_out(hidden_states)
        return hidden_states


class AriaCrossAttention(nn.Module):
    """
Aria Cross-Attention module.

Args:
    config (`AriaConfig`):
        The configuration to use.
configdropout_ratec                 .  > [         TU ]  5         UR                  R                  nUR                  R                  nX@l        [        R                  " X3SS9U l        [        R                  " X3SS9U l	        [        R                  " X3SS9U l
        [        R                  " X4SS9U l        [        R                  " X35      U l        [        R                  " U5      U l        [        R                   " U5      U l        [        R                   " U5      U l        g )NFr   T)batch_first)ri   rj   r   hidden_sizenum_attention_heads	num_headsr<   r   q_projk_projv_projMultiheadAttentionmultihead_attnlinearDropoutdropout	LayerNorm
layer_normlayer_norm_kv)rk   r   r   r   r   rm   s        rZ   rj   AriaCrossAttention.__init__Z  s    **66((<<	"iiuEiiuEiiuE !33KX\]ii9zz,/,,{3\\+6r\   c                    U R                  U R                  U5      5      nU R                  U5      nU R                  U5      nU R	                  U5      nU R                  XEXcS9u  pxU R                  U R                  U5      5      nU$ )ai  
Forward pass of the AriaCrossAttention module.

Args:
    key_value_states (`torch.Tensor`):
        Input tensor for key and value.
    hidden_states (`torch.Tensor`):
        Input tensor for query.
    attn_mask (`torch.Tensor`, *optional*, defaults to None):
        Attention mask.

Returns:
    torch.Tensor:
        Output tensor after cross-attention.
	attn_mask)r   r   r   r   r   r   r   r   )	rk   key_value_statesr   r   querykeyvalueattn_output_s	            rZ   r   AriaCrossAttention.forwardk  s      DOOM:;--.>?kk*+,-,,U,Tll4;;{#;<r\   )	r   r   r   r   r   r   r   r   r   )r   r   )rp   rq   rr   rs   rt   r|   r   rj   r   rx   ry   rz   s   @rZ   r   r   Q  s*    7z 7 7 7" r\   r   c                   x   ^  \ rS rSrSrS\4U 4S jjrS	S\R                  S\	\R                     4S jjr
SrU =r$ )
AriaProjectori  z
Aria Projector module.

This module projects vision features into the language model's embedding space, enabling interaction between vision and language components.

Args:
    config (`AriaConfig`):
        Configuration object for the model.
r   c                   > [         TU ]  5         UR                  U l        UR                  R
                  U l        UR                  R                  U l        UR                  R
                  U l	        UR                  R
                  U l        UR                  R
                  U l        [        R                  " [        R                   " UR"                  U R                  5      5      U l        ['        U5      U l        [        R*                  " U R                  5      U l        [/        U R                  U R                  U R                  5      U l        g r   )ri   rj   r   patch_to_query_dictr   r   r   r   r   kv_dimra   r   r   r<   	ParameterrF   rG   r   r   r   
cross_attnr   r   r   feed_forwardrk   r   rm   s     rZ   rj   AriaProjector.__init__  s     	#)#G#G !//;;--AA**66%11== ,,88\\%++f.\.\^b^n^n"op
,V4,,t'7'78,T-=-=t?S?SUYUdUder\   r   r   c                 J   UR                   S   UR                   S   pCX@R                  R                  5       ;  a*  [        SU SU R                  R                  5        S35      eU R                  U   nU R                  SU R                  S5      R                  USS5      nUbM  UR                  U R                  S5      nUR                  S5      R                  SUR                  S5      S5      nU R                  XUS9nU R                  U R                  U5      5      nU$ )	aH  
Forward pass of the Projector module.

Args:
    key_value_states (`torch.Tensor`):
        Input tensor of shape (batch_size, num_patches, kv_dim).
    attn_mask (`torch.Tensor`, *optional*, default is None):
        Attention mask.

Returns:
    `torch.Tensor`: Output tensor of shape (batch_size, query_number, output_dim).
r   rD   zNumber of patches z: not found in patch_to_query_dict amongst possible values .Nr>   r   )rE   r   keysKeyErrorr   	unsqueezerepeatrepeat_interleaver   expandsizer   r   r   )	rk   r   r   
batch_sizenum_patches	query_numqueriesattention_outrY   s	            rZ   r   AriaProjector.forward  s9    #3"8"8";=M=S=STU=VK66;;==$[M1klp  mE  mE  mJ  mJ  mL  lM  MN  O  ,,[9	**Zi(2215<<ZAN !33DNNAFI!++A.55b',,q/2NI(8YW >?
r\   )
r   r   r   r   r   r   r   r   r   r   r   )rp   rq   rr   rs   rt   r|   rj   rF   Tensorr   r   rx   ry   rz   s   @rZ   r   r     s<    ff( %,,AW  r\   r   c                      ^  \ rS rSrSr/ SQrSSSSSSSSS	S\R                  4S
\\	\
      S\\	\
      S\S\S\\	\\\4         S\\   S\\   S\S\\\
4   S\\   S\4U 4S jjjrSSSSSSSSSSS\R"                  S4S\\\	\   4   S
\\\
\	\
   4      S\\\
\	\
   4      S\\   S\\   S\\   S\\   S\\   S\\
   S\\   S\S\\\\4      S\\   S\\\\4      4S jjrS\R.                  S\S\S\R.                  4S jrS\R.                  S\S\S\R.                  4S  jr\R8                  S!SS4S\R:                  S"\\\\\4   \\\\4      4   S#\S$\\
\\
   4   S\\\\4      S\\\\4      S\R:                  4S% jjrS\R.                  S&\	\\\4      S'\S\S\S\S\	\R.                     4S( jr S)r!U =r"$ )*AriaImageProcessori  a  
A vision processor for the Aria model that handles image preprocessing.
Initialize the AriaImageProcessor.

Args:
    image_mean (`list`, *optional*, defaults to [0.5, 0.5, 0.5]):
        Mean values for normalization.
    image_std (`list`, *optional*, defaults to [0.5, 0.5, 0.5]):
        Standard deviation values for normalization.
    max_image_size (`int`, *optional*, defaults to 980):
        Maximum image size.
    min_image_size (`int`, *optional*, defaults to 336):
        Minimum image size.
    split_resolutions (`list`, *optional*, defaults to a list of optimal,resolutions as tuples):
        The optimal resolutions for splitting the image.
    split_image (`bool`, *optional*, defaults to `False`):
        Whether to split the image.
    do_convert_rgb (`bool`, *optional*, defaults to `True`):
        Whether to convert the image to RGB.
    do_rescale (`bool`, *optional*, defaults to `True`):
        Whether to rescale the image by the specified scale `rescale_factor`. Can be overridden by `do_rescale` in
        the `preprocess` method.
    rescale_factor (`int` or `float`, *optional*, defaults to `1/255`):
        Scale factor to use if rescaling the image. Can be overridden by `rescale_factor` in the `preprocess`
        method.
    do_normalize (`bool`, *optional*, defaults to `True`):
        Whether to normalize the image.
    resample (PILImageResampling, *optional*, defaults to `BICUBIC`):
        The resampling filter to use if resizing the image.
pixel_values
pixel_mask	num_cropsN  iP  FTgp?
image_mean	image_stdmax_image_sizemin_image_sizesplit_resolutionssplit_imagedo_convert_rgb
do_rescalerescale_factordo_normalizeresamplec                   > [         TU ]  " S0 UD6  Uc  / SQnUc  / SQnX0l        X@l        Xl        X l        X`l        Uc#  / SQnU Vs/ s H  oS   S-  US   S-  4PM     nnXPl        Xpl        Xl	        Xl
        Xl        Xl        g s  snf )N)      ?r   r   ))rD   r,   )rD   r	   )rD      )rD      )rD      )rD      )rD   ro   )r,   r   )r,   r	   )r,   r,   )r,   rD   )r	   rD   )r	   r,   )r   rD   )r   r,   )r   rD   )r   rD   )r   rD   )ro   rD   r     rD   rh   )ri   rj   r   r   r   r   r   r   r   r   r   r   r   )rk   r   r   r   r   r   r   r   r   r   r   r   r   elrm   s                 rZ   rj   AriaImageProcessor.__init__  s     	"6"(J'I,,$"&$ !yFW XFWQ%#+r!us{!;FW X!2,$,(  !Ys   	B	ptimagesreturn_tensorsdata_formatinput_data_formatc           
      n   Ub  UOU R                   nUb  UOU R                  nUb  UOU R                  nUb  UOU R                  nUb  UOU R                  nUb  UOU R
                  nUb  UOU R                  nU	b  U	OU R                  n	U
b  U
OU R                  n
Ub  UOU R                  nUS;  a  [        S5      e[        U5      n[        U5      (       d  [        S5      e[        U
UUUUU	S9  U(       a  U Vs/ s H  n[        U5      PM     nnU Vs/ s H  n[        U5      PM     nnU(       a(  [!        US   5      (       a  ["        R%                  S5        Uc  ['        US   5      n/ n/ nSnU GH  nU(       a  U R)                  UU R*                  UUUUS9nOU/nUb  [-        U5      U:  a  [-        U5      nU GH.  n[/        U5      u  nnU[1        UU5      -  nUU:  a  [1        [3        UU-  5      U5      U4nOU[1        [3        UU-  5      U5      4n[5        UUUUUS	9nUUS   -
  UUS
   -
  nn[7        USU4SU44UUS9n[8        R:                  " XD4[<        S9nS
USUS   2SUS
   24'   UR?                  U5        U(       a  U RA                  UXS9nU
(       a8  U RC                  UU R                   U R                  UUS9nUb  [E        UX5      OUnUR?                  U5        GM1     GM     [G        [8        RH                  " USS9[8        RH                  " USS9US.US9$ s  snf s  snf )a  
Process a list of images.

Args:
    images (ImageInput or list of ImageInput):
        The input image or a list of images.
    image_mean (`list`, *optional*, defaults to [0.5, 0.5, 0.5]):
        Mean values for normalization.
    image_std (`list`, *optional*, defaults to [0.5, 0.5, 0.5]):
        Standard deviation values for normalization.
    max_image_size (`int`, *optional*, defaults to `self.max_image_size` (980)):
        Maximum image size.
    min_image_size (`int`, *optional*, defaults to `self.min_image_size` (336)):
        Minimum image size.
    split_image (`bool`, *optional*, defaults to `self.split_image` (False)):
        Whether to split the image.
    do_convert_rgb (`bool`, *optional*, defaults to `self.do_convert_rgb` (True)):
        Whether to convert the image to RGB.
    do_rescale (`bool`, *optional*, defaults to `self.do_rescale`):
        Whether to rescale the image.
    rescale_factor (`float`, *optional*, defaults to `self.rescale_factor`):
        Rescale factor to rescale the image by if `do_rescale` is set to `True`.
    do_normalize (`bool`, *optional*, defaults to `self.do_normalize` (True)):
        Whether to normalize the image.
    resample (PILImageResampling, *optional*, defaults to `self.resample` (BICUBIC)):
        The resampling filter to use if resizing the image.
    return_tensors (`str` or `TensorType`, *optional*, defaults to "pt"):
        The type of tensor to return.
    data_format (`str` or `ChannelDimension`, *optional*):
        The channel dimension format for the output image. Can be one of:
            - `"channels_first"` or `ChannelDimension.FIRST`:
                image in (num_channels, height, width) format.
            - `"channels_last"` or `ChannelDimension.LAST`:
                image in (height, width, num_channels) format.
        If unset, will use same as the input image.
    input_data_format (`str` or `ChannelDimension`, *optional*):
        The channel dimension format for the input image. Can be one of:
            - `"channels_first"` or `ChannelDimension.FIRST`:
                image in (num_channels, height, width) format.
            - `"channels_last"` or `ChannelDimension.LAST`:
                image in (height, width, num_channels) format.
        If unset, will use the inferred format of the input image.

Returns:
    BatchFeature:
        A BatchFeature object containing:
        - 'pixel_values':
            Tensor of processed image pixel values.
        - 'pixel_mask':
            Boolean pixel mask. This mask is a 2D tensor of shape (max_image_size, max_image_size) where:
            - True (1) values indicate pixels that belong to the original resized image.
            - False (0) values indicate pixels that are part of the padding.
          The mask helps distinguish between actual image content and padded areas in subsequent processing steps.
        - 'num_crops':
            The maximum number of crops across all images.
Nr  r   z(max_image_size must be either 490 or 980zkInvalid image type. Must be of type PIL.Image.Image, numpy.ndarray, torch.Tensor, tf.Tensor or jax.ndarray.)r   r   r   r   r   r   r   zIt looks like you are trying to rescale already rescaled images. If the input images have pixel values between 0 and 1, set `do_rescale=False` to avoid rescaling them again.)r  r  )r   r  r  rD   )r@   )imagescaler  )axisr   datatensor_type)%r   r   r   r   r   r   r   r   r   r   
ValueErrorr   r   r   r   r   r   loggerwarning_oncer   get_image_patchesr   lenr   r   rw   r   r   nprG   boolappendrescale	normalizer   r   stack)rk   r  r   r   r   r   r   r   r   r   r   r   r  r  r  r  r   pixel_masksr   crop_images
crop_imagehwr  new_sizecrop_image_resizedpadding_bottompadding_rightcrop_image_paddedr   s                                 rZ   
preprocessAriaImageProcessor.preprocess  s   R $.#9Zt
!*!6IDNN	+9+E4K^K^+9+E4K^K^%0%<k$BRBR+9+E4K^K^#-#9Zt
+9+E4K^K^'3'?|TEVEV'38+GHH)&1F##: 
 	&%!!)	
 9?@nU+F@ 6<<VE.'V</&)44s
 $ >vay I	E"44**" 1&7 5   %g C$4y$@,	)
%j11&Q26 #CE	NN C^TH .CE	NN0STH%+% 1&7&" 1?!0Ln_ghi_jNj$'&(1m*<= 1&7	%!  XX~&FdS
;<
=Xa[=-HQK-78"":.(,/~ )5 )%  (,)$5*; )7 )% '2 44E{f. & ##$56c * B  "A > hh{;&
 '
 	
i A =s   ;L-L2r  target_resolutionreturnc                 :    [        XU5      u  pV[        XU4X4S9nU$ )a  
Resizes an image to a target resolution while maintaining aspect ratio.

Args:
    image (np.array):
        The input image.
    target_resolution (tuple):
        The target resolution (height, width) of the image.
    resample (`PILImageResampling`):
        Resampling filter to use if resizing the image.
    input_data_format (`ChannelDimension` or `str`):
        The channel dimension format of the input image.

Returns:
    np.array: The resized and padded image.
r   r  )r   r   )rk   r  r(  r   r  
new_height	new_widthresized_images           rZ   _resize_for_patching'AriaImageProcessor._resize_for_patching  s-    & !6ePa b
 u9&=vr\   c                     Uu  pE[        XU5      u  pg[        XW-
  S5      u  p[        XF-
  S5      u  pU R                  XX-   4XU	-   44S9nU$ )zE
Pad an image to a target resolution while maintaining aspect ratio.
r,   )padding)r   divmodr   )rk   r  r(  r  target_heighttarget_widthr,  r-  paste_xr_xpaste_yr_ypadded_images                rZ   _pad_for_patching$AriaImageProcessor._pad_for_patching  si     '8# 5ePa b
l6:m8!<xx'-0H7^aTaJb/cxdr\           r2  modeconstant_valuesc                 ^   [        U[        5      (       d  [        U5      S:w  a  [        XX4XV5      $ Uc  [	        U5      n[
        R                  S[
        R                  S[
        R                  S[
        R                  S0n[        R                  " XXs   US9nUb  [        XU5      nU$ UnU$ )a  
Pads the `image` with the specified `padding` and `mode`. Padding can be in the (`height`, `width`)
dimension of in the (`num_patches`) dimension. In the second case an iterable if tuples is expected
as input.

Args:
    image (`np.ndarray`):
        The image to pad.
    padding (`int` or `Tuple[int, int]` or `Iterable[Tuple[int, int]]`):
        Padding to apply to the edges of the height, width axes. Can be one of three formats:
        - `((before_height, after_height), (before_width, after_width))` unique pad widths for each axis.
        - `((before, after),)` yields same before and after pad for height and width.
        - `(pad,)` or int is a shortcut for before = after = pad width for all axes.
    mode (`PaddingMode`):
        The padding mode to use. Can be one of:
            - `"constant"`: pads with a constant value.
            - `"reflect"`: pads with the reflection of the vector mirrored on the first and last values of the
            vector along each axis.
            - `"replicate"`: pads with the replication of the last value on the edge of the array along each axis.
            - `"symmetric"`: pads with the reflection of the vector mirrored along the edge of the array.
    constant_values (`float` or `Iterable[float]`, *optional*):
        The value to use for the padding if `mode` is `"constant"`.
    data_format (`str` or `ChannelDimension`, *optional*):
        The channel dimension format for the output image. Can be one of:
            - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
            - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
        If unset, will use same as the input image.
    input_data_format (`str` or `ChannelDimension`, *optional*):
        The channel dimension format for the input image. Can be one of:
            - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
            - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
        If unset, will use the inferred format of the input image.

Returns:
    `np.ndarray`: The padded image.

r   constantreflectedge	symmetric)r>  r?  )r   rw   r  r   r   r   CONSTANTREFLECT	REPLICATE	SYMMETRICr  r   )rk   r  r2  r>  r?  r  r  padding_mode_mappings           rZ   r   AriaImageProcessor.pad  s    ` gs##s7|q'8utk]]$ >u E   *!!6!!;	 
 u,@,FXghR]Ri'<MN 	  pu 	 r\   grid_pinpoints
patch_sizec           	         [        U[        5      (       d  [        S5      eUn[        XS9n[	        X5      n	U R                  XXFS9n
U R                  XUS9n[        XUS9nU Vs/ s H  n[        XUS9PM     nnU$ s  snf )a  
Process an image with variable resolutions by dividing it into patches.

Args:
    image (`np.array`):
        The input image to be processed.
    grid_pinpoints (List[Tuple[int, int]]):
        A list of possible resolutions as tuples.
    patch_size (`int`):
        Size of the patches to divide the image into.
    resample (`PILImageResampling`):
        Resampling filter to use if resizing the image.
    data_format (`ChannelDimension` or `str`):
        The channel dimension format for the output image.
    input_data_format (`ChannelDimension` or `str`):
        The channel dimension format of the input image.

Returns:
    `List[np.array]`: A list of NumPy arrays containing the processed image patches.
z6grid_pinpoints must be a list of possible resolutions.)channel_dimr+  )r  )rL  r  )rN  input_channel_dim)	r   list	TypeErrorr   r   r/  r;  r;   r   )rk   r  rK  rL  r   r  r  possible_resolutions
image_sizebest_resolutionr.  r:  patchespatchs                 rZ   r  $AriaImageProcessor.get_image_patchesA  s    : .$//TUU-#EI
0R11X 2 
 --m`q-r#L[lm
 !
  (Zkl  	 
 	
s   %A=)r   r   r   r   r   r   r   r   r   r   r   )#rp   rq   rr   rs   rt   model_input_namesr   BICUBICr   r   r   rw   r   r  r   rj   r   FIRSTr   strr'   r&  r  arraytupler/  r;  r   rE  ndarrayr   r   r  rx   ry   rz   s   @rZ   r   r     s   > D -1+/!!=A&+)-,3'+'9'A'A"!T%[)"! DK("! 	"!
 "! $DsCx$9:"! d^"! !"! "! c5j)"! tn"! %"! "!N ;?9=(,(,&*)-%)*.'+'+;?2B2H2HDHD
j$z"223D
 U5$u+#567D
 E%e"456	D

 !D
 !D
 d^D
 !D
 TND
 !D
 tnD
 %D
 !sJ!78D
 ./D
 $E#/?*?$@AD
LXX27Vf	4XX27L\	( (009<>BDH@zz@ sE#s(OXeCHo-FFG@ 	@
 uhuo56@ eC)9$9:;@ $E#/?*?$@A@ 
@D0xx0 U38_-0 	0
 %0 &0 ,0 
bhh0 0r\   r   c                   >    \ rS rSrSS0SSS.\R
                  S.rSrg)	AriaProcessorKwargsit  r2  Fr   )r   r   )text_kwargsimages_kwargsr  rh   N)rp   rq   rr   rs   r'   PYTORCH	_defaultsrx   rh   r\   rZ   r`  r`  t  s-     u
 " 
 %,,	Ir\   r`  F)totalc                      ^  \ rS rSrSrSS/rSS/rSrSr    SS\	\
\4   S\\   S\\\	\\4   \4      4U 4S	 jjjr   SS
\	\\\\   \\   4   S\\   S\\   S\4S jjrS rS r\S 5       rSrU =r$ )AriaProcessori  a  
AriaProcessor is a processor for the Aria model which wraps the Aria image preprocessor and the LLama slow tokenizer.

Args:
    image_processor (`AriaImageProcessor`, *optional*):
        The AriaImageProcessor to use for image preprocessing.
    tokenizer (`PreTrainedTokenizerBase`, *optional*):
        An instance of [`PreTrainedTokenizerBase`]. This should correspond with the model's text model. The tokenizer is a required input.
    chat_template (`str`, *optional*):
        A Jinja template which will be used to convert lists of messages in a chat into a tokenizable string.
    size_conversion (`Dict`, *optional*):
        A dictionary indicating size conversions for images.
image_processor	tokenizerchat_templatesize_conversionr   r/   c                 ,  > Uc  SSS.nUR                  5        VVs0 s H  u  pV[        U5      U_M     snnU l        UR                  U l        UR                  U l        Ub  UR
                  c  UR                  Ul        [        TU ]!  XUS9  g s  snnf )Nr   r   r
  )rj  )	r   rw   rk  image_tokenr   	pad_token	unk_tokenri   rj   )rk   rh  ri  rj  rk  r   r   rm   s          rZ   rj   AriaProcessor.__init__  s     "$'c2O6E6K6K6MN6MdaA	6MN$00'66 Y%8%8%@"+"5"5I=Q  Os   Btextr  r   r)  c                    U R                   " [        4SU R                  R                  0UD6n[	        U[
        5      (       a  U/nO8[	        U[        5      (       d#  [	        US   [
        5      (       d  [        S5      eUb  U R                  " U40 US   D6nU R                  UR                  R                  S      n/ n	UR                  S5      U-  n
U HQ  nUR                  U R                  R                  U R                  R                  U
-  5      nU	R                  U5        MS     O0 nUn	US   R                  S	S5      nU R                  " U	40 US   D6nU R!                  XS
/S9  [#        0 UEUEUS9$ )a  
Main method to prepare for the model one or several sequences(s) and image(s).

Args:
    text (`TextInput`, `PreTokenizedInput`, `List[TextInput]`, `List[PreTokenizedInput]`):
        The sequence or batch of sequences to be encoded. Each sequence can be a string or a list of strings
        (pretokenized string). If the sequences are provided as list of strings (pretokenized), you must set
        `is_split_into_words=True` (to lift the ambiguity with a batch of sequences).
    images (`ImageInput`):
        The image or batch of images to be prepared. Each image can be a PIL image, NumPy array or PyTorch
        tensor. Both channels-first and channels-last formats are supported.


Returns:
    [`BatchFeature`]: A [`BatchFeature`] with the following fields:
    - **input_ids** -- List of token ids to be fed to a model. Returned when `text` is not `None`.
    - **attention_mask** -- List of indices specifying which tokens should be attended to by the model (when
    `return_attention_mask=True` or if *"attention_mask"* is in `self.model_input_names` and if `text` is not
    `None`).
    - **pixel_values** -- Pixel values to be fed to a model. Returned when `images` is not `None`.
    - **pixel_mask** -- Pixel mask to be fed to a model. Returned when `images` is not `None`.
tokenizer_init_kwargsr   zAInvalid input text. Please provide a string, or a list of stringsNrb  r,   r   ra  r  r  )
modalitiesr  )_merge_kwargsr`  ri  init_kwargsr   r[  rP  r  rh  rk  r   rE   popreplacerm  r  _check_special_mm_tokensr   )rk   rq  r  audiovideosr   output_kwargsimage_inputstokens_per_imageprompt_stringsr   sampler  text_inputss                 rZ   __call__AriaProcessor.__call__  s   < **
"&.."<"<
 
 dC  6DD$''
47C0H0H`aa//0L
  $33L4M4M4S4STU4VWN$((58HHI(B(BDNND^D^ajDjk%%f- 
 L!N&}599:JDQnn^T}]7ST%%nwi%X!@K!@<!@n]]r\   c                 :    U R                   R                  " U0 UD6$ )z
This method forwards all its arguments to LlamaTokenizerFast's [`~PreTrainedTokenizer.batch_decode`]. Please
refer to the docstring of this method for more information.
)ri  batch_decoderk   argsr   s      rZ   r  AriaProcessor.batch_decode  s    
 ~~**D;F;;r\   c                 :    U R                   R                  " U0 UD6$ )z
This method forwards all its arguments to LlamaTokenizerFast's [`~PreTrainedTokenizer.decode`]. Please refer to
the docstring of this method for more information.
)ri  decoder  s      rZ   r  AriaProcessor.decode  s    
 ~~$$d5f55r\   c                     U R                   R                  nU R                  R                  nU Vs/ s H  o3S:w  d  M
  UPM     nn[        [        R                  X-   5      5      $ s  snf )Nr   )ri  rX  rh  rP  r   fromkeys)rk   tokenizer_input_namesimage_processor_input_namesnames       rZ   rX  AriaProcessor.model_input_names  sb     $ @ @&*&:&:&L&L# 9T&k8S_jWjt8S#&kDMM"7"UVWW 'ls
   	A&A&)rm  r   rk  )NNNN)NNN)rp   rq   rr   rs   rt   
attributesvalid_kwargsimage_processor_classtokenizer_classr   r/   r[  r   r   r   rw   rj   r%   r$   r   r   r#   r`  r   r  r  r  propertyrX  rx   ry   rz   s   @rZ   rg  rg    s    $[1J#%67L0%O /3'+BFR +,R  }	R
 "$uUCZ'8#'=">?R R* (,>^I0$y/4HYCZZ[>^ $>^ ,->^ 
>^@<6 X Xr\   rg  c                   4   ^  \ rS rSrSrS\4U 4S jjrSrU =r$ )AriaSharedExpertsMLPi   a  
Shared Expert MLP for shared experts.

Unlike routed experts, shared experts process all tokens without routing.
This class reconfigures the intermediate size in comparison to the LlamaMLP.

Args:
    config (`AriaTextConfig`): Configuration object for the Aria language model.
r   c                 `   > [         TU ]  U 5        UR                  UR                  -  U l        g r   )ri   rj   rb   re   r   s     rZ   rj   AriaSharedExpertsMLP.__init__  s)    !'!9!9F<Y<Y!Yr\   )rb   )	rp   rq   rr   rs   rt   r^   rj   rx   ry   rz   s   @rZ   r  r     s    Z~ Z Zr\   r  c                   2   ^  \ rS rSrSrU 4S jrS rSrU =r$ )AriaGroupedExpertsGemmi  a  
Grouped GEMM (General Matrix Multiplication) module for efficient expert computation.
This module utilizes the grouped_gemm library (https://github.com/fanshiqing/grouped_gemm)
for optimized performance. If the grouped_gemm library is not installed, it gracefully
falls back to a sequential GEMM implementation, which may be slower but ensures
functionality.

Args:
    in_features (`int`):
        Number of input features.
    out_features (`int`):
        Number of output features.
    groups (`int`):
        Number of expert groups.
c                    > [         TU ]  5         Xl        X l        X0l        [
        R                  " [        R                  " X1U5      5      U l	        g r   )
ri   rj   r   rQ   groupsr<   r   rF   emptyweight)rk   r   rQ   r  rm   s       rZ   rj   AriaGroupedExpertsGemm.__init__!  s:    &(ll5;;vL#QRr\   c                 L    [        UU R                  UR                  5       5      $ )a-  
Perform grouped matrix multiplication.

Args:
    input (`torch.Tensor`):
        Input tensor of shape (num_tokens, in_features).
    tokens_per_expert (`torch.Tensor`):
        Number of tokens assigned to each expert.

Returns:
    torch.Tensor: Output tensor of shape (num_tokens, out_features).
)r[   r  cpu)rk   inputrO   s      rZ   r   AriaGroupedExpertsGemm.forward(  s'     'KK!!#
 	
r\   )r  r   rQ   r  r   rz   s   @rZ   r  r    s     S
 
r\   r  c                   >   ^  \ rS rSrSrS\SS4U 4S jjrS rSrU =r	$ )	AriaGroupedExpertsMLPi<  z~
Grouped MLP module for Mixture of Experts.

Args:
    config (`AriaTextConfig`):
        Configuration object for the model.
r   r)  Nc                    > [         TU ]  5         Xl        [        UR                  UR
                  S-  UR                  5      U l        [        UR
                  UR                  UR                  5      U l        g )Nr,   )	ri   rj   r   r  r   rb   rc   fc1fc2r   s     rZ   rj   AriaGroupedExpertsMLP.__init__E  s_    )&*<*<f>V>VYZ>Z\b\r\rs)&*B*BFDVDVX^XnXnor\   c                     U R                  X5      n[        R                  " USSS9u  pE[        R                  R                  U5      U-  nU R                  X25      nU$ )z
Forward pass of the Grouped MLP.

Args:
    permuted_tokens (torch.Tensor): Permuted input tokens.
    tokens_per_expert (torch.Tensor): Number of tokens assigned to each expert.

Returns:
    torch.Tensor: Output tensor after passing through the MLP.
r,   r>   rB   )r  rF   chunkr<   
functionalsilur  )rk   permuted_tokensrO   
fc1_output
projectiongate
fc2_outputs          rZ   r   AriaGroupedExpertsMLP.forwardK  sT     XXoA
 ;;z1"=
]]''
3d:
XXj<
r\   )r   r  r  )
rp   rq   rr   rs   rt   r^   rj   r   rx   ry   rz   s   @rZ   r  r  <  s(    p~ p$ p r\   r  c                   n   ^  \ rS rSrSrS\4U 4S jjrS\R                  S\R                  4S jr	Sr
U =r$ )	AriaTextMoELayeri^  z
Aria Text Mixture of Experts (MoE) Layer.

This layer applies a gating mechanism to route input tokens to different experts.

Args:
    config (`AriaTextConfig`):
        Configuration object for the text component of the model.
r   c                    > [         TU ]  5         [        R                  " UR                  UR
                  SS9U l        [        U5      U l        [        U5      U l
        Xl        g NFr   )ri   rj   r<   r   r   rc   routerr  expertsr  shared_expertsr   r   s     rZ   rj   AriaTextMoELayer.__init__i  sM    ii 2 2F4J4JQVW,V426:r\   r   r)  c                    UR                   nUR                  SUR                  S5      5      nU R                  U5      n[        R
                  " X0R                  R                  SS9u  pE[        R                  R                  USS9nUR                  n[        R                  " UR                  5       R                  [        R                  5      U R                  R                   SU R                  R                   S-
  S9R                  U5      nUn	U	R                  S5      n
[        R"                  " U
5      nUR%                  SXR                  R                  -  5      nU R'                  X5      n[        R(                  " UR                   S   U R                  R                  -  UR                  S5      4UR                  UR*                  S9nUR-                  SX5        UR                  SU R                  R                  UR                  S5      5      nXR/                  S5      -  R1                  SS9R                  U5      nU R3                  UR                  U5      5      nUU-   $ )a  
Forward pass of the MoE Layer.

Args:
    hidden_states (`torch.Tensor`):
        Input tensor of shape (batch_size, sequence_length, hidden_size).

Returns:
    torch.Tensor: Output tensor after passing through the MoE layer.

Process:
1. Route tokens to experts using the router.
2. Permute tokens based on routing decisions.
3. Process tokens through experts.
4. Unpermute and combine expert outputs.
5. Add shared expert output to the final result.
r>   rD   )r   rC   rB   r   )binsminr   r?   )rE   viewr   r  rF   topkr   rd   r<   r  softmaxr@   histcflattentofloat32rc   argsortindex_selectr  rG   rA   index_copy_r   sumr  )rk   r   original_shapelogits
top_logitstop_indicesscoresoriginal_dtyperO   indicesflatten_indicessorted_indicesr  expert_outputunpermuted_tokensrR   shared_expert_outputs                    rZ   r   AriaTextMoELayer.forwardq  s   $ ',,%**2}/A/A"/EF ]+"'**V{{7K7KQR"S
&&zr&:$**!KK!$$U]]3,,++a/	

 "^
 	  ",,r*7'44Q++J^J^8^_ _H "KK\\!_t{{333]5G5G5JK%% ''

 	%%aG-222t{{7K7K]M_M_`aMbc#&6&6r&::??A?FKKN[  $22=3E3En3UV,,,r\   )r   r  r  r  )rp   rq   rr   rs   rt   r^   rj   rF   r   r   rx   ry   rz   s   @rZ   r  r  ^  s4    ~ 9-U\\ 9-ell 9- 9-r\   r  c                   8   ^  \ rS rSrSrS\S\4U 4S jjrSrU =r	$ )AriaTextDecoderLayeri  aG  
Aria Text Decoder Layer.

This class defines a single decoder layer in the language model, incorporating self-attention and Mixture of Experts (MoE) feed-forward network.

Args:
    config (`AriaTextConfig`):
        Configuration object for the text component of the model.
    layer_idx (`int`):
        Index of the layer.
r   	layer_idxc                 D   > [         TU ]  U 5        [        U5      U l        g r   )ri   rj   r  mlprk   r   r  rm   s      rZ   rj   AriaTextDecoderLayer.__init__  s    #F+r\   )r  )
rp   rq   rr   rs   rt   r^   rw   rj   rx   ry   rz   s   @rZ   r  r    s     
,~ ,# , ,r\   r  c                   B    \ rS rSr\rSrSS/rSrSr	Sr
SrSrSrS rS	rg
)AriaTextPreTrainedModeli  modelr  r  Tpast_key_valuesFc                    U R                   R                  n[        U[        R                  5      (       aW  UR
                  R                  R                  SUS9  UR                  b%  UR                  R                  R                  5         g g [        U[        R                  5      (       ad  UR
                  R                  R                  SUS9  UR                  b2  UR
                  R                  UR                     R                  5         g g [        U[        5      (       a&  UR
                  R                  R                  S5        g [        U[        5      (       a%  UR
                  R                  R                  SUS9  g g )Nr=  meanstd      ?)r   r   r   r<   r   r  r  normal_r   zero_	Embeddingpadding_idxr   fill_r  rk   moduler  s      rZ   _init_weights%AriaTextPreTrainedModel._init_weights  s   kk++fbii((MM&&CS&9{{&  &&( '--MM&&CS&9!!-""6#5#56<<> .00MM$$S) 677MM&&CS&9 8r\   rh   N)rp   rq   rr   rs   r^   config_classbase_model_prefix_no_split_modulessupports_gradient_checkpointing_skip_keys_device_placement_supports_flash_attn_2_supports_sdpa_supports_cache_class_supports_attention_backendr  rx   rh   r\   rZ   r  r    sA    !L/1IJ&*#"3"N "&:r\   r  c                   *    \ rS rSr\rSrSrSrS r	Sr
g)AriaPreTrainedModeli   Fc                    U R                   R                  n[        U[        R                  5      (       aW  UR
                  R                  R                  SUS9  UR                  b%  UR                  R                  R                  5         g g [        U[        R                  5      (       a  UR                  5         g [        U[        R                  5      (       aJ  UR
                  R                  R                  S5        UR                  R                  R                  5         g [        U[        5      (       a)  [        R                  R!                  UR"                  US9  g g )Nr=  r  r  )r  )r   r   r   r<   r   r  r  r  r   r  r   _reset_parametersr   r  r   inittrunc_normal_r   r  s      rZ   r  !AriaPreTrainedModel._init_weights  s    kk++fbii((MM&&CS&9{{&  &&( ' 5 566$$&--MM$$S)KK""$..GG!!&,,C!8 /r\   rh   N)rp   rq   rr   rs   r|   r  r  _supports_static_cacher  r  rx   rh   r\   rZ   r  r    s    L""'9r\   r  c                   0   ^  \ rS rSrS\4U 4S jjrSrU =r$ )AriaTextModeli  r   c           	         > [         TU ]  U5        [        R                  " [	        UR
                  5       Vs/ s H  n[        X5      PM     sn5      U l        SU l        U R                  5         g s  snf )NF)
ri   rj   r<   
ModuleListrK   num_hidden_layersr  layersgradient_checkpointing	post_initr  s      rZ   rj   AriaTextModel.__init__  s_     mmFKFLdLdFefFe!&4Fef
 ',# gs   A1)r
  r	  )rp   rq   rr   rs   r^   rj   rx   ry   rz   s   @rZ   r  r    s    ~  r\   r  c                       \ rS rSrSrg)KwargsForCausalLMi  rh   Nr   rh   r\   rZ   r  r    s    3r\   r  c                   L   ^  \ rS rSrS/rS\4U 4S jjr\U 4S j5       rSr	U =r


class AriaTextForCausalLM(AriaTextPreTrainedModel, LlamaForCausalLM):
    _tied_weights_keys = ["lm_head.weight"]

    def __init__(self, config: AriaTextConfig):
        super().__init__(config)
        self.model = AriaTextModel(config)
        self.vocab_size = config.vocab_size
        self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False)

        # Initialize weights and apply final processing
        self.post_init()

    def forward(self, **super_kwargs):
        return super().forward(**super_kwargs)


class AriaCausalLMOutputWithPast(LlavaCausalLMOutputWithPast):
    pass


class AriaModelOutputWithPast(LlavaModelOutputWithPast):
    pass


class AriaModel(LlavaModel):
    def __init__(self, config: AriaConfig):
        super().__init__(config)
        self.multi_modal_projector = AriaProjector(config)

    def _create_patch_attention_mask(self, pixel_mask):
        if pixel_mask is None:
            return None

        patches_subgrid = pixel_mask.unfold(
            dimension=1,
            size=self.vision_tower.config.patch_size,
            step=self.vision_tower.config.patch_size,
        )
        patches_subgrid = patches_subgrid.unfold(
            dimension=2,
            size=self.vision_tower.config.patch_size,
            step=self.vision_tower.config.patch_size,
        )
        return (patches_subgrid.sum(dim=(-1, -2)) > 0).bool()

    def get_image_features(
        self,
        pixel_values: torch.FloatTensor,
        pixel_mask: Optional[torch.FloatTensor] = None,
        vision_feature_layer: Optional[Union[int, List[int]]] = None,
    ):
        """
        Obtains image last hidden states from the vision tower and applies the multimodal projection.

        Args:
            pixel_values (`torch.FloatTensor` of shape `(batch_size, channels, height, width)`):
                The tensors corresponding to the input images.
            pixel_mask (`torch.FloatTensor`, *optional*):
                The tensors corresponding to the input image mask.
            vision_feature_layer (`Union[int, List[int]]`, *optional*):
                The index of the layer to select the vision feature. If multiple indices are provided,
                the vision feature of the corresponding indices will be concatenated to form the
                vision features.
        Returns:
            image_features (`torch.Tensor`): Image feature tensor of shape `(num_images, image_length, embed_dim)`.
        """
        vision_feature_layer = (
            vision_feature_layer if vision_feature_layer is not None else self.config.vision_feature_layer
        )
        patch_attention_mask = self._create_patch_attention_mask(pixel_mask)
        image_outputs = self.vision_tower(
            pixel_values, patch_attention_mask=patch_attention_mask, output_hidden_states=True
        )
        image_attn_mask = None
        if patch_attention_mask is not None:
            flattened_mask = patch_attention_mask.flatten(1)
            image_attn_mask = torch.logical_not(flattened_mask)

        selected_image_feature = image_outputs.hidden_states[vision_feature_layer]
        image_features = self.multi_modal_projector(selected_image_feature, attn_mask=image_attn_mask)
        return image_features

    def forward(
        self,
        input_ids: torch.LongTensor = None,
        pixel_values: torch.FloatTensor = None,
        pixel_mask: torch.LongTensor = None,
        attention_mask: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        past_key_values: Optional[List[torch.FloatTensor]] = None,
        inputs_embeds: Optional[torch.FloatTensor] = None,
        use_cache: Optional[bool] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
        cache_position: Optional[torch.LongTensor] = None,
        **kwargs: Unpack[FlashAttentionKwargs],
    ) -> Union[Tuple, AriaModelOutputWithPast]:
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        if inputs_embeds is None:
            inputs_embeds = self.get_input_embeddings()(input_ids)

        if pixel_values is not None and inputs_embeds.shape[1] != 1:
            if input_ids is None:
                special_image_mask = inputs_embeds == self.get_input_embeddings()(
                    torch.tensor(self.config.image_token_id, dtype=torch.long, device=inputs_embeds.device)
                )
                n_image_tokens = special_image_mask.sum(dim=1).sum(dim=0)[0]
            else:
                image_embeds = input_ids == self.config.image_token_id
                special_image_mask = image_embeds.unsqueeze(-1).expand_as(inputs_embeds).to(inputs_embeds.device)
                n_image_tokens = image_embeds.sum(dim=-1).sum(dim=0)
            image_features = self.get_image_features(
                pixel_values=pixel_values,
                pixel_mask=pixel_mask,
                vision_feature_layer=self.config.vision_feature_layer,
            )
            n_images, n_features_per_image = image_features.shape[0], image_features.shape[1]
            n_image_features = n_images * n_features_per_image
            if n_image_tokens != n_image_features:
                raise ValueError(
                    f"Image features and image tokens do not match: tokens: {n_image_tokens}, features {n_image_features}"
                )
            image_features = image_features.to(inputs_embeds.device, inputs_embeds.dtype)
            inputs_embeds = inputs_embeds.masked_scatter(special_image_mask, image_features)

        outputs = self.language_model(
            attention_mask=attention_mask,
            position_ids=position_ids,
            past_key_values=past_key_values,
            inputs_embeds=inputs_embeds,
            use_cache=use_cache,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=True,
            cache_position=cache_position,
            **kwargs,
        )

        return AriaModelOutputWithPast(
            last_hidden_state=outputs.last_hidden_state,
            past_key_values=outputs.past_key_values if use_cache else None,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
            image_hidden_states=image_features if pixel_values is not None else None,
        )
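

# --- Illustrative sketch (not part of the original module) -----------------------------------
# The snippet below shows, with made-up dummy tensors, how `AriaModel.forward` merges the
# projected image features into the text embedding stream: every position flagged as an image
# token is overwritten via `masked_scatter`, which is why the number of image-token positions
# must equal `num_images * image_seq_len` (the `ValueError` check above). The token id, shapes,
# and values here are assumptions chosen only to make the example self-contained and runnable.
def _example_merge_image_features():
    import torch

    batch_size, seq_len, hidden = 1, 6, 4
    image_token_id = 9  # hypothetical placeholder id, not the real Aria value
    # Two image placeholder tokens inside an otherwise textual sequence.
    input_ids = torch.tensor([[5, 9, 9, 7, 8, 2]])
    inputs_embeds = torch.zeros(batch_size, seq_len, hidden)
    # One "image" whose projector output contributes 2 tokens of size `hidden`.
    image_features = torch.arange(2 * hidden, dtype=torch.float32).reshape(2, hidden)

    special_image_mask = (input_ids == image_token_id).unsqueeze(-1).expand_as(inputs_embeds)
    n_image_tokens = (input_ids == image_token_id).sum()
    assert n_image_tokens == image_features.shape[0]  # mirrors the token/feature count check

    # Scatter the image features into the flagged positions of the embedding tensor.
    return inputs_embeds.masked_scatter(special_image_mask, image_features)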


@auto_docstring(
    custom_intro="""
    Aria model for conditional generation tasks.

    This model combines a vision tower, a multi-modal projector, and a language model
    to perform tasks that involve both image and text inputs.
    """
)
class AriaForConditionalGeneration(LlavaForConditionalGeneration):
    @can_return_tuple
    @auto_docstring
    def forward(
        self,
        input_ids: torch.LongTensor = None,
        pixel_values: torch.FloatTensor = None,
        pixel_mask: torch.LongTensor = None,
        attention_mask: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        past_key_values: Optional[List[torch.FloatTensor]] = None,
        inputs_embeds: Optional[torch.FloatTensor] = None,
        labels: Optional[torch.LongTensor] = None,
        use_cache: Optional[bool] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
        logits_to_keep: Union[int, torch.Tensor] = 0,
        cache_position: Optional[torch.LongTensor] = None,
        **kwargs: Unpack[KwargsForCausalLM],
    ) -> Union[Tuple, AriaCausalLMOutputWithPast]:
        r"""
labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
    Labels for computing the masked language modeling loss. Indices should either be in `[0, ...,
    config.vocab_size]` or `model.image_token_id` (where `model` is your instance of `AriaForConditionalGeneration`).
    Tokens with indices set to `model.image_token_id` are ignored (masked), the loss is only
    computed for the tokens with labels in `[0, ..., config.vocab_size]`.

Example:

```python
>>> import requests
>>> import torch
>>> from PIL import Image
>>> from io import BytesIO

>>> from transformers import AutoProcessor, AutoModel
>>> from transformers.image_utils import load_image

>>> # Note that passing the image urls (instead of the actual pil images) to the processor is also possible
>>> image1 = load_image("https://cdn.britannica.com/61/93061-050-99147DCE/Statue-of-Liberty-Island-New-York-Bay.jpg")
>>> image2 = load_image("https://cdn.britannica.com/59/94459-050-DBA42467/Skyline-Chicago.jpg")
>>> image3 = load_image("https://cdn.britannica.com/68/170868-050-8DDE8263/Golden-Gate-Bridge-San-Francisco.jpg")

>>> processor = AutoProcessor.from_pretrained("Rhymes-AI/Aria")
>>> model = AutoModel.from_pretrained("Rhymes-AI/Aria", torch_dtype=torch.bfloat16, device_map="auto")

>>> # Create inputs
>>> messages = [
...     {
...         "role": "user",
...         "content": [
...             {"type": "image"},
...             {"type": "text", "text": "In this image, we can see the city of New York, and more specifically the Statue of Liberty."},
...             {"type": "image"},
...             {"type": "text", "text": "What can we see in this image?"},
...         ]
...     },
...     {
...         "role": "user",
...         "content": [
...             {"type": "image"},
...             {"type": "text", "text": "In which city is that bridge located?"},
...         ]
...     }
... ]

>>> prompts = [processor.apply_chat_template([message], add_generation_prompt=True) for message in messages]
>>> images = [[image1, image2], [image3]]
>>> inputs = processor(text=prompts, images=images, padding=True, return_tensors="pt").to(model.device)

>>> # Generate
>>> generated_ids = model.generate(**inputs, max_new_tokens=256)
>>> generated_texts = processor.batch_decode(generated_ids, skip_special_tokens=True)

>>> print(generated_texts[0])
Assistant: There are buildings, trees, lights, and water visible in this image.

>>> print(generated_texts[1])
Assistant: The bridge is in San Francisco.
```"""
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        outputs = self.model(
            input_ids=input_ids,
            pixel_values=pixel_values,
            pixel_mask=pixel_mask,
            attention_mask=attention_mask,
            position_ids=position_ids,
            past_key_values=past_key_values,
            inputs_embeds=inputs_embeds,
            use_cache=use_cache,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
            cache_position=cache_position,
            **kwargs,
        )

        hidden_states = outputs[0]
        # Only compute the logits that are actually needed.
        slice_indices = slice(-logits_to_keep, None) if isinstance(logits_to_keep, int) else logits_to_keep
        logits = self.lm_head(hidden_states[:, slice_indices, :])

        loss = None
        if labels is not None:
            loss = self.loss_function(
                logits=logits, labels=labels, vocab_size=self.config.text_config.vocab_size, **kwargs
            )

        return AriaCausalLMOutputWithPast(
            loss=loss,
            logits=logits,
            past_key_values=outputs.past_key_values,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
        )

    def prepare_inputs_for_generation(
        self,
        input_ids,
        past_key_values=None,
        inputs_embeds=None,
        pixel_values=None,
        pixel_mask=None,
        attention_mask=None,
        cache_position=None,
        logits_to_keep=None,
        **kwargs,
    ):
        model_inputs = super().prepare_inputs_for_generation(
            input_ids,
            past_key_values=past_key_values,
            inputs_embeds=inputs_embeds,
            attention_mask=attention_mask,
            cache_position=cache_position,
            logits_to_keep=logits_to_keep,
            **kwargs,
        )

        # Pixel inputs are only needed on the first forward call of a generation loop; on later
        # (cached) steps the image tokens are already merged, so they are not forwarded again.
        if cache_position[0] == 0:
            model_inputs["pixel_values"] = pixel_values
            model_inputs["pixel_mask"] = pixel_mask

        return model_inputs
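

# --- Illustrative sketch (not part of the original module) -----------------------------------
# `prepare_inputs_for_generation` above forwards `pixel_values` / `pixel_mask` only on the very
# first decoding step (`cache_position[0] == 0`); later steps reuse the KV cache, where the
# image features have already been merged. The helper below is a minimal stand-in that shows
# the same gating with dummy inputs; the function name and tensor shapes are assumptions made
# purely for illustration.
def _example_pixel_value_gating():
    import torch

    def prepare(cache_position, pixel_values):
        model_inputs = {"cache_position": cache_position}
        if cache_position[0] == 0:  # prefill step: prompt still contains the image placeholders
            model_inputs["pixel_values"] = pixel_values
        return model_inputs

    pixel_values = torch.zeros(1, 3, 224, 224)
    first_step = prepare(torch.tensor([0]), pixel_values)  # pixel values included
    later_step = prepare(torch.tensor([7]), pixel_values)  # pixel values omitted
    return "pixel_values" in first_step, "pixel_values" in later_step  # (True, False)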


__all__ = [
    "AriaConfig",
    "AriaTextConfig",
    "AriaImageProcessor",
    "AriaProcessor",
    "AriaForConditionalGeneration",
    "AriaPreTrainedModel",
    "AriaTextPreTrainedModel",
    "AriaTextModel",
    "AriaModel",
    "AriaTextForCausalLM",
]