from typing import List, Optional, Tuple, Union

import torch
import torch.utils.checkpoint
from torch import nn

from ...cache_utils import DynamicCache
from ...modeling_flash_attention_utils import FlashAttentionKwargs
from ...processing_utils import Unpack
from ...utils import auto_docstring, can_return_tuple, logging
from ..idefics3.configuration_idefics3 import Idefics3Config, Idefics3VisionConfig
from ..idefics3.image_processing_idefics3 import Idefics3ImageProcessor
from ..idefics3.modeling_idefics3 import (
    Idefics3BaseModelOutputWithPast,
    Idefics3ForConditionalGeneration,
    Idefics3Model,
    Idefics3PreTrainedModel,
    Idefics3VisionTransformer,
)


logger = logging.get_logger(__name__)


class SmolVLMVisionConfig(Idefics3VisionConfig):
    r"""
    This is the configuration class to store the configuration of a [`SmolVLMVisionModel`]. It is used to instantiate a
    SmolVLM vision encoder according to the specified arguments, defining the model architecture. Instantiating a
    configuration with the defaults will yield a similar configuration to that of the SigLIP checkpoint
    [google/siglip-so400m-patch14-384](https://huggingface.co/google/siglip-so400m-patch14-384) used in SmolVLM
    [HuggingFaceTB/SmolVLM2-2.2B-Instruct](https://huggingface.co/HuggingFaceTB/SmolVLM2-2.2B-Instruct).

    Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
    documentation from [`PretrainedConfig`] for more information.

    Args:
        hidden_size (`int`, *optional*, defaults to 1152):
            Dimensionality of the encoder layers and the pooler layer.
        intermediate_size (`int`, *optional*, defaults to 3072):
            Dimensionality of the "intermediate" (i.e., feed-forward) layer in the Transformer encoder.
        num_hidden_layers (`int`, *optional*, defaults to 12):
            Number of hidden layers in the Transformer encoder.
        num_attention_heads (`int`, *optional*, defaults to 16):
            Number of attention heads for each attention layer in the Transformer encoder.
        num_channels (`int`, *optional*, defaults to 3):
            Number of channels in the input images.
        image_size (`int`, *optional*, defaults to 224):
            The size (resolution) of each image.
        patch_size (`int`, *optional*, defaults to 32):
            The size (resolution) of each patch.
        hidden_act (`str` or `function`, *optional*, defaults to `"gelu_pytorch_tanh"`):
            The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
            `"relu"`, `"selu"`, `"gelu_new"` and `"quick_gelu"` are supported.
        layer_norm_eps (`float`, *optional*, defaults to 1e-06):
            The epsilon used by the layer normalization layers.
        attention_dropout (`float`, *optional*, defaults to 0.0):
            The dropout ratio for the attention probabilities.
        initializer_range (`float`, *optional*, defaults to 0.02):
            The standard deviation of the truncated_normal_initializer for initializing all weight matrices.

    Example:

    ```python
    >>> from transformers.models.smolvlm.modeling_smolvlm import SmolVLMVisionTransformer
    >>> from transformers.models.smolvlm.configuration_smolvlm import SmolVLMVisionConfig

    >>> # Initializing a SmolVLMVisionConfig with google/siglip-so400m-patch14-384 style configuration
    >>> configuration = SmolVLMVisionConfig()

    >>> # Initializing a SmolVLMVisionTransformer (with random weights) from the google/siglip-so400m-patch14-384 style configuration
    >>> model = SmolVLMVisionTransformer(configuration)

    >>> # Accessing the model configuration
    >>> configuration = model.config
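    >>> # Illustrative sketch: individual fields can also be overridden at init (hypothetical smaller patch size)
    >>> small_patch_configuration = SmolVLMVisionConfig(patch_size=16)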
    ```Zsmolvlm_visionN__name__
__module____qualname____doc__Z
model_type r   r   Z/var/www/auris/lib/python3.10/site-packages/transformers/models/smolvlm/modular_smolvlm.pyr   (   s    3r   c                   @   s   e Zd Zdd ZdS )SmolVLMPreTrainedModelc                 C   s   t | jd| j j}t|tjtjfr,|jj	j
d|d |jd ur*|jj	  d S d S t|tjrM|jj	j
d|d |jd urK|jj	|j   d S d S t|tjrb|jj	d |jj	  d S d S )Ninitializer_range        )meanstdg      ?)getattrconfigZget_text_configr!   
isinstancer   LinearZConv2dweightdataZnormal_biasZzero_Z	EmbeddingZpadding_idxZ	LayerNormZfill_)selfmoduler$   r   r   r   _init_weightsa   s   

z$SmolVLMPreTrainedModel._init_weightsN)r   r   r   r.   r   r   r   r   r    `   s    r    c                   @      e Zd ZdS )SmolVLMVisionTransformerNr   r   r   r   r   r   r   r0   q       r0   c                   @   r   )SmolVLMConfiga  
    This is the configuration class to store the configuration of a [`SmolVLMModel`]. It is used to instantiate a
    SmolVLM model according to the specified arguments, defining the model architecture. Instantiating a
    configuration with the defaults will yield a similar configuration to that of the model of the SmolVLM
    [HuggingFaceTB/SmolVLM2-2.2B-Instruct](https://huggingface.co/HuggingFaceTB/SmolVLM2-2.2B-Instruct) architecture.

    Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
    documentation from [`PretrainedConfig`] for more information.

    Args:
        use_cache (`bool`, *optional*, defaults to `True`):
            Whether or not the model should cache the key/value pairs of the attention mechanism. Only
            relevant if `config.is_decoder=True`.
        image_token_id (`int`, *optional*, defaults to 128257):
            The id of the "image" token.
        tie_word_embeddings (`bool`, *optional*, defaults to `False`):
            Whether or not to tie the word embeddings with the token embeddings.
        vision_config (`SmolVLMVisionConfig` or `dict`, *optional*, defaults to `SmolVLMVisionConfig`):
            Custom vision config or dict for the vision tower
        text_config (`PretrainedConfig` or `dict`, *optional*, defaults to `LlamaConfig`):
            Custom text config or dict for the text model
        scale_factor (`int`, *optional*, defaults to 2):
            The scale factor for the image encoder.
        pad_token_id (`int`, *optional*, defaults to 128002):
            The id of the padding token.

    Example:
    ```python
    >>> from transformers import SmolVLMModel, SmolVLMConfig
    >>> # Initializing configuration
    >>> configuration = SmolVLMConfig()
    >>> # Initializing a model from the configuration
    >>> model = SmolVLMModel(configuration)
    >>> # Accessing the model configuration
    >>> configuration = model.config
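    >>> # Illustrative sketch: the vision tower can also be configured explicitly (all other defaults kept)
    >>> from transformers import SmolVLMVisionConfig
    >>> configuration = SmolVLMConfig(vision_config=SmolVLMVisionConfig())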
    ```ZsmolvlmNr   r   r   r   r   r3   u   s    %r3   c                   @   r/   )SmolVLMImageProcessorNr1   r   r   r   r   r4      r2   r4   c                   @   r/   )SmolVLMBaseModelOutputWithPastNr1   r   r   r   r   r5      r2   r5   c                #   @   s
  e Zd ZdZdejdejdejfddZddejd	ejfd
dZ	e
edd													ddeej deej deej deeej  deej deej d	eej deej dee dee dee dee deej dee deeef fddZdS )SmolVLMModelz
    A subclass of Idefics3Model. We do *not* remove or block the call to inputs_merger
    in forward. Instead, we override inputs_merger here with custom logic.
    """

    def inputs_merger(
        self, input_ids: torch.LongTensor, inputs_embeds: torch.Tensor, image_hidden_states: torch.Tensor
    ) -> torch.Tensor:
        _, patch_size, _ = image_hidden_states.shape

        image_mask = input_ids == self.image_token_id
        num_image_tokens = image_mask.sum(dim=1)
        if not torch.all(num_image_tokens % patch_size == 0):
            raise ValueError("At least one sample has <image> tokens not divisible by patch_size.")

        blocks_per_sample = num_image_tokens // patch_size

        # Map every <image> token position to (image block, position inside the block).
        offsets = torch.nn.functional.pad(blocks_per_sample.cumsum(dim=0), (1, 0), value=0)
        block_offset = offsets[:-1]
        row_cum = image_mask.cumsum(dim=-1)
        chunk_idx = (row_cum - 1) // patch_size
        local_idx = (row_cum - 1) % patch_size
        block_idx = block_offset.unsqueeze(1) + chunk_idx

        image_embeds = torch.zeros_like(inputs_embeds)
        image_embeds[image_mask] = image_hidden_states[block_idx[image_mask], local_idx[image_mask], :]

        merged_embeds = torch.where(image_mask.unsqueeze(-1), image_embeds, inputs_embeds)
        return merged_embeds

    def get_image_features(self, pixel_values: torch.FloatTensor, pixel_attention_mask: torch.LongTensor = None):
        """
        Encodes images into continuous embeddings that can be forwarded to the language model.

        Args:
            pixel_values (`torch.FloatTensor` of shape `(batch_size, max_num_images, num_channels, image_size, image_size)`):
                The tensors corresponding to the input images.
            pixel_attention_mask (`torch.LongTensor`, *optional*):
                The attention mask indicating padded regions in the image.
        """
        batch_size, num_images, num_channels, height, width = pixel_values.shape
        pixel_values = pixel_values.view(batch_size * num_images, *pixel_values.shape[2:])

        # Remove padding images - padding images are full 0.
        nb_values_per_image = pixel_values.shape[1:].numel()
        real_images_inds = (pixel_values == 0.0).sum(dim=(-1, -2, -3)) != nb_values_per_image

        if not any(real_images_inds):
            # no images, leave one empty image.
            real_images_inds[0] = True

        pixel_values = pixel_values[real_images_inds].contiguous()

        # Handle the vision attention mask
        if pixel_attention_mask is None:
            pixel_attention_mask = torch.ones(
                size=[pixel_values.shape[i] for i in (0, 2, 3)],
                dtype=torch.bool,
                device=pixel_values.device,
            )
        else:
            # Remove padding images from the mask
            pixel_attention_mask = pixel_attention_mask.view(batch_size * num_images, *pixel_attention_mask.shape[2:])
            pixel_attention_mask = pixel_attention_mask[real_images_inds].contiguous()

        patch_size = self.config.vision_config.patch_size
        patches_subgrid = pixel_attention_mask.unfold(dimension=1, size=patch_size, step=patch_size)
        patches_subgrid = patches_subgrid.unfold(dimension=2, size=patch_size, step=patch_size)
        patch_attention_mask = (patches_subgrid.sum(dim=(-1, -2)) > 0).bool()

        # Get sequence from the vision encoder
        image_hidden_states = self.vision_model(
            pixel_values=pixel_values, patch_attention_mask=patch_attention_mask
        ).last_hidden_state

        # Modality projection & resampling
        image_hidden_states = self.connector(image_hidden_states)
        return image_hidden_states

    @can_return_tuple
    @auto_docstring(
        custom_intro="""
        Inputs fed to the model can have an arbitrary number of images. To account for this, pixel_values fed to
        the model have image padding -> (batch_size, max_num_images, 3, max_heights, max_widths) where
        max_num_images is the maximum number of images among the batch_size samples in the batch.
        Padding images are not needed beyond padding the pixel_values at the entrance of the model.
        For efficiency, we only pass through the vision_model's forward the real images by
        discarding the padding images i.e. pixel_values of size (image_batch_size, 3, height, width) where
        image_batch_size would be 7 when num_images_per_sample=[1, 3, 1, 2] and max_num_images would be 3.
        """
    )
    def forward(
        self,
        input_ids: Optional[torch.LongTensor] = None,
        attention_mask: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        past_key_values: Optional[List[torch.FloatTensor]] = None,
        inputs_embeds: Optional[torch.FloatTensor] = None,
        pixel_values: Optional[torch.FloatTensor] = None,
        pixel_attention_mask: Optional[torch.BoolTensor] = None,
        image_hidden_states: Optional[torch.FloatTensor] = None,
        use_cache: Optional[bool] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
        cache_position: Optional[torch.LongTensor] = None,
        **kwargs: Unpack[FlashAttentionKwargs],
    ) -> Union[Tuple, SmolVLMBaseModelOutputWithPast]:
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        use_cache = use_cache if use_cache is not None else self.config.use_cache
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        if self.training and self.text_model.gradient_checkpointing and use_cache:
            logger.warning_once(
                "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`..."
            )
            use_cache = False

        if input_ids is not None:
            batch_size, seq_length = input_ids.shape
        elif inputs_embeds is not None:
            batch_size, seq_length, _ = inputs_embeds.shape
        else:
            raise ValueError("You have to specify either input_ids or inputs_embeds")

        past_seen_tokens = 0
        if use_cache:
            if past_key_values is None:
                past_key_values = DynamicCache()
            past_seen_tokens = past_key_values.get_seq_length()

        if inputs_embeds is not None and input_ids is None and past_seen_tokens == 0:
            raise ValueError("When first calling the model, if input_embeds are passed, input_ids should not be None.")

        if inputs_embeds is None:
            inputs_embeds = self.get_input_embeddings()(input_ids).to(input_ids.device)

        # Visual inputs: either raw pixel_values or precomputed image_hidden_states, never both.
        if pixel_values is not None and image_hidden_states is not None:
            raise ValueError("You cannot specify both pixel_values and image_hidden_states at the same time")
        elif pixel_values is not None:
            image_hidden_states = self.get_image_features(pixel_values, pixel_attention_mask)
        elif image_hidden_states is not None:
            image_hidden_states = image_hidden_states.to(dtype=self.dtype, device=input_ids.device)

        if inputs_embeds is not None and image_hidden_states is not None:
            inputs_embeds = self.inputs_merger(
                input_ids=input_ids,
                inputs_embeds=inputs_embeds,
                image_hidden_states=image_hidden_states,
            )

        outputs = self.text_model(
            inputs_embeds=inputs_embeds,
            attention_mask=attention_mask,
            position_ids=position_ids,
            past_key_values=past_key_values,
            use_cache=use_cache,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=True,
            cache_position=cache_position,
            **kwargs,
        )

        return SmolVLMBaseModelOutputWithPast(
            last_hidden_state=outputs.last_hidden_state,
            past_key_values=past_key_values,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
            image_hidden_states=image_hidden_states,
        )


class SmolVLMForConditionalGeneration(Idefics3ForConditionalGeneration):
    def __init__(self, config):
        super().__init__(config)
        self.model = SmolVLMModel(config)
        self.lm_head = nn.Linear(config.text_config.hidden_size, config.text_config.vocab_size, bias=False)
        self.post_init()

    def forward(self, **super_kwargs):
        r"""
        Example:

        ```python
        >>> import requests
        >>> import torch
        >>> from PIL import Image
        >>> from io import BytesIO

        >>> from transformers import AutoProcessor, AutoModelForImageTextToText
        >>> from transformers.image_utils import load_image

        >>> # Note that passing the image urls (instead of the actual pil images) to the processor is also possible
        >>> image1 = load_image("https://cdn.britannica.com/61/93061-050-99147DCE/Statue-of-Liberty-Island-New-York-Bay.jpg")
        >>> image2 = load_image("https://cdn.britannica.com/59/94459-050-DBA42467/Skyline-Chicago.jpg")
        >>> image3 = load_image("https://cdn.britannica.com/68/170868-050-8DDE8263/Golden-Gate-Bridge-San-Francisco.jpg")

        >>> processor = AutoProcessor.from_pretrained("HuggingFaceTB/SmolVLM2-2.2B-Instruct")
        >>> model = AutoModelForImageTextToText.from_pretrained("HuggingFaceTB/SmolVLM2-2.2B-Instruct", torch_dtype=torch.bfloat16, device_map="auto")

        >>> # Create inputs
        >>> messages = [
        ...     {
        ...         "role": "user",
        ...         "content": [
        ...             {"type": "video", "path": "path/to/video"},
        ...             {"type": "text", "text": "What is happening in this video?"},
        ...         ]
        ...     }
        ... ]

        >>> inputs = processor.apply_chat_template(
        ...     [messages], add_generation_prompt=True, tokenize=True, return_dict=True, return_tensors="pt"
        ... ).to(model.device, dtype=torch.bfloat16)

        >>> # Generate
        >>> generated_ids = model.generate(**inputs, max_new_tokens=256)
        >>> generated_texts = processor.batch_decode(generated_ids, skip_special_tokens=True)

        >>> print(generated_texts)
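        >>> # Illustrative: decode only the newly generated tokens by slicing off the prompt length
        >>> new_tokens = generated_ids[:, inputs["input_ids"].shape[1]:]
        >>> print(processor.batch_decode(new_tokens, skip_special_tokens=True))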
        ```"""
        super().forward(**super_kwargs)


__all__ = [
    "SmolVLMVisionConfig",
    "SmolVLMConfig",
    "SmolVLMImageProcessor",
    "SmolVLMForConditionalGeneration",
    "SmolVLMPreTrainedModel",
    "SmolVLMModel",
    "SmolVLMVisionTransformer",
]