
    fThY                        S r SSKrSSKJr  SSKJrJrJrJrJ	r	  SSK
rSSKJr  SSKJrJr  SSKJrJrJrJr  SS	KJrJr  SS
KJrJrJr  SSKJrJrJ r   \" 5       (       a  SSK!J"r"J#r#J$r$J%r%  \(       a  SSKJ&r&  \RN                  " \(5      r)\" 5       (       a  SSK*J*r*  OSr*S r+S r,S r- " S S\SS9r. " S S\SS9r/ " S S\5      r0S/r1g)z
Processor class for SmolVLM.
    N)	timedelta)TYPE_CHECKINGDictListOptionalUnion   )BatchFeature)
ImageInputmake_nested_list_of_images)ImagesKwargsProcessingKwargsProcessorMixinUnpack)BatchEncoding	TextInput)is_num2words_availableis_vision_availablelogging)
VideoInput
load_videomake_batched_videos   )DEFAULT_MEDIA_OUTTRODEFAULT_VIDEO_INTROFRAME_TIMESTAMP_MESSAGEsmolvlm_sample_indices_fn)PreTokenizedInput)	num2wordsc           	          Sn[        U5       H7  n[        U5       H   nUU SUS-    SUS-    S3-   U U -  -   -  nM"     US-  nM9     USU 3U -   U U -  -   U -   -  nU$ )zKPrompt with expanded image tokens for when the image is split into patches. z<row_r   _col_>
)range)	image_seq_len
image_rows
image_colsfake_token_around_imageimage_tokenglobal_image_tokentext_split_imagesn_hn_ws	            f/var/www/auris/envauris/lib/python3.13/site-packages/transformers/models/smolvlm/processing_smolvlm.py_prompt_split_imager0   5   s     Z $C*+sQwiuS1WIQ/OOU`TaerRrr % 	T! ! 
$%& 	"M]
*	+ %%	'     c                 &    U U -   U U -  -   U -   $ )z5Prompt with expanded image tokens for a single image. )r&   r)   r*   r+   s       r/   _prompt_single_imager4   J   s6     #
# 	"M]
*	+ %%	'r1   c                 L    U S:X  a  US:X  a  [        UUUUS9$ [        X XXE5      $ )Nr   )r)   r*   r+   )r4   r0   )r'   r(   r&   r)   r*   r+   s         r/   get_image_prompt_stringr6   T   s@     Q:?#$;#1	
 	
 : r1   c                   @    \ rS rSr% \\   \S'   \\\\	4      \S'   Sr
g)SmolVLMImagesKwargsc   return_row_col_infomax_image_sizer3   N)__name__
__module____qualname____firstlineno__r   bool__annotations__r   strint__static_attributes__r3   r1   r/   r8   r8   c   s    !$'T#s(^,,r1   r8   F)totalc                   6    \ rS rSr% \\S'   SSSS.SS0S.rSrg	)
SmolVLMProcessorKwargsh   images_kwargsTF)add_special_tokenspaddingis_split_into_wordsr:   )text_kwargsrI   r3   N)r<   r=   r>   r?   r8   rA   	_defaultsrD   r3   r1   r/   rG   rG   h   s+    && #'#(
 "4
	Ir1   rG   c                     ^  \ rS rSrSr/ SQrSS/rSrSrSr	  S!S\
S\\   4U 4S jjjr S"S	 jr    S#S
\\\\   \\\      4   S\\S\\   \S   4   S\S\\   S\4
S jjrS\\\\\4         S\\   S\\   S\\\\\4         4S jrS rS r\S 5       r    S$S\\S4   S\\
   S\\
   S\S\
S\R@                  4S jjr!S r"U =r#$ )%SmolVLMProcessorw   a9  
Constructs a SmolVLM processor which wraps a LLama tokenizer and SmolVLM image processor into a single processor.

[`SmolVLMProcessor`] offers all the functionalities of [`SmolVLMImageProcessor`] and [`SmolVLMTokenizerFast`]. See
the docstring of [`~IdeficsProcessor.__call__`] and [`~IdeficsProcessor.decode`] for more information.

Args:
    image_processor (`SmolVLMImageProcessor`):
        An instance of [`SmolVLMImageProcessor`]. The image processor is a required input.
    tokenizer (`PreTrainedTokenizerBase`):
        An instance of [`PreTrainedTokenizerBase`]. This should correspond with the model's text model. The tokenizer is a required input.
    video_processor (`SmolVLMImageProcessor`):
        n instance of [`SmolVLMImageProcessor`]. The video processor is a required input.
    image_seq_len (`int`, *optional*, defaults to 169):
        The length of the image sequence i.e. the number of <image> tokens per image in the input.
        This parameter is used to build the string from the input prompt and image tokens and should match the
        value the model used. It is computed as: image_seq_len = int(((image_size // patch_size) ** 2) / (scale_factor**2))
    chat_template (`str`, *optional*): A Jinja template which will be used to convert lists of messages
        in a chat into a tokenizable string.
)image_processor	tokenizervideo_processorr&   chat_templateSmolVLMImageProcessorAutoTokenizerc                 4  > [        USS5      U l        [        USS5      U l        UR                  U R                  5      U l        [        USS5      U l        [        USS5      U l        X@l        UR                  S	   U l	        UR                  U l        UR                  U l        UR                  R                  S
S5      U l        UR                  S   U l        UR                  S   U l        ["        (       d  [%        S5      e[&        TU ]P  " XU4SU0UD6  g )Nfake_image_tokenz<fake_token_around_image>r*   z<image>end_of_utterance_tokenz<end_of_utterance>r+   z<global-img>
video_sizedo_image_splittingF
max_framesfpszbPackage `num2words` is required to run SmolVLM processor. Install it with `pip install num2words`.rU   )getattrrY   r*   convert_tokens_to_idsimage_token_idrZ   r+   r&   video_samplingr[   size
image_sizer\   getdo_video_splittingdefault_max_framesdefault_fpsr   ImportErrorsuper__init__)selfrR   rS   rT   r&   rU   kwargs	__class__s          r/   rk   SmolVLMProcessor.__init__   s    !(	3EGb c"9mYG'==d>N>NO&-i9QSg&h#"))5I>"Z*)88F).."1"D"D"1"@"@"D"DEY[`"a"1"@"@"N*99%@ yt  	_lTaleklr1   c                 J   Ub*  U Vs/ s H  owR                  U R                  5      PM     nnU V	s/ s H  n	[        U	5      PM     n
n	U" U4XES.UD6nUc  S U4$ U
W:w  a  [        SU SU
 S35      eUR	                  SS/[        U5      -  /5      nUR	                  SS/[        U5      -  /5      n/ n[        XU5       H  u  pn/ n[        UU5       HM  u  nn[        UUU R                  U R                  U R                  U R                  S9nUR                  U5        MO     UR                  U R                  5      n[        U5      S:X  a  [        S	5      eUS   n[        U5       H  u  nnUUUUS
-      -   -  nM     UR                  U5        M     X4$ s  snf s  sn	f )N)r\   rc   z!The number of images in the text z and images z should be the same.rowsr   cols)r*   r)   r+   z.The image token should be present in the text.r   )countr*   len
ValueErrorpopzipr6   r&   rY   r+   appendsplit	enumerate)rl   textimagesoutput_kwargsr\   image_processor_size	processorsamplen_images_in_textsublistn_images_in_imagesimage_inputsr'   r(   prompt_stringssample_rowssample_colsimage_prompt_stringsn_rowsn_colsimage_prompt_stringsplit_sampleis                          r/   process_visionSmolVLMProcessor.process_vision   s    MQRT6T-=-= >TR:@A&wc'l&A 
'9
Xe
 <%%!1134D3E\RdQeeyz  "%%fsSY.?@
!%%fsSY.?@
03Dj0Q,F#% "%k;"?&=&& $ 0 0,0,A,A'+'>'>'# %++,?@ #@ "<<(8(89L< A% !QRR "!_F*34H*I&&-QU0CCC +J!!&)- 1R0 ++U  SAs
   $FF r|   r{   r   videosrm   returnc           	         Uc  Uc  Uc  [        S5      eUc  USL USL-  (       a  [        S5      eU R                  " [        4SU R                  R                  0UD6nUb  [        U[        5      (       a  U/nO8[        U[        5      (       d#  [        US   [        5      (       d  [        S5      e[        U Vs/ s H  owR                  U R                  5      PM     sn5      nUS:  a  Uc  Uc  [        SU S35      e0 n	UbT  [        U5      nU R                  UUUS	   U R                  U R                  U R                  S
9u  p*U	R!                  U
5        OVUbS  [#        U5      nU R                  UUUS   U R                  U R$                  U R&                  S
9u  p*U	R!                  U
5        US   R)                  SS5      nUb8  U R                  " U40 US   D6nU R+                  X,S/S9  U	R!                  U5        [-        XS9$ s  snf )a
  
Processes the input prompts and returns a BatchEncoding.

Example:

```python
>>> import requests
>>> from transformers import SmolVLMProcessor
>>> from transformers.image_utils import load_image

>>> processor = SmolVLMProcessor.from_pretrained("HuggingFaceM4/SmolVLM2-256M-Video-Instruct")
>>> processor.image_processor.do_image_splitting = False  # Force as False to simplify the example

>>> url1 = "https://cdn.britannica.com/61/93061-050-99147DCE/Statue-of-Liberty-Island-New-York-Bay.jpg"
>>> url2 = "https://cdn.britannica.com/59/94459-050-DBA42467/Skyline-Chicago.jpg"

>>> image1, image2 = load_image(url1), load_image(url2)
>>> images = [[image1], [image2]]

>>> text = [
...     "<image>In this image, we see",
...     "bla bla bla<image>",
... ]
>>> outputs = processor(images=images, text=text, return_tensors="pt", padding=True)
>>> input_ids = outputs.input_ids
>>> input_tokens = processor.tokenizer.batch_decode(input_ids)
>>> print(input_tokens)
['<|begin_of_text|><fake_token_around_image><global-img>((<image>)*169)<fake_token_around_image> In this image, we see', '<|reserved_special_token_0|><|reserved_special_token_0|><|reserved_special_token_0|><|begin_of_text|>bla bla bla<fake_token_around_image><global-img>((<image>)*169)<fake_token_around_image>']
```

Args:
    images (`PIL.Image.Image`, `np.ndarray`, `torch.Tensor`, `List[PIL.Image.Image]`, `List[np.ndarray]`, `List[torch.Tensor]`, *optional*):
        The image or batch of images to be prepared. Each image can be a PIL image, NumPy array or PyTorch
        tensor. If is of type `List[ImageInput]`, it's assumed that this is for a single prompt i.e. of batch size 1.
    text (`Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]]`, *optional*):
        The sequence or batch of sequences to be encoded. Each sequence can be a string or a list of strings
        (pretokenized string). If the sequences are provided as list of strings (pretokenized), you must set
        `is_split_into_words=True` (to lift the ambiguity with a batch of sequences).
        Wherever an image token, `<image>` is encountered it is expanded to
        `<fake_token_around_image>` + `<row_x_col_y>` + `<image>` * `image_seq_len` * <fake_token_around_image>`.
    videos (`List[PIL.Image.Image]`, `np.ndarray`, `torch.Tensor`, `List[np.ndarray]`, `List[torch.Tensor]`, *optional*):
        The video or batch of videos to be prepared. Each video can be a list of PIL frames, NumPy array or PyTorch
        tensor. If is of type `List[VideoInput]`, it's assumed that this is for a single prompt i.e. of batch size 1.
    return_tensors (`Union[str, TensorType]`, *optional*):
        If set, will return tensors of a particular framework. See [`PreTrainedTokenizerFast.__call__`] for more
        information.
Nz5You must provide one of `text`, `images` or `videos'.z4You must specify exactly one of `images` or `videos`tokenizer_init_kwargsr   zAInvalid input text. Please provide a string, or a list of stringszWe detected z4 tokens in the text but no images/videos were passedrI   )r\   r~   r   videos_kwargsrM   return_tensorsimage)
modalities)tensor_type)ru   _merge_kwargsrG   rS   init_kwargs
isinstancerB   listsumrs   r*   r   r   r\   rd   rR   updater   r[   rT   rv   _check_special_mm_tokensr
   )rl   r|   r{   audior   rm   r}   r   r   inputsvision_inputsr   text_inputss                r/   __call__SmolVLMProcessor.__call__   s,   n <FNv~TUU<fnt1CDSTT**"
"&.."<"<
 
 $$$vd++JtAw4L4L !dee"QU#VQUvLL1A1A$BQU#VW!#FN <0@/AAu!vww/7F"&"5"5o.#'#:#:%)__.. #6 #D MM-((0F"&"5"5o.#'#:#:%)__.. #6 #D MM-(&}599:JDQ..N}1MNK))$	)RMM+&F??I $Ws   ;$H	conversationsbatch_imagesbatch_videosbatch_video_metadatac                     [         R                  " U5      n/ / pv[        XC5       H  u  p[        X5       H  u  p[        U
S5      n[        U
S5      n[        U
S5      n/ n[        X5       H?  u  nnUU-  n[	        US-  5      n[	        US-  5      nUR                  US SUS 35        MA     UR                  U5        UR                  [        U5      5        M     M     U GH&  nU GH  nSU;  a  M  / nUS    H  nUR                  S5      S	:X  a  UR                  S
5      nUR                  S
5      n[        [	        W5      S9nUR                  S[        R                  " [        U5      [        U5      S9S.5        [        U5       H?  u  nnUR                  S[        R                  " US9S.5        UR                  SS05        MA     UR                  S[         S.5        M  UR                  U5        GM     UUS'   GM     GM)     U$ )a  
Used within `apply_chat_template` when a model has special way to process conversation history. For example,
video models might want to specify in the prompt the duration of video or which frame indices at which timestamps
were sampled. This information cannot be accessed before the video is loaded.
For most models it is a no-op, must be overridden by model processors which require special processing.
Args:
    conversation (`List[Dict, str, str]`):
        The conversation to process. Always comes in batched format.
    batch_images (`List[List[ImageInput]]`):
        Batch of images that were loaded from url/path defined in the conversation. The images
        are ordered in the same way as in the conversation. Comes in nested list format, one list of `PIL` images
        per batch.
    batch_videos (`List[List[ImageInput]]`):
        Batch of videos that were loaded from url/path defined in the conversation. The videos
        are ordered in the same way as in the conversation. Comes in nested list format, one list of 4D video arrays
        per batch.
    batch_video_metadata (`List[List[Dict[[str, any]]]]`):
        Batch of metadata returned from loading videos. That includes video fps, duration and total number of framer in original video.
        Metadata are ordered in the same way as `batch_videos`. Comes in nested list format, one list of 4D video arrays
        per batch.
durationframes_indicesr^   <   02d:contenttypevideor   )secondsr{   )frame_countvideo_duration)r   r{   )	timestampr   )copydeepcopyrw   r_   rC   rx   rt   re   rv   r   r   formatr   rB   rz   r   r   )rl   r   r   r   r   chat_template_kwargsbatch_num_framesbatch_timestampsmetadata_list
video_listmetadatar   duration_sec
frames_idxr^   
timestampsidxframe_npsecmmssconversationmsgnew_contentblockcurr_timestampscurr_num_framestdr   tss                                 r/   #_process_messages_for_chat_template4SmolVLMProcessor._process_messages_for_chat_templateU  s   > m4-/*),-A)P%M#&}#A&x<$X/?@
h.
%(%;MC)CSBYBS2XB%%C"S&:;	 &<
 !''
3 ''E
3 $B *Q *L#C'  ^Eyy(G3*:*>*>q*A*:*>*>q*A 's</@A#**(.(;(B(B09/0J[^_a[b)" &/%?EAr'..H_HfHfqsHt/uv'../@A &@
 $**FDX+YZ $**515 ,: "-IE $ *J r1   c                 >    U R                   R                  " U0 UD6nU$ )z
This method forwards all its arguments to SmolVLMTokenizerFast's [`~PreTrainedTokenizer.batch_decode`]. Please
refer to the docstring of this method for more information.
)rS   batch_decode)rl   argsrm   batched_decode_outputs       r/   r   SmolVLMProcessor.batch_decode  s$    
 !% ; ;T LV L$$r1   c                 >    U R                   R                  " U0 UD6nU$ )z
This method forwards all its arguments to SmolVLMTokenizerFast's [`~PreTrainedTokenizer.decode`]. Please refer to
the docstring of this method for more information.
)rS   decode)rl   r   rm   decode_outputs       r/   r   SmolVLMProcessor.decode  s#    
 --t>v>r1   c                     U R                   R                  nU R                  R                  n[        [        R                  X!-   5      5      $ )N)rS   model_input_namesrR   r   dictfromkeys)rl   tokenizer_input_namesimage_processor_input_namess      r/   r   "SmolVLMProcessor.model_input_names  s<     $ @ @&*&:&:&L&L#DMM"="UVWWr1   r   r   
num_framesr^   backend	skip_secsc                 z   ^^	^
 Uc  U R                   OUm	Uc  U R                  OUm
U	UU
4S jn[        XUS9u  pX4$ )aO  
Loads `video` to a numpy array.

Args:
    video (`str` or `VideoInput`):
        The video to convert to the numpy array format. Can be a link to video or local path.
    num_frames (`int`, *optional*):
        Number of frames to sample uniformly. If not passed, the whole video is loaded.
    fps (`int`, *optional*):
        Number of frames to sample per second. Should be passed only when `num_frames=None`.
        If not specified and `num_frames==None`, all frames are sampled.
    backend (`str`, *optional*, defaults to `"opencv"`):
        The backend to use when loading the video. Can be any of ["decord", "pyav", "opencv", "torchvision"]. Defaults to "opencv".

Returns:
    Tuple[`np.array`, Dict]: A tuple containing:
        - Numpy array of frames in RGB (shape: [num_frames, height, width, 3]).
        - Metadata dictionary.
c                 $   > [        U 4TTTS.UD6$ )N)r]   
target_fpsr   )r   )r   	fn_kwargsr]   r   r   s     r/   sample_indices_fn_funcFSmolVLMProcessor._load_video_for_model.<locals>.sample_indices_fn_func  s(    ,%/JR[_h r1   )r   sample_indices_fn)rg   rh   r   )rl   r   r   r^   r   r   rm   r   r   r]   r   s        `   @@r/   _load_video_for_model&SmolVLMProcessor._load_video_for_model  sF    8 1;0BT,,

),T%%#
	
 %UOefr1   )rh   rg   r\   rf   rZ   rY   r+   r&   rd   r*   ra   r[   )   N)FNN)NNNN)NNopencvg        )$r<   r=   r>   r?   __doc__
attributesvalid_kwargsimage_processor_classvideo_processor_classtokenizer_classrC   r   rB   rk   r   r   r   r   r   r   r   rG   r   r   r   anyr   r   r   propertyr   nparrayr   rD   __classcell__)rn   s   @r/   rP   rP   w   s   * EJ#_5L3  &O !'+ m
  m  } m  mF ko.,d OSbf!l@j$z"2Dj9I4JJKl@ I2DOTJ]E^^_l@
 l@ /0l@ 
l@\UDc3h01U :&U :&	U
 #4S#X#78Un% X X %)!%S,&'% SM% c]	%
 % % 
% %r1   rP   )2r   r   datetimer   typingr   r   r   r   r   numpyr   feature_extraction_utilsr
   image_utilsr   r   processing_utilsr   r   r   r   tokenization_utils_baser   r   utilsr   r   r   video_utilsr   r   r   video_processing_smolvlmr   r   r   r   r   
get_loggerr<   loggerr   r0   r4   r6   r8   rG   rP   __all__r3   r1   r/   <module>r     s      = =  4 A V V ? I I F F   <			H	% #I*-,e -
-U q~ qh 
r1   
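

# ---------------------------------------------------------------------------
# Minimal usage sketch (illustrative, not part of the upstream module): it
# shows how the prompt-expansion helpers above compose the expanded image
# prompt that `process_vision` splices into the text. The token strings are
# the processor defaults set in `__init__`; `image_seq_len=3` is a tiny value
# chosen for readability (the model default is 169). Because this file uses
# relative imports, run it in package context, e.g.
# `python -m transformers.models.smolvlm.processing_smolvlm`.
if __name__ == "__main__":
    # A 2x2 split image: one <row_r_col_c> block per patch, each preceded by
    # the fake wrapper token and followed by `image_seq_len` image tokens,
    # with the downscaled global image appended last.
    prompt = get_image_prompt_string(
        image_rows=2,
        image_cols=2,
        image_seq_len=3,
        fake_token_around_image="<fake_token_around_image>",
        image_token="<image>",
        global_image_token="<global-img>",
    )
    print(prompt)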