
import copy
import json
import os
import warnings
from typing import Any, Dict, List, Optional, Tuple, Union

import numpy as np

from .dynamic_module_utils import custom_object_save
from .image_processing_utils import BatchFeature, get_size_dict
from .image_processing_utils_fast import BaseImageProcessorFast
from .image_utils import ChannelDimension, SizeDict, validate_kwargs
from .processing_utils import Unpack, VideosKwargs
from .utils import (
    VIDEO_PROCESSOR_NAME,
    TensorType,
    add_model_info_to_auto_map,
    add_model_info_to_custom_pipelines,
    add_start_docstrings,
    cached_file,
    copy_func,
    download_url,
    is_offline_mode,
    is_remote_url,
    is_torch_available,
    is_torchvision_available,
    is_torchvision_v2_available,
    is_vision_available,
    logging,
)
from .utils.import_utils import requires
from .video_utils import (
    VideoInput,
    group_videos_by_shape,
    load_video,
    make_batched_videos,
    reorder_videos,
    to_channel_dimension_format,
)


if is_vision_available():
    from .image_utils import PILImageResampling

if is_torch_available():
    import torch

if is_torchvision_available():
    from .image_utils import pil_torch_interpolation_mapping

if is_torchvision_v2_available():
    from torchvision.transforms.v2 import functional as F
else:
    from torchvision.transforms import functional as F


logger = logging.get_logger(__name__)


BASE_VIDEO_PROCESSOR_DOCSTRING = r"""
    Args:
        do_resize (`bool`, *optional*, defaults to `self.do_resize`):
            Whether to resize the video's (height, width) dimensions to the specified `size`. Can be overridden by the
            `do_resize` parameter in the `preprocess` method.
        size (`dict`, *optional*, defaults to `self.size`):
            Size of the output video after resizing. Can be overridden by the `size` parameter in the `preprocess`
            method.
        size_divisor (`int`, *optional*, defaults to `self.size_divisor`):
            The size by which to make sure both the height and width can be divided.
        default_to_square (`bool`, *optional*, defaults to `self.default_to_square`):
            Whether to default to a square video when resizing, if size is an int.
        resample (`PILImageResampling`, *optional*, defaults to `self.resample`):
            Resampling filter to use if resizing the video. Only has an effect if `do_resize` is set to `True`. Can be
            overridden by the `resample` parameter in the `preprocess` method.
        do_center_crop (`bool`, *optional*, defaults to `self.do_center_crop`):
            Whether to center crop the video to the specified `crop_size`. Can be overridden by `do_center_crop` in the
            `preprocess` method.
        do_pad (`bool`, *optional*):
            Whether to pad the video to the `(max_height, max_width)` of the videos in the batch.
        crop_size (`Dict[str, int]` *optional*, defaults to `self.crop_size`):
            Size of the output video after applying `center_crop`. Can be overridden by `crop_size` in the `preprocess`
            method.
        do_rescale (`bool`, *optional*, defaults to `self.do_rescale`):
            Whether to rescale the video by the specified scale `rescale_factor`. Can be overridden by the
            `do_rescale` parameter in the `preprocess` method.
        rescale_factor (`int` or `float`, *optional*, defaults to `self.rescale_factor`):
            Scale factor to use if rescaling the video. Only has an effect if `do_rescale` is set to `True`. Can be
            overridden by the `rescale_factor` parameter in the `preprocess` method.
        do_normalize (`bool`, *optional*, defaults to `self.do_normalize`):
            Whether to normalize the video. Can be overridden by the `do_normalize` parameter in the `preprocess`
            method.
        image_mean (`float` or `List[float]`, *optional*, defaults to `self.image_mean`):
            Mean to use if normalizing the video. This is a float or list of floats the length of the number of
            channels in the video. Can be overridden by the `image_mean` parameter in the `preprocess` method.
        image_std (`float` or `List[float]`, *optional*, defaults to `self.image_std`):
            Standard deviation to use if normalizing the video. This is a float or list of floats the length of the
            number of channels in the video. Can be overridden by the `image_std` parameter in the `preprocess` method.
        do_convert_rgb (`bool`, *optional*, defaults to `self.do_convert_rgb`):
            Whether to convert the video to RGB.
        return_tensors (`str` or `TensorType`, *optional*):
            Returns stacked tensors if set to `"pt"`, otherwise returns a list of tensors.
        data_format (`ChannelDimension` or `str`, *optional*, defaults to `ChannelDimension.FIRST`):
            The channel dimension format for the output video. Can be one of:
            - `"channels_first"` or `ChannelDimension.FIRST`: video in (num_channels, height, width) format.
            - `"channels_last"` or `ChannelDimension.LAST`: video in (height, width, num_channels) format.
            - Unset: Use the channel dimension format of the input video.
        input_data_format (`ChannelDimension` or `str`, *optional*):
            The channel dimension format for the input video. If unset, the channel dimension format is inferred
            from the input video. Can be one of:
            - `"channels_first"` or `ChannelDimension.FIRST`: video in (num_channels, height, width) format.
            - `"channels_last"` or `ChannelDimension.LAST`: video in (height, width, num_channels) format.
            - `"none"` or `ChannelDimension.NONE`: video in (height, width) format.
        device (`torch.device`, *optional*):
            The device to process the videos on. If unset, the device is inferred from the input videos.
"""


@add_start_docstrings(
    "Constructs a base VideoProcessor.",
    BASE_VIDEO_PROCESSOR_DOCSTRING,
)
@requires(backends=("vision", "torchvision"))
class BaseVideoProcessor(BaseImageProcessorFast):
    _auto_class = None

    resample = None
    image_mean = None
    image_std = None
    size = None
    size_divisor = None
    default_to_square = True
    crop_size = None
    do_resize = None
    do_center_crop = None
    do_pad = None
    do_rescale = None
    rescale_factor = 1 / 255
    do_normalize = None
    do_convert_rgb = None
    valid_kwargs = VideosKwargs
    model_input_names = ["pixel_values_videos"]

    def __init__(self, **kwargs: Unpack[VideosKwargs]) -> None:
        super().__init__()
        self._processor_class = kwargs.pop("processor_class", None)

        # Additional attributes without default values
        for key, value in kwargs.items():
            try:
                setattr(self, key, value)
            except AttributeError as err:
                logger.error(f"Can't set {key} with value {value} for {self}")
                raise err

        size = kwargs.pop("size", self.size)
        self.size = (
            get_size_dict(size=size, default_to_square=kwargs.pop("default_to_square", self.default_to_square))
            if size is not None
            else None
        )

        crop_size = kwargs.pop("crop_size", self.crop_size)
        self.crop_size = get_size_dict(crop_size, param_name="crop_size") if crop_size is not None else None

        # Prepare the list of kwargs this (sub)class accepts, so `preprocess` can validate user input against it
        self.model_valid_processing_keys = list(self.valid_kwargs.__annotations__.keys())
        for key in self.model_valid_processing_keys:
            if kwargs.get(key) is not None:
                setattr(self, key, kwargs[key])
            else:
                setattr(self, key, getattr(self, key, None))

    def __call__(self, videos, **kwargs) -> BatchFeature:
        return self.preprocess(videos, **kwargs)

    def convert_to_rgb(self, video: "torch.Tensor") -> VideoInput:
        """
        Converts a video to RGB format.

        Args:
            video (`"torch.Tensor"`):
                The video to convert.

        Returns:
            `torch.Tensor`: The converted video.
        """
        video = F.grayscale_to_rgb(video)
        if video.shape[-3] == 3 or not (video[..., 3, :, :] < 255).any():
            return video

        # There is a transparency layer: blend it with a white background using the alpha channel.
        alpha = video[..., 3, :, :] / 255.0
        video = (1 - alpha[..., None, :, :]) * 255 + alpha[..., None, :, :] * video[..., :3, :, :]
        return video

    def _prepare_input_videos(
        self,
        videos: VideoInput,
        input_data_format: Optional[Union[str, ChannelDimension]] = None,
        device: Optional["torch.device"] = None,
    ) -> List["torch.Tensor"]:
        """
        Prepare the input videos for processing.
        """
        videos = make_batched_videos(videos)
        processed_videos = []
        for video in videos:
            if isinstance(video, np.ndarray):
                video = to_channel_dimension_format(video, ChannelDimension.FIRST, input_data_format)
                video = torch.from_numpy(video).contiguous()

            # Now that we have torch tensors, we can move them to the requested device
            if device is not None:
                video = video.to(device)
            processed_videos.append(video)
        return processed_videos

    @add_start_docstrings(BASE_VIDEO_PROCESSOR_DOCSTRING)
    def preprocess(self, videos: VideoInput, **kwargs: Unpack[VideosKwargs]) -> BatchFeature:
        validate_kwargs(
            captured_kwargs=kwargs.keys(), valid_processor_keys=self.valid_kwargs.__annotations__.keys()
        )

        # Set default kwargs from self; anything the user did not pass falls back to the instance attribute.
        for kwarg_name in self.valid_kwargs.__annotations__:
            kwargs.setdefault(kwarg_name, getattr(self, kwarg_name, None))

        input_data_format = kwargs.pop("input_data_format")
        device = kwargs.pop("device")
        videos = self._prepare_input_videos(videos=videos, input_data_format=input_data_format, device=device)

        kwargs = self._further_process_kwargs(**kwargs)
        self._validate_preprocess_kwargs(**kwargs)

        # torchvision resize uses `interpolation` instead of `resample`
        resample = kwargs.pop("resample")
        kwargs["interpolation"] = (
            pil_torch_interpolation_mapping[resample] if isinstance(resample, (PILImageResampling, int)) else resample
        )

        # Pop kwargs that are not needed in `_preprocess`
        kwargs.pop("default_to_square")
        kwargs.pop("data_format")

        return self._preprocess(videos=videos, **kwargs)

    def _preprocess(
        self,
        videos: List["torch.Tensor"],
        do_convert_rgb: bool,
        do_resize: bool,
        size: SizeDict,
        size_divisor: Optional[int],
        interpolation: Optional["F.InterpolationMode"],
        do_center_crop: bool,
        crop_size: SizeDict,
        do_rescale: bool,
        do_pad: bool,
        rescale_factor: float,
        do_normalize: bool,
        image_mean: Optional[Union[float, List[float]]],
        image_std: Optional[Union[float, List[float]]],
        return_tensors: Optional[Union[str, TensorType]],
        **kwargs,
    ) -> BatchFeature:
        # Group videos by size for batched resizing
        grouped_videos, grouped_videos_index = group_videos_by_shape(videos)
        resized_videos_grouped = {}
        for shape, stacked_videos in grouped_videos.items():
            if do_convert_rgb:
                stacked_videos = self.convert_to_rgb(stacked_videos)
            if do_resize:
                stacked_videos = self.resize(
                    stacked_videos, size=size, size_divisor=size_divisor, interpolation=interpolation
                )
            resized_videos_grouped[shape] = stacked_videos
        resized_videos = reorder_videos(resized_videos_grouped, grouped_videos_index)

        # Group again, in case `do_resize` is False or resizing returned videos of different sizes
        grouped_videos, grouped_videos_index = group_videos_by_shape(resized_videos)
        processed_videos_grouped = {}
        for shape, stacked_videos in grouped_videos.items():
            if do_center_crop:
                stacked_videos = self.center_crop(stacked_videos, crop_size)
            # Fused rescale and normalize
            stacked_videos = self.rescale_and_normalize(
                stacked_videos, do_rescale, rescale_factor, do_normalize, image_mean, image_std
            )
            processed_videos_grouped[shape] = stacked_videos

        processed_videos = reorder_videos(processed_videos_grouped, grouped_videos_index)
        processed_videos = torch.stack(processed_videos, dim=0) if return_tensors else processed_videos

        return BatchFeature(data={"pixel_values_videos": processed_videos}, tensor_type=return_tensors)

    @classmethod
    def from_pretrained(
        cls,
        pretrained_model_name_or_path: Union[str, os.PathLike],
        cache_dir: Optional[Union[str, os.PathLike]] = None,
        force_download: bool = False,
        local_files_only: bool = False,
        token: Optional[Union[str, bool]] = None,
        revision: str = "main",
        **kwargs,
    ):
        r"""
        Instantiate a type of [`~video_processing_utils.VideoProcessorBase`] from a video processor.

Args:
    pretrained_model_name_or_path (`str` or `os.PathLike`):
        This can be either:

        - a string, the *model id* of a pretrained video hosted inside a model repo on
          huggingface.co.
        - a path to a *directory* containing a video processor file saved using the
          [`~video_processing_utils.VideoProcessorBase.save_pretrained`] method, e.g.,
          `./my_model_directory/`.
        - a path or url to a saved video processor JSON *file*, e.g.,
          `./my_model_directory/preprocessor_config.json`.
    cache_dir (`str` or `os.PathLike`, *optional*):
        Path to a directory in which a downloaded pretrained model video processor should be cached if the
        standard cache should not be used.
    force_download (`bool`, *optional*, defaults to `False`):
        Whether or not to force to (re-)download the video processor files and override the cached versions if
        they exist.
    resume_download:
        Deprecated and ignored. All downloads are now resumed by default when possible.
        Will be removed in v5 of Transformers.
    proxies (`Dict[str, str]`, *optional*):
        A dictionary of proxy servers to use by protocol or endpoint, e.g., `{'http': 'foo.bar:3128',
        'http://hostname': 'foo.bar:4012'}.` The proxies are used on each request.
    token (`str` or `bool`, *optional*):
        The token to use as HTTP bearer authorization for remote files. If `True`, or not specified, will use
        the token generated when running `huggingface-cli login` (stored in `~/.huggingface`).
    revision (`str`, *optional*, defaults to `"main"`):
        The specific model version to use. It can be a branch name, a tag name, or a commit id, since we use a
        git-based system for storing models and other artifacts on huggingface.co, so `revision` can be any
        identifier allowed by git.


        <Tip>

        To test a pull request you made on the Hub, you can pass `revision="refs/pr/<pr_number>"`.

        </Tip>

    return_unused_kwargs (`bool`, *optional*, defaults to `False`):
        If `False`, then this function returns just the final video processor object. If `True`, then this
        functions returns a `Tuple(video_processor, unused_kwargs)` where *unused_kwargs* is a dictionary
        consisting of the key/value pairs whose keys are not video processor attributes: i.e., the part of
        `kwargs` which has not been used to update `video_processor` and is otherwise ignored.
    subfolder (`str`, *optional*, defaults to `""`):
        In case the relevant files are located inside a subfolder of the model repo on huggingface.co, you can
        specify the folder name here.
    kwargs (`Dict[str, Any]`, *optional*):
        The values in kwargs of any keys which are video processor attributes will be used to override the
        loaded values. Behavior concerning key/value pairs whose keys are *not* video processor attributes is
        controlled by the `return_unused_kwargs` keyword parameter.

Returns:
            A video processor of type [`~video_processing_utils.VideoProcessorBase`].

Examples:

```python
# We can't instantiate directly the base class *VideoProcessorBase* so let's show the examples on a
# derived class: *LlavaOnevisionVideoProcessor*
video_processor = LlavaOnevisionVideoProcessor.from_pretrained(
    "llava-hf/llava-onevision-qwen2-0.5b-ov-hf"
)  # Download video_processing_config from huggingface.co and cache.
video_processor = LlavaOnevisionVideoProcessor.from_pretrained(
    "./test/saved_model/"
)  # E.g. video processor (or model) was saved using *save_pretrained('./test/saved_model/')*
video_processor = LlavaOnevisionVideoProcessor.from_pretrained("./test/saved_model/preprocessor_config.json")
video_processor = LlavaOnevisionVideoProcessor.from_pretrained(
    "llava-hf/llava-onevision-qwen2-0.5b-ov-hf", do_normalize=False, foo=False
)
assert video_processor.do_normalize is False
video_processor, unused_kwargs = LlavaOnevisionVideoProcessor.from_pretrained(
    "llava-hf/llava-onevision-qwen2-0.5b-ov-hf", do_normalize=False, foo=False, return_unused_kwargs=True
)
assert video_processor.do_normalize is False
assert unused_kwargs == {"foo": False}
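# The same processor can be written back to disk and reloaded; the local path used here is
# purely illustrative.
video_processor.save_pretrained("./my_video_processor/")
video_processor = LlavaOnevisionVideoProcessor.from_pretrained("./my_video_processor/")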
        ```"""
        kwargs["cache_dir"] = cache_dir
        kwargs["force_download"] = force_download
        kwargs["local_files_only"] = local_files_only
        kwargs["revision"] = revision

        use_auth_token = kwargs.pop("use_auth_token", None)
        if use_auth_token is not None:
            warnings.warn(
                "The `use_auth_token` argument is deprecated and will be removed in v5 of Transformers. Please use"
                " `token` instead.",
                FutureWarning,
            )
            if token is not None:
                raise ValueError(
                    "`token` and `use_auth_token` are both specified. Please set only the argument `token`."
                )
            token = use_auth_token

        if token is not None:
            kwargs["token"] = token

        video_processor_dict, kwargs = cls.get_video_processor_dict(pretrained_model_name_or_path, **kwargs)

        return cls.from_dict(video_processor_dict, **kwargs)

    def save_pretrained(self, save_directory: Union[str, os.PathLike], push_to_hub: bool = False, **kwargs):
        """
Save a video processor object to the directory `save_directory`, so that it can be re-loaded using the
[`~video_processing_utils.VideoProcessorBase.from_pretrained`] class method.

Args:
    save_directory (`str` or `os.PathLike`):
        Directory where the video processor JSON file will be saved (will be created if it does not exist).
    push_to_hub (`bool`, *optional*, defaults to `False`):
        Whether or not to push your model to the Hugging Face model hub after saving it. You can specify the
        repository you want to push to with `repo_id` (will default to the name of `save_directory` in your
        namespace).
    kwargs (`Dict[str, Any]`, *optional*):
        Additional keyword arguments passed along to the [`~utils.PushToHubMixin.push_to_hub`] method.
        """
        use_auth_token = kwargs.pop("use_auth_token", None)

        if use_auth_token is not None:
            warnings.warn(
                "The `use_auth_token` argument is deprecated and will be removed in v5 of Transformers. Please use"
                " `token` instead.",
                FutureWarning,
            )
            if kwargs.get("token", None) is not None:
                raise ValueError(
                    "`token` and `use_auth_token` are both specified. Please set only the argument `token`."
                )
            kwargs["token"] = use_auth_token

        if os.path.isfile(save_directory):
            raise AssertionError(f"Provided path ({save_directory}) should be a directory, not a file")

        os.makedirs(save_directory, exist_ok=True)

        if push_to_hub:
            commit_message = kwargs.pop("commit_message", None)
            repo_id = kwargs.pop("repo_id", save_directory.split(os.path.sep)[-1])
            repo_id = self._create_repo(repo_id, **kwargs)
            files_timestamps = self._get_files_timestamps(save_directory)

        # If we have a custom config, copy the file defining it into the folder so it can be loaded from the Hub.
        if self._auto_class is not None:
            custom_object_save(self, save_directory, config=self)

        output_video_processor_file = os.path.join(save_directory, VIDEO_PROCESSOR_NAME)

        self.to_json_file(output_video_processor_file)
        logger.info(f"Video processor saved in {output_video_processor_file}")

        if push_to_hub:
            self._upload_modified_files(
                save_directory,
                repo_id,
                files_timestamps,
                commit_message=commit_message,
                token=kwargs.get("token"),
            )

        return [output_video_processor_file]

    @classmethod
    def get_video_processor_dict(
        cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs
    ) -> Tuple[Dict[str, Any], Dict[str, Any]]:
        """
From a `pretrained_model_name_or_path`, resolve to a dictionary of parameters, to be used for instantiating a
video processor of type [`~video_processing_utils.VideoProcessorBase`] using `from_dict`.

Parameters:
    pretrained_model_name_or_path (`str` or `os.PathLike`):
        The identifier of the pre-trained checkpoint from which we want the dictionary of parameters.
    subfolder (`str`, *optional*, defaults to `""`):
        In case the relevant files are located inside a subfolder of the model repo on huggingface.co, you can
        specify the folder name here.

Returns:
    `Tuple[Dict, Dict]`: The dictionary(ies) that will be used to instantiate the video processor object.
        """
        cache_dir = kwargs.pop("cache_dir", None)
        force_download = kwargs.pop("force_download", False)
        resume_download = kwargs.pop("resume_download", None)
        proxies = kwargs.pop("proxies", None)
        token = kwargs.pop("token", None)
        use_auth_token = kwargs.pop("use_auth_token", None)
        local_files_only = kwargs.pop("local_files_only", False)
        revision = kwargs.pop("revision", None)
        subfolder = kwargs.pop("subfolder", "")

        from_pipeline = kwargs.pop("_from_pipeline", None)
        from_auto_class = kwargs.pop("_from_auto", False)

        if use_auth_token is not None:
            warnings.warn(
                "The `use_auth_token` argument is deprecated and will be removed in v5 of Transformers. Please use"
                " `token` instead.",
                FutureWarning,
            )
            if token is not None:
                raise ValueError(
                    "`token` and `use_auth_token` are both specified. Please set only the argument `token`."
                )
            token = use_auth_token

        user_agent = {"file_type": "video processor", "from_auto_class": from_auto_class}
        if from_pipeline is not None:
            user_agent["using_pipeline"] = from_pipeline

        if is_offline_mode() and not local_files_only:
            logger.info("Offline mode: forcing local_files_only=True")
            local_files_only = True

        pretrained_model_name_or_path = str(pretrained_model_name_or_path)
        is_local = os.path.isdir(pretrained_model_name_or_path)
        if os.path.isdir(pretrained_model_name_or_path):
            video_processor_file = os.path.join(pretrained_model_name_or_path, VIDEO_PROCESSOR_NAME)
        if os.path.isfile(pretrained_model_name_or_path):
            resolved_video_processor_file = pretrained_model_name_or_path
            is_local = True
        elif is_remote_url(pretrained_model_name_or_path):
            video_processor_file = pretrained_model_name_or_path
            resolved_video_processor_file = download_url(pretrained_model_name_or_path)
        else:
            video_processor_file = VIDEO_PROCESSOR_NAME
            try:
                # Load from a local folder, from the cache, or download from the model Hub and cache
                resolved_video_processor_file = cached_file(
                    pretrained_model_name_or_path,
                    video_processor_file,
                    cache_dir=cache_dir,
                    force_download=force_download,
                    proxies=proxies,
                    resume_download=resume_download,
                    local_files_only=local_files_only,
                    token=token,
                    user_agent=user_agent,
                    revision=revision,
                    subfolder=subfolder,
                )
            except EnvironmentError:
                # Older checkpoints saved the video processor config in `preprocessor_config.json`; fall back to it.
                video_processor_file = "preprocessor_config.json"
                resolved_video_processor_file = cached_file(
                    pretrained_model_name_or_path,
                    video_processor_file,
                    cache_dir=cache_dir,
                    force_download=force_download,
                    proxies=proxies,
                    resume_download=resume_download,
                    local_files_only=local_files_only,
                    token=token,
                    user_agent=user_agent,
                    revision=revision,
                    subfolder=subfolder,
                )
                logger.warning_once(
                    "You have video processor config saved in `preprocessor.json` file which is deprecated. "
                    "Video processor configs should be saved in their own `video_preprocessor.json` file. You can "
                    "rename the file or load and save the processor back which renames it automatically. "
                    "Loading from `preprocessor.json` will be removed in v5.0."
                )
            except Exception:
                # For any other exception, raise a generic, more helpful error.
                raise EnvironmentError(
                    f"Can't load video processor for '{pretrained_model_name_or_path}'. If you were trying to load"
                    " it from 'https://huggingface.co/models', make sure you don't have a local directory with the"
                    f" same name. Otherwise, make sure '{pretrained_model_name_or_path}' is the correct path to a"
                    f" directory containing a {VIDEO_PROCESSOR_NAME} file"
                )

        try:
            # Load the video processor dict
            with open(resolved_video_processor_file, "r", encoding="utf-8") as reader:
                text = reader.read()
            video_processor_dict = json.loads(text)
        except json.JSONDecodeError:
            raise EnvironmentError(
                f"It looks like the config file at '{resolved_video_processor_file}' is not a valid JSON file."
            )

        if is_local:
            logger.info(f"loading configuration file {resolved_video_processor_file}")
        else:
            logger.info(
                f"loading configuration file {video_processor_file} from cache at {resolved_video_processor_file}"
            )

        if not is_local:
            if "auto_map" in video_processor_dict:
                video_processor_dict["auto_map"] = add_model_info_to_auto_map(
                    video_processor_dict["auto_map"], pretrained_model_name_or_path
                )
            if "custom_pipelines" in video_processor_dict:
                video_processor_dict["custom_pipelines"] = add_model_info_to_custom_pipelines(
                    video_processor_dict["custom_pipelines"], pretrained_model_name_or_path
                )
        return video_processor_dict, kwargs

    @classmethod
    def from_dict(cls, video_processor_dict: Dict[str, Any], **kwargs):
        """
Instantiates a type of [`~video_processing_utils.VideoProcessorBase`] from a Python dictionary of parameters.

Args:
    video_processor_dict (`Dict[str, Any]`):
        Dictionary that will be used to instantiate the video processor object. Such a dictionary can be
        retrieved from a pretrained checkpoint by leveraging the
        [`~video_processing_utils.VideoProcessorBase.to_dict`] method.
    kwargs (`Dict[str, Any]`):
        Additional parameters from which to initialize the video processor object.

Returns:
    [`~video_processing_utils.VideoProcessorBase`]: The video processor object instantiated from those
    parameters.
        """
        video_processor_dict = video_processor_dict.copy()
        return_unused_kwargs = kwargs.pop("return_unused_kwargs", False)

        # `size` and `crop_size` passed as kwargs take precedence over the values in the dict, so that they are
        # converted to the appropriate dict form inside the video processor rather than overwritten afterwards.
        if "size" in kwargs and "size" in video_processor_dict:
            video_processor_dict["size"] = kwargs.pop("size")
        if "crop_size" in kwargs and "crop_size" in video_processor_dict:
            video_processor_dict["crop_size"] = kwargs.pop("crop_size")

        video_processor = cls(**video_processor_dict)

        # Update video_processor with kwargs if needed
        to_remove = []
        for key, value in kwargs.items():
            if hasattr(video_processor, key):
                setattr(video_processor, key, value)
                to_remove.append(key)
        for key in to_remove:
            kwargs.pop(key, None)

        logger.info(f"Video processor {video_processor}")
        if return_unused_kwargs:
            return video_processor, kwargs
        else:
            return video_processor

    def to_dict(self) -> Dict[str, Any]:
        """
Serializes this instance to a Python dictionary.

Returns:
    `Dict[str, Any]`: Dictionary of all the attributes that make up this video processor instance.
        """
        output = copy.deepcopy(self.__dict__)
        output["video_processor_type"] = self.__class__.__name__

        return output

    def to_json_string(self) -> str:
        """
Serializes this instance to a JSON string.

Returns:
    `str`: String containing all the attributes that make up this video processor instance in JSON format.
        """
        dictionary = self.to_dict()

        for key, value in dictionary.items():
            if isinstance(value, np.ndarray):
                dictionary[key] = value.tolist()

        # make sure the private name "_processor_class" is saved as "processor_class"
        _processor_class = dictionary.pop("_processor_class", None)
        if _processor_class is not None:
            dictionary["processor_class"] = _processor_class

        return json.dumps(dictionary, indent=2, sort_keys=True) + "\n"

    def to_json_file(self, json_file_path: Union[str, os.PathLike]):
        """
Save this instance to a JSON file.

Args:
    json_file_path (`str` or `os.PathLike`):
        Path to the JSON file in which this video processor instance's parameters will be saved.
        """
        with open(json_file_path, "w", encoding="utf-8") as writer:
            writer.write(self.to_json_string())

    def __repr__(self):
        return f"{self.__class__.__name__} {self.to_json_string()}"

    @classmethod
    def from_json_file(cls, json_file: Union[str, os.PathLike]):
        """
Instantiates a video processor of type [`~video_processing_utils.VideoProcessorBase`] from the path to a JSON
file of parameters.

Args:
    json_file (`str` or `os.PathLike`):
        Path to the JSON file containing the parameters.

Returns:
    A video processor of type [`~video_processing_utils.VideoProcessorBase`]: The video_processor object
    instantiated from that JSON file.
        """
        with open(json_file, "r", encoding="utf-8") as reader:
            text = reader.read()
        video_processor_dict = json.loads(text)
        return cls(**video_processor_dict)

    @classmethod
    def register_for_auto_class(cls, auto_class="AutoVideoProcessor"):
        """
Register this class with a given auto class. This should only be used for custom video processors as the ones
in the library are already mapped with `AutoVideoProcessor`.

<Tip warning={true}>

This API is experimental and may have some slight breaking changes in the next releases.

</Tip>

Args:
    auto_class (`str` or `type`, *optional*, defaults to `"AutoVideoProcessor"`):
        The auto class to register this new video processor with.
        """
        if not isinstance(auto_class, str):
            auto_class = auto_class.__name__

        import transformers.models.auto as auto_module

        if not hasattr(auto_module, auto_class):
            raise ValueError(f"{auto_class} is not a valid auto class.")

        cls._auto_class = auto_class

    def fetch_videos(self, video_url_or_urls: Union[str, List[str]]):
        """
Convert a single or a list of urls into the corresponding `np.array` objects.

If a single url is passed, the return value will be a single object. If a list is passed, a list of objects is
returned.
        """
        if isinstance(video_url_or_urls, list):
            return [self.fetch_videos(x) for x in video_url_or_urls]
        elif isinstance(video_url_or_urls, str):
            return load_video(video_url_or_urls)
        else:
            raise TypeError(f"only a single or a list of entries is supported but got type={type(video_url_or_urls)}")


BaseVideoProcessor.push_to_hub = copy_func(BaseVideoProcessor.push_to_hub)
if BaseVideoProcessor.push_to_hub.__doc__ is not None:
    BaseVideoProcessor.push_to_hub.__doc__ = BaseVideoProcessor.push_to_hub.__doc__.format(
        object="video processor", object_class="AutoVideoProcessor", object_files="video processor file"
    )