"""Image processor class for Beit."""

from typing import Any, Dict, List, Optional, Tuple, Union

import numpy as np

from ...image_processing_utils import INIT_SERVICE_KWARGS, BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import resize, to_channel_dimension_format
from ...image_utils import (
    IMAGENET_STANDARD_MEAN,
    IMAGENET_STANDARD_STD,
    ChannelDimension,
    ImageInput,
    PILImageResampling,
    infer_channel_dimension_format,
    is_scaled_image,
    make_list_of_images,
    to_numpy_array,
    valid_images,
    validate_preprocess_arguments,
)
from ...utils import (
    TensorType,
    filter_out_non_signature_kwargs,
    is_torch_available,
    is_torch_tensor,
    is_vision_available,
    logging,
)
from ...utils.deprecation import deprecate_kwarg
from ...utils.import_utils import requires


if is_vision_available():
    import PIL

if is_torch_available():
    import torch


logger = logging.get_logger(__name__)


@requires(backends=("vision",))
class BeitImageProcessor(BaseImageProcessor):
    r"""
    Constructs a BEiT image processor.

    Args:
        do_resize (`bool`, *optional*, defaults to `True`):
            Whether to resize the image's (height, width) dimensions to the specified `size`. Can be overridden by the
            `do_resize` parameter in the `preprocess` method.
        size (`Dict[str, int]`, *optional*, defaults to `{"height": 256, "width": 256}`):
            Size of the output image after resizing. Can be overridden by the `size` parameter in the `preprocess`
            method.
        resample (`PILImageResampling`, *optional*, defaults to `Resampling.BICUBIC`):
            Resampling filter to use if resizing the image. Can be overridden by the `resample` parameter in the
            `preprocess` method.
        do_center_crop (`bool`, *optional*, defaults to `True`):
            Whether to center crop the image. If the input size is smaller than `crop_size` along any edge, the image
            is padded with 0's and then center cropped. Can be overridden by the `do_center_crop` parameter in the
            `preprocess` method.
        crop_size (`Dict[str, int]`, *optional*, defaults to `{"height": 224, "width": 224}`):
            Desired output size when applying center-cropping. Only has an effect if `do_center_crop` is set to `True`.
            Can be overridden by the `crop_size` parameter in the `preprocess` method.
        rescale_factor (`int` or `float`, *optional*, defaults to `1/255`):
            Scale factor to use if rescaling the image. Can be overridden by the `rescale_factor` parameter in the
            `preprocess` method.
        do_rescale (`bool`, *optional*, defaults to `True`):
            Whether to rescale the image by the specified scale `rescale_factor`. Can be overridden by the `do_rescale`
            parameter in the `preprocess` method.
        do_normalize (`bool`, *optional*, defaults to `True`):
            Whether to normalize the image. Can be overridden by the `do_normalize` parameter in the `preprocess`
            method.
        image_mean (`float` or `List[float]`, *optional*, defaults to `IMAGENET_STANDARD_MEAN`):
            The mean to use if normalizing the image. This is a float or list of floats of length of the number of
            channels of the image. Can be overridden by the `image_mean` parameter in the `preprocess` method.
        image_std (`float` or `List[float]`, *optional*, defaults to `IMAGENET_STANDARD_STD`):
            The standard deviation to use if normalizing the image. This is a float or list of floats of length of the
            number of channels of the image. Can be overridden by the `image_std` parameter in the `preprocess` method.
        do_reduce_labels (`bool`, *optional*, defaults to `False`):
            Whether or not to reduce all label values of segmentation maps by 1. Usually used for datasets where 0 is
            used for background, and background itself is not included in all classes of a dataset (e.g. ADE20k). The
            background label will be replaced by 255. Can be overridden by the `do_reduce_labels` parameter in the
            `preprocess` method.
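
    Example (a minimal usage sketch; the random array below stands in for a real image such as a `PIL.Image.Image`):

    ```python
    >>> import numpy as np
    >>> from transformers import BeitImageProcessor

    >>> image_processor = BeitImageProcessor()  # defaults: resize to 256x256, center crop to 224x224, rescale, normalize
    >>> image = np.random.randint(0, 256, (480, 640, 3), dtype=np.uint8)
    >>> inputs = image_processor(images=image, return_tensors="np")
    >>> inputs["pixel_values"].shape
    (1, 3, 224, 224)
    ```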
    """

    model_input_names = ["pixel_values"]

    @deprecate_kwarg("reduce_labels", new_name="do_reduce_labels", version="4.41.0")
    @filter_out_non_signature_kwargs(extra=INIT_SERVICE_KWARGS)
    def __init__(
        self,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        do_center_crop: bool = True,
        crop_size: Dict[str, int] = None,
        rescale_factor: Union[int, float] = 1 / 255,
        do_rescale: bool = True,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        do_reduce_labels: bool = False,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"height": 256, "width": 256}
        size = get_size_dict(size)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, param_name="crop_size")
        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD
        self.do_reduce_labels = do_reduce_labels

    @classmethod
    def from_dict(cls, image_processor_dict: Dict[str, Any], **kwargs):
        """
        Overrides the `from_dict` method from the base class to preserve support for the deprecated `reduce_labels`
        argument in old configs.
        """
        image_processor_dict = image_processor_dict.copy()
        if "reduce_labels" in kwargs:
            image_processor_dict["reduce_labels"] = kwargs.pop("reduce_labels")
        return super().from_dict(image_processor_dict, **kwargs)

    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        input_data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """
        Resize an image to (size["height"], size["width"]).

        Args:
            image (`np.ndarray`):
                Image to resize.
            size (`Dict[str, int]`):
                Size of the output image.
            resample (`PILImageResampling`, *optional*, defaults to `PIL.Image.BICUBIC`):
                Resampling filter to use when resizing the image.
            data_format (`str` or `ChannelDimension`, *optional*):
                The channel dimension format of the image. If not provided, it will be the same as the input image.
            input_data_format (`str` or `ChannelDimension`, *optional*):
                The channel dimension format of the input image. If not provided, it will be inferred.
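
        Example (a small illustrative call on a random array; the channel layout is inferred from the input):

        ```python
        >>> import numpy as np
        >>> from transformers import BeitImageProcessor

        >>> processor = BeitImageProcessor()
        >>> image = np.zeros((480, 640, 3), dtype=np.uint8)
        >>> processor.resize(image, size={"height": 256, "width": 256}).shape
        (256, 256, 3)
        ```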
        """
        size = get_size_dict(size, default_to_square=True, param_name="size")
        if "height" not in size or "width" not in size:
            raise ValueError(f"The `size` argument must contain `height` and `width` keys. Got {size.keys()}")
        return resize(
            image,
            size=(size["height"], size["width"]),
            resample=resample,
            data_format=data_format,
            input_data_format=input_data_format,
            **kwargs,
        )

    def reduce_label(self, label: ImageInput) -> np.ndarray:
        label = to_numpy_array(label)
        # Avoid using underflow conversion: map background (0) to 255, then shift the remaining labels down by one.
        label[label == 0] = 255
        label = label - 1
        label[label == 254] = 255
        return label

    def _preprocess(
        self,
        image: ImageInput,
        do_reduce_labels: Optional[bool] = None,
        do_resize: Optional[bool] = None,
        size: Optional[Dict[str, int]] = None,
        resample: PILImageResampling = None,
        do_center_crop: Optional[bool] = None,
        crop_size: Optional[Dict[str, int]] = None,
        do_rescale: Optional[bool] = None,
        rescale_factor: Optional[float] = None,
        do_normalize: Optional[bool] = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        input_data_format: Optional[Union[str, ChannelDimension]] = None,
    ):
        if do_reduce_labels:
            image = self.reduce_label(image)

        if do_resize:
            image = self.resize(image=image, size=size, resample=resample, input_data_format=input_data_format)

        if do_center_crop:
            image = self.center_crop(image=image, size=crop_size, input_data_format=input_data_format)

        if do_rescale:
            image = self.rescale(image=image, scale=rescale_factor, input_data_format=input_data_format)

        if do_normalize:
            image = self.normalize(image=image, mean=image_mean, std=image_std, input_data_format=input_data_format)

        return image

    def _preprocess_image(
        self,
        image: ImageInput,
        do_resize: Optional[bool] = None,
        size: Optional[Dict[str, int]] = None,
        resample: PILImageResampling = None,
        do_center_crop: Optional[bool] = None,
        crop_size: Optional[Dict[str, int]] = None,
        do_rescale: Optional[bool] = None,
        rescale_factor: Optional[float] = None,
        do_normalize: Optional[bool] = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        input_data_format: Optional[Union[str, ChannelDimension]] = None,
    ) -> np.ndarray:
        """Preprocesses a single image."""
        # All transformations expect numpy arrays.
        image = to_numpy_array(image)
        if do_rescale and is_scaled_image(image):
            logger.warning_once(
                "It looks like you are trying to rescale already rescaled images. If the input images have pixel "
                "values between 0 and 1, set `do_rescale=False` to avoid rescaling them again."
            )
        if input_data_format is None:
            input_data_format = infer_channel_dimension_format(image)
        image = self._preprocess(
            image,
            do_reduce_labels=False,
            do_resize=do_resize,
            size=size,
            resample=resample,
            do_center_crop=do_center_crop,
            crop_size=crop_size,
            do_rescale=do_rescale,
            rescale_factor=rescale_factor,
            do_normalize=do_normalize,
            image_mean=image_mean,
            image_std=image_std,
            input_data_format=input_data_format,
        )
        if data_format is not None:
            image = to_channel_dimension_format(image, data_format, input_channel_dim=input_data_format)
        return image

    def _preprocess_segmentation_map(
        self,
        segmentation_map: ImageInput,
        do_resize: Optional[bool] = None,
        size: Optional[Dict[str, int]] = None,
        resample: PILImageResampling = None,
        do_center_crop: Optional[bool] = None,
        crop_size: Optional[Dict[str, int]] = None,
        do_reduce_labels: Optional[bool] = None,
        input_data_format: Optional[Union[str, ChannelDimension]] = None,
    ):
        """Preprocesses a single segmentation map."""
        # All transformations expect numpy arrays.
        segmentation_map = to_numpy_array(segmentation_map)
        # Add an axis to the segmentation map for the transformations.
        if segmentation_map.ndim == 2:
            segmentation_map = segmentation_map[None, ...]
            added_dimension = True
            input_data_format = ChannelDimension.FIRST
        else:
            added_dimension = False
            if input_data_format is None:
                input_data_format = infer_channel_dimension_format(segmentation_map, num_channels=1)
        segmentation_map = self._preprocess(
            image=segmentation_map,
            do_reduce_labels=do_reduce_labels,
            do_resize=do_resize,
            resample=resample,
            size=size,
            do_center_crop=do_center_crop,
            crop_size=crop_size,
            do_normalize=False,
            do_rescale=False,
            input_data_format=ChannelDimension.FIRST,
        )
        # Remove the extra axis if it was added.
        if added_dimension:
            segmentation_map = np.squeeze(segmentation_map, axis=0)
        segmentation_map = segmentation_map.astype(np.int64)
        return segmentation_map

    def __call__(self, images, segmentation_maps=None, **kwargs):
        # Overrides the `__call__` method of the base class so that segmentation maps can be passed in as a
        # positional argument alongside the images.
        return super().__call__(images, segmentation_maps=segmentation_maps, **kwargs)

    @deprecate_kwarg("reduce_labels", new_name="do_reduce_labels", version="4.41.0")
    @filter_out_non_signature_kwargs()
    def preprocess(
        self,
        images: ImageInput,
        segmentation_maps: Optional[ImageInput] = None,
        do_resize: Optional[bool] = None,
        size: Optional[Dict[str, int]] = None,
        resample: PILImageResampling = None,
        do_center_crop: Optional[bool] = None,
        crop_size: Optional[Dict[str, int]] = None,
        do_rescale: Optional[bool] = None,
        rescale_factor: Optional[float] = None,
        do_normalize: Optional[bool] = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        do_reduce_labels: Optional[bool] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: ChannelDimension = ChannelDimension.FIRST,
        input_data_format: Optional[Union[str, ChannelDimension]] = None,
    ) -> PIL.Image.Image:
        """
        Preprocess an image or batch of images.

        Args:
            images (`ImageInput`):
                Image to preprocess. Expects a single or batch of images with pixel values ranging from 0 to 255. If
                passing in images with pixel values between 0 and 1, set `do_rescale=False`.
            segmentation_maps (`ImageInput`, *optional*):
                Segmentation maps to preprocess. Expects a single or batch of images with pixel values ranging from 0 to 255. If
                passing in images with pixel values between 0 and 1, set `do_rescale=False`.
            do_resize (`bool`, *optional*, defaults to `self.do_resize`):
                Whether to resize the image.
            size (`Dict[str, int]`, *optional*, defaults to `self.size`):
                Size of the image after resizing.
            resample (`int`, *optional*, defaults to `self.resample`):
                Resampling filter to use if resizing the image. This can be one of the enum `PILImageResampling`. Only
                has an effect if `do_resize` is set to `True`.
            do_center_crop (`bool`, *optional*, defaults to `self.do_center_crop`):
                Whether to center crop the image.
            crop_size (`Dict[str, int]`, *optional*, defaults to `self.crop_size`):
                Size of the image after center crop. If one edge of the image is smaller than `crop_size`, it will be
                padded with zeros and then cropped.
            do_rescale (`bool`, *optional*, defaults to `self.do_rescale`):
                Whether to rescale the image values between [0 - 1].
            rescale_factor (`float`, *optional*, defaults to `self.rescale_factor`):
                Rescale factor to rescale the image by if `do_rescale` is set to `True`.
            do_normalize (`bool`, *optional*, defaults to `self.do_normalize`):
                Whether to normalize the image.
            image_mean (`float` or `List[float]`, *optional*, defaults to `self.image_mean`):
                Image mean.
            image_std (`float` or `List[float]`, *optional*, defaults to `self.image_std`):
                Image standard deviation.
            do_reduce_labels (`bool`, *optional*, defaults to `self.do_reduce_labels`):
                Whether or not to reduce all label values of segmentation maps by 1. Usually used for datasets where 0
                is used for background, and background itself is not included in all classes of a dataset (e.g.
                ADE20k). The background label will be replaced by 255.
            return_tensors (`str` or `TensorType`, *optional*):
                The type of tensors to return. Can be one of:
                    - Unset: Return a list of `np.ndarray`.
                    - `TensorType.TENSORFLOW` or `'tf'`: Return a batch of type `tf.Tensor`.
                    - `TensorType.PYTORCH` or `'pt'`: Return a batch of type `torch.Tensor`.
                    - `TensorType.NUMPY` or `'np'`: Return a batch of type `np.ndarray`.
                    - `TensorType.JAX` or `'jax'`: Return a batch of type `jax.numpy.ndarray`.
            data_format (`ChannelDimension` or `str`, *optional*, defaults to `ChannelDimension.FIRST`):
                The channel dimension format for the output image. Can be one of:
                - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
                - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
                - Unset: Use the channel dimension format of the input image.
            input_data_format (`ChannelDimension` or `str`, *optional*):
                The channel dimension format for the input image. If unset, the channel dimension format is inferred
                from the input image. Can be one of:
                - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
                - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
                - `"none"` or `ChannelDimension.NONE`: image in (height, width) format.
        """
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=True, param_name="size")
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, default_to_square=True, param_name="crop_size")
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        do_reduce_labels = do_reduce_labels if do_reduce_labels is not None else self.do_reduce_labels

        images = make_list_of_images(images)

        if segmentation_maps is not None:
            segmentation_maps = make_list_of_images(segmentation_maps, expected_ndims=2)

        if segmentation_maps is not None and not valid_images(segmentation_maps):
            raise ValueError(
                "Invalid segmentation_maps type. Must be of type PIL.Image.Image, numpy.ndarray, torch.Tensor, "
                "tf.Tensor or jax.ndarray."
            )
        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, torch.Tensor, "
                "tf.Tensor or jax.ndarray."
            )

        validate_preprocess_arguments(
            do_rescale=do_rescale,
            rescale_factor=rescale_factor,
            do_normalize=do_normalize,
            image_mean=image_mean,
            image_std=image_std,
            do_center_crop=do_center_crop,
            crop_size=crop_size,
            do_resize=do_resize,
            size=size,
            resample=resample,
        )

        images = [
            self._preprocess_image(
                image=img,
                do_resize=do_resize,
                do_center_crop=do_center_crop,
                do_rescale=do_rescale,
                do_normalize=do_normalize,
                resample=resample,
                size=size,
                rescale_factor=rescale_factor,
                crop_size=crop_size,
                image_mean=image_mean,
                image_std=image_std,
                data_format=data_format,
                input_data_format=input_data_format,
            )
            for img in images
        ]

        data = {"pixel_values": images}

        if segmentation_maps is not None:
            segmentation_maps = [
                self._preprocess_segmentation_map(
                    segmentation_map=segmentation_map,
                    do_reduce_labels=do_reduce_labels,
                    do_resize=do_resize,
                    resample=resample,
                    size=size,
                    do_center_crop=do_center_crop,
                    crop_size=crop_size,
                )
                for segmentation_map in segmentation_maps
            ]
            data["labels"] = segmentation_maps

        return BatchFeature(data=data, tensor_type=return_tensors)

    def post_process_semantic_segmentation(self, outputs, target_sizes: Optional[List[Tuple]] = None):
        """
        Converts the output of [`BeitForSemanticSegmentation`] into semantic segmentation maps. Only supports PyTorch.

        Args:
            outputs ([`BeitForSemanticSegmentation`]):
                Raw outputs of the model.
            target_sizes (`List[Tuple]` of length `batch_size`, *optional*):
                List of tuples corresponding to the requested final size (height, width) of each prediction. If unset,
                predictions will not be resized.

        Returns:
            semantic_segmentation: `List[torch.Tensor]` of length `batch_size`, where each item is a semantic
            segmentation map of shape (height, width) corresponding to the target_sizes entry (if `target_sizes` is
            specified). Each entry of each `torch.Tensor` corresponds to a semantic class id.
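
        Example (a minimal sketch; random logits stand in for real `BeitForSemanticSegmentation` outputs):

        ```python
        >>> import torch
        >>> from transformers import BeitImageProcessor
        >>> from transformers.modeling_outputs import SemanticSegmenterOutput

        >>> image_processor = BeitImageProcessor()
        >>> # 2 images, 150 classes, 56x56 logit maps
        >>> outputs = SemanticSegmenterOutput(logits=torch.randn(2, 150, 56, 56))
        >>> maps = image_processor.post_process_semantic_segmentation(outputs, target_sizes=[(300, 500), (280, 400)])
        >>> maps[0].shape
        torch.Size([300, 500])
        ```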
        """
        # TODO: add support for other frameworks
        logits = outputs.logits

        # Resize logits and compute the semantic segmentation maps
        if target_sizes is not None:
            if len(logits) != len(target_sizes):
                raise ValueError(
                    "Make sure that you pass in as many target sizes as the batch dimension of the logits"
                )

            if is_torch_tensor(target_sizes):
                target_sizes = target_sizes.numpy()

            semantic_segmentation = []

            for idx in range(len(logits)):
                resized_logits = torch.nn.functional.interpolate(
                    logits[idx].unsqueeze(dim=0), size=target_sizes[idx], mode="bilinear", align_corners=False
                )
                semantic_map = resized_logits[0].argmax(dim=0)
                semantic_segmentation.append(semantic_map)
        else:
            semantic_segmentation = logits.argmax(dim=1)
            semantic_segmentation = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0])]

        return semantic_segmentation


__all__ = ["BeitImageProcessor"]