o
    ZhI                     @   sh  d Z ddlmZmZmZmZmZmZ ddlZ	ddl
mZmZ ddlmZmZmZ ddlmZmZ ddlmZmZmZmZmZmZmZmZmZmZmZm Z  dd	l!m"Z"m#Z#m$Z$ dd
l%m&Z& e rcddl'Z'erkddl(m)Z) e rrddl*Z*e#+e,Z-	ddedeee.ef  fddZ/	ddedeee.ef  defddZ0defddZ1e&ddG dd deZ2dgZ3dS )z%Image processor class for SuperPoint.    )TYPE_CHECKINGDictListOptionalTupleUnionN   )is_torch_availableis_vision_available)BaseImageProcessorBatchFeatureget_size_dict)resizeto_channel_dimension_format)ChannelDimension
ImageInput	ImageTypePILImageResamplingget_image_typeinfer_channel_dimension_formatis_pil_imageis_scaled_imageis_valid_imageto_numpy_arrayvalid_imagesvalidate_preprocess_arguments)
TensorTypeloggingrequires_backends)requires   )KeypointMatchingOutputimageinput_data_formatc                 C   s   |t jkr$| jd dkrdS t| d | d ko#t| d | d kS |t jkrH| jd dkr2dS t| d | d	 koGt| d	 | d
 kS d S )Nr   r    Tr   .r    .   ..r   .r    .r'   )r   FIRSTshapenpallLAST)r"   r#    r1   g/var/www/auris/lib/python3.10/site-packages/transformers/models/superglue/image_processing_superglue.pyis_grayscale6   s   
,
,r3   returnc                 C   s   t tdg t| tjrZt| |dr| S |tjkr7| d d | d d  | d d  }tj|gd	 d
d}|S |tj	krX| d d | d d  | d d  }tj|gd	 dd}|S t| t
jjsc| S | d} | S )ao  
    Converts an image to grayscale format using the NTSC formula. Only support numpy and PIL Image. TODO support torch
    and tensorflow grayscale conversion

    This function is supposed to return a 1-channel image, but it returns a 3-channel image with the same value in each
    channel, because of an issue that is discussed in :
    https://github.com/huggingface/transformers/pull/25786#issuecomment-1730176446

    Args:
        image (Image):
            The image to convert.
        input_data_format (`ChannelDimension` or `str`, *optional*):
            The channel dimension format for the input image.
    Zvisionr#   r$   gŏ1w-!?r%   gbX9?r&   gv/?r   r   )Zaxisr)   r*   r+   r(   L)r   convert_to_grayscale
isinstancer.   ndarrayr3   r   r,   stackr0   PILZImageconvert)r"   r#   Z
gray_imager1   r1   r2   r7   E   s    
$
$
r7   imagesc                    sh   d}dd  t | tr0t| dkrt fdd| D r| S t fdd| D r0dd	 | D S t|)
N)z-Input images must be a one of the following :z - A pair of PIL images.z - A pair of 3D arrays.z! - A list of pairs of PIL images.z  - A list of pairs of 3D arrays.c                 S   s,   t | pt| ot| tjkot| jdkS )z$images is a PIL Image or a 3D array.r   )r   r   r   r   r;   lenr-   )r"   r1   r1   r2   _is_valid_imaget   s   "z8validate_and_format_image_pairs.<locals>._is_valid_imager'   c                 3       | ]} |V  qd S Nr1   .0r"   r?   r1   r2   	<genexpr>{       z2validate_and_format_image_pairs.<locals>.<genexpr>c                 3   s<    | ]}t |tot|d kot fdd|D V  qdS )r'   c                 3   r@   rA   r1   rB   rD   r1   r2   rE      rF   z<validate_and_format_image_pairs.<locals>.<genexpr>.<genexpr>N)r8   listr>   r/   )rC   
image_pairrD   r1   r2   rE   }   s    


c                 S   s   g | ]	}|D ]}|qqS r1   r1   )rC   rH   r"   r1   r1   r2   
<listcomp>   s    z3validate_and_format_image_pairs.<locals>.<listcomp>)r8   rG   r>   r/   
ValueError)r=   error_messager1   rD   r2   validate_and_format_image_pairsk   s   
"rL   )torch)backendsc                       sV  e Zd ZdZdgZddejdddfdedee	e
ef  ded	ed
ededdf fddZ		ddejde	e
ef deee
ef  deee
ef  fddZdddddddejdf	dee dee	e
ef  ded	ee d
ee dee deee
ef  dedeee
ef  defddZ	ddddeeee f dedee	e
ejf  fddZ  ZS ) SuperGlueImageProcessorap  
    Constructs a SuperGlue image processor.

    Args:
        do_resize (`bool`, *optional*, defaults to `True`):
            Controls whether to resize the image's (height, width) dimensions to the specified `size`. Can be overridden
            by `do_resize` in the `preprocess` method.
        size (`Dict[str, int]` *optional*, defaults to `{"height": 480, "width": 640}`):
            Resolution of the output image after `resize` is applied. Only has an effect if `do_resize` is set to
            `True`. Can be overridden by `size` in the `preprocess` method.
        resample (`PILImageResampling`, *optional*, defaults to `Resampling.BILINEAR`):
            Resampling filter to use if resizing the image. Can be overridden by `resample` in the `preprocess` method.
        do_rescale (`bool`, *optional*, defaults to `True`):
            Whether to rescale the image by the specified scale `rescale_factor`. Can be overridden by `do_rescale` in
            the `preprocess` method.
        rescale_factor (`int` or `float`, *optional*, defaults to `1/255`):
            Scale factor to use if rescaling the image. Can be overridden by `rescale_factor` in the `preprocess`
            method.
        do_grayscale (`bool`, *optional*, defaults to `True`):
            Whether to convert the image to grayscale. Can be overridden by `do_grayscale` in the `preprocess` method.
    pixel_valuesTNgp?	do_resizesizeresample
do_rescalerescale_factordo_grayscaler4   c                    s\   t  jdi | |d ur|nddd}t|dd}|| _|| _|| _|| _|| _|| _d S )Ni  i  )heightwidthFZdefault_to_squarer1   )	super__init__r   rQ   rR   rS   rT   rU   rV   )selfrQ   rR   rS   rT   rU   rV   kwargs	__class__r1   r2   r[      s   

z SuperGlueImageProcessor.__init__r"   data_formatr#   c                 K   s0   t |dd}t|f|d |d f||d|S )aL  
        Resize an image.

        Args:
            image (`np.ndarray`):
                Image to resize.
            size (`Dict[str, int]`):
                Dictionary of the form `{"height": int, "width": int}`, specifying the size of the output image.
            data_format (`ChannelDimension` or `str`, *optional*):
                The channel dimension format of the output image. If not provided, it will be inferred from the input
                image. Can be one of:
                - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
                - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
                - `"none"` or `ChannelDimension.NONE`: image in (height, width) format.
            input_data_format (`ChannelDimension` or `str`, *optional*):
                The channel dimension format for the input image. If unset, the channel dimension format is inferred
                from the input image. Can be one of:
                - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
                - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
                - `"none"` or `ChannelDimension.NONE`: image in (height, width) format.
        FrY   rW   rX   )rR   r`   r#   )r   r   )r\   r"   rR   r`   r#   r]   r1   r1   r2   r      s   zSuperGlueImageProcessor.resizereturn_tensorsc                    sp  |dur|n| j }|dur|n| j}|dur|n| j}|dur!|n| j}|dur*|n| j}|dur3|n| j}t|dd}t|}t|sHt	dt
|||||d dd |D }t|d re|retd	 |
du rot|d }
g  |D ]+}|r| j||||
d
}|r| j|||
d}|rt||
d}t||	|
d} | qs fddtdt dD }d|i}t||dS )a   
        Preprocess an image or batch of images.

        Args:
            images (`ImageInput`):
                Image pairs to preprocess. Expects either a list of 2 images or a list of list of 2 images list with
                pixel values ranging from 0 to 255. If passing in images with pixel values between 0 and 1, set
                `do_rescale=False`.
            do_resize (`bool`, *optional*, defaults to `self.do_resize`):
                Whether to resize the image.
            size (`Dict[str, int]`, *optional*, defaults to `self.size`):
                Size of the output image after `resize` has been applied. If `size["shortest_edge"]` >= 384, the image
                is resized to `(size["shortest_edge"], size["shortest_edge"])`. Otherwise, the smaller edge of the
                image will be matched to `int(size["shortest_edge"]/ crop_pct)`, after which the image is cropped to
                `(size["shortest_edge"], size["shortest_edge"])`. Only has an effect if `do_resize` is set to `True`.
            resample (`PILImageResampling`, *optional*, defaults to `self.resample`):
                Resampling filter to use if resizing the image. This can be one of `PILImageResampling`, filters. Only
                has an effect if `do_resize` is set to `True`.
            do_rescale (`bool`, *optional*, defaults to `self.do_rescale`):
                Whether to rescale the image values between [0 - 1].
            rescale_factor (`float`, *optional*, defaults to `self.rescale_factor`):
                Rescale factor to rescale the image by if `do_rescale` is set to `True`.
            do_grayscale (`bool`, *optional*, defaults to `self.do_grayscale`):
                Whether to convert the image to grayscale.
            return_tensors (`str` or `TensorType`, *optional*):
                The type of tensors to return. Can be one of:
                    - Unset: Return a list of `np.ndarray`.
                    - `TensorType.TENSORFLOW` or `'tf'`: Return a batch of type `tf.Tensor`.
                    - `TensorType.PYTORCH` or `'pt'`: Return a batch of type `torch.Tensor`.
                    - `TensorType.NUMPY` or `'np'`: Return a batch of type `np.ndarray`.
                    - `TensorType.JAX` or `'jax'`: Return a batch of type `jax.numpy.ndarray`.
            data_format (`ChannelDimension` or `str`, *optional*, defaults to `ChannelDimension.FIRST`):
                The channel dimension format for the output image. Can be one of:
                - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
                - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
                - Unset: Use the channel dimension format of the input image.
            input_data_format (`ChannelDimension` or `str`, *optional*):
                The channel dimension format for the input image. If unset, the channel dimension format is inferred
                from the input image. Can be one of:
                - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
                - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
                - `"none"` or `ChannelDimension.NONE`: image in (height, width) format.
        NFrY   zkInvalid image type. Must be of type PIL.Image.Image, numpy.ndarray, torch.Tensor, tf.Tensor or jax.ndarray.)rQ   rR   rS   rT   rU   c                 S   s   g | ]}t |qS r1   )r   rB   r1   r1   r2   rI   3  s    z6SuperGlueImageProcessor.preprocess.<locals>.<listcomp>r   zIt looks like you are trying to rescale already rescaled images. If the input images have pixel values between 0 and 1, set `do_rescale=False` to avoid rescaling them again.)r"   rR   rS   r#   )r"   scaler#   r5   )Zinput_channel_dimc                    s   g | ]
} ||d   qS )r'   r1   )rC   iZ
all_imagesr1   r2   rI   N  s    r'   rP   )dataZtensor_type)rQ   rS   rT   rU   rV   rR   r   rL   r   rJ   r   r   loggerZwarning_oncer   r   Zrescaler7   r   appendranger>   r   )r\   r=   rQ   rR   rS   rT   rU   rV   ra   r`   r#   r]   r"   Zimage_pairsre   r1   rd   r2   
preprocess   sN   :	z"SuperGlueImageProcessor.preprocess        outputsr!   target_sizes	thresholdc                 C   sp  |j jd t|krtdtdd |D stdt|tr*tj||j j	d}n|jd dks8|jd dkr<td|}|j
 }||d	d	ddd }|tj}g }t|j ||jd
d
df |jd
d
df D ]G\}}}	}
|d dk}|d dk}|d | }|d | }|	| }|
| }t||k|d	k}|| }|||  }|| }||||d qn|S )a  
        Converts the raw output of [`KeypointMatchingOutput`] into lists of keypoints, scores and descriptors
        with coordinates absolute to the original image sizes.
        Args:
            outputs ([`KeypointMatchingOutput`]):
                Raw outputs of the model.
            target_sizes (`torch.Tensor` or `List[Tuple[Tuple[int, int]]]`, *optional*):
                Tensor of shape `(batch_size, 2, 2)` or list of tuples of tuples (`Tuple[int, int]`) containing the
                target size `(height, width)` of each image in the batch. This must be the original image size (before
                any processing).
            threshold (`float`, *optional*, defaults to 0.0):
                Threshold to filter out the matches with low scores.
        Returns:
            `List[Dict]`: A list of dictionaries, each dictionary containing the keypoints in the first and second image
            of the pair, the matching scores and the matching indices.
        r   zRMake sure that you pass in as many target sizes as the batch dimension of the maskc                 s   s    | ]	}t |d kV  qdS )r'   N)r>   )rC   Ztarget_sizer1   r1   r2   rE   l  s    zISuperGlueImageProcessor.post_process_keypoint_matching.<locals>.<genexpr>zTEach element of target_sizes must contain the size (h, w) of each image of the batch)devicer    r'   r(   N)
keypoints0
keypoints1matching_scores)maskr-   r>   rJ   r/   r8   r   rM   Ztensorrn   	keypointscloneflipZreshapetoZint32zipmatchesrq   logical_andrg   )r\   rk   rl   rm   Zimage_pair_sizesrs   resultsZ	mask_pairZkeypoints_pairrx   ZscoresZmask0Zmask1ro   rp   Zmatches0Zscores0Zvalid_matchesZmatched_keypoints0Zmatched_keypoints1rq   r1   r1   r2   post_process_keypoint_matchingT  sF   

&z6SuperGlueImageProcessor.post_process_keypoint_matching)NN)rj   )__name__
__module____qualname____doc__Zmodel_input_namesr   ZBILINEARboolr   r   strintfloatr[   r.   r9   r   r   r   r,   r   r   ri   r   r   rM   ZTensorr{   __classcell__r1   r1   r^   r2   rO      s    	

*	

zrO   rA   )4r   typingr   r   r   r   r   r   numpyr.    r	   r
   Zimage_processing_utilsr   r   r   Zimage_transformsr   r   Zimage_utilsr   r   r   r   r   r   r   r   r   r   r   r   utilsr   r   r   Zutils.import_utilsr   rM   Zmodeling_supergluer!   r;   Z
get_loggerr|   rf   r   r3   r7   rL   rO   __all__r1   r1   r1   r2   <module>   sH    8


&  
