"""PyTorch OWL-ViT model."""

from dataclasses import dataclass
from functools import lru_cache
from typing import Any, Dict, Optional, Tuple, Union

import torch
import torch.utils.checkpoint
from torch import Tensor, nn

from ...activations import ACT2FN
from ...modeling_attn_mask_utils import _create_4d_causal_attention_mask, _prepare_4d_attention_mask
from ...modeling_outputs import BaseModelOutput, BaseModelOutputWithPooling
from ...modeling_utils import PreTrainedModel
from ...utils import ModelOutput, auto_docstring, is_vision_available, logging, torch_int
from .configuration_owlvit import OwlViTConfig, OwlViTTextConfig, OwlViTVisionConfig


if is_vision_available():
    from transformers.image_transforms import center_to_corners_format


logger = logging.get_logger(__name__)


def contrastive_loss(logits: torch.Tensor) -> torch.Tensor:
    return nn.functional.cross_entropy(logits, torch.arange(len(logits), device=logits.device))


def owlvit_loss(similarity: torch.Tensor) -> torch.Tensor:
    caption_loss = contrastive_loss(similarity)
    image_loss = contrastive_loss(similarity.t())
    return (caption_loss + image_loss) / 2.0
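
# Illustrative sketch only (not part of the upstream module): `owlvit_loss` expects a square
# text-to-image similarity matrix and averages the cross-entropy taken in both directions, so a
# strongly diagonal matrix (every text matches "its" image) gives a much smaller loss than a
# misaligned one. Function name and values below are hypothetical.
def _example_owlvit_loss_usage() -> Tuple[torch.Tensor, torch.Tensor]:
    aligned = torch.eye(4) * 10.0  # text i is most similar to image i
    misaligned = torch.roll(aligned, shifts=1, dims=0)  # every text points at the wrong image
    return owlvit_loss(aligned), owlvit_loss(misaligned)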
@dataclass
class OwlViTOutput(ModelOutput):
    r"""
    Args:
        loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `return_loss` is `True`):
            Contrastive loss for image-text similarity.
        logits_per_image (`torch.FloatTensor` of shape `(image_batch_size, text_batch_size)`):
            The scaled dot product scores between `image_embeds` and `text_embeds`. This represents the image-text
            similarity scores.
        logits_per_text (`torch.FloatTensor` of shape `(text_batch_size, image_batch_size)`):
            The scaled dot product scores between `text_embeds` and `image_embeds`. This represents the text-image
            similarity scores.
        text_embeds (`torch.FloatTensor` of shape `(batch_size * num_max_text_queries, output_dim)`):
            The text embeddings obtained by applying the projection layer to the pooled output of [`OwlViTTextModel`].
        image_embeds (`torch.FloatTensor` of shape `(batch_size, output_dim)`):
            The image embeddings obtained by applying the projection layer to the pooled output of
            [`OwlViTVisionModel`].
        text_model_output (Tuple[`BaseModelOutputWithPooling`]):
            The output of the [`OwlViTTextModel`].
        vision_model_output (`BaseModelOutputWithPooling`):
            The output of the [`OwlViTVisionModel`].
    """

    loss: Optional[torch.FloatTensor] = None
    logits_per_image: Optional[torch.FloatTensor] = None
    logits_per_text: Optional[torch.FloatTensor] = None
    text_embeds: Optional[torch.FloatTensor] = None
    image_embeds: Optional[torch.FloatTensor] = None
    text_model_output: BaseModelOutputWithPooling = None
    vision_model_output: BaseModelOutputWithPooling = None

    def to_tuple(self) -> Tuple[Any]:
        return tuple(
            self[k] if k not in ["text_model_output", "vision_model_output"] else getattr(self, k).to_tuple()
            for k in self.keys()
        )


def _upcast(t: Tensor) -> Tensor:
    # Protects from numerical overflows in multiplications by upcasting to the equivalent higher type
    if t.is_floating_point():
        return t if t.dtype in (torch.float32, torch.float64) else t.float()
    else:
        return t if t.dtype in (torch.int32, torch.int64) else t.int()
def box_area(boxes: Tensor) -> Tensor:
    """
    Computes the area of a set of bounding boxes, which are specified by its (x1, y1, x2, y2) coordinates.

    Args:
        boxes (`torch.FloatTensor` of shape `(number_of_boxes, 4)`):
            Boxes for which the area will be computed. They are expected to be in (x1, y1, x2, y2) format with `0 <= x1
            < x2` and `0 <= y1 < y2`.

    Returns:
        `torch.FloatTensor`: a tensor containing the area for each box.
    """
    boxes = _upcast(boxes)
    return (boxes[:, 2] - boxes[:, 0]) * (boxes[:, 3] - boxes[:, 1])


def box_iou(boxes1: Tensor, boxes2: Tensor) -> Tuple[Tensor, Tensor]:
    area1 = box_area(boxes1)
    area2 = box_area(boxes2)

    left_top = torch.max(boxes1[:, None, :2], boxes2[:, :2])  # [N, M, 2]
    right_bottom = torch.min(boxes1[:, None, 2:], boxes2[:, 2:])  # [N, M, 2]

    width_height = (right_bottom - left_top).clamp(min=0)  # [N, M, 2]
    inter = width_height[:, :, 0] * width_height[:, :, 1]  # [N, M]

    union = area1[:, None] + area2 - inter

    iou = inter / union
    return iou, union
def generalized_box_iou(boxes1: Tensor, boxes2: Tensor) -> Tensor:
    """
    Generalized IoU from https://giou.stanford.edu/. The boxes should be in [x0, y0, x1, y1] (corner) format.

    Returns:
        `torch.FloatTensor`: a [N, M] pairwise matrix, where N = len(boxes1) and M = len(boxes2)
    """
    # Degenerate boxes give inf / nan results, so do an early check.
    if not (boxes1[:, 2:] >= boxes1[:, :2]).all():
        raise ValueError(f"boxes1 must be in [x0, y0, x1, y1] (corner) format, but got {boxes1}")
    if not (boxes2[:, 2:] >= boxes2[:, :2]).all():
        raise ValueError(f"boxes2 must be in [x0, y0, x1, y1] (corner) format, but got {boxes2}")
    iou, union = box_iou(boxes1, boxes2)

    top_left = torch.min(boxes1[:, None, :2], boxes2[:, :2])
    bottom_right = torch.max(boxes1[:, None, 2:], boxes2[:, 2:])

    width_height = (bottom_right - top_left).clamp(min=0)  # [N, M, 2]
    area = width_height[:, :, 0] * width_height[:, :, 1]

    return iou - (area - union) / area
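
# Illustrative sketch only (not part of the upstream module): both helpers take (x0, y0, x1, y1)
# corner boxes and return pairwise [N, M] matrices. Disjoint boxes get IoU 0 but a negative
# generalized IoU, which is what makes GIoU usable as a box-regression penalty.
def _example_box_iou_usage() -> Tuple[Tensor, Tensor]:
    boxes1 = torch.tensor([[0.0, 0.0, 2.0, 2.0]])
    boxes2 = torch.tensor([[1.0, 1.0, 3.0, 3.0], [4.0, 4.0, 5.0, 5.0]])
    iou, _ = box_iou(boxes1, boxes2)  # iou[0, 0] == 1/7, iou[0, 1] == 0
    giou = generalized_box_iou(boxes1, boxes2)  # giou[0, 1] < 0: disjoint boxes are penalized
    return iou, giou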
 ed< dZeej ed< dZeej ed< dZeej ed< dZeej ed< dZeej ed	< dZeed
< dZeed< dee fddZdS )OwlViTObjectDetectionOutputa  
    Output type of [`OwlViTForObjectDetection`].

    Args:
        loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` are provided)):
            Total loss as a linear combination of a negative log-likehood (cross-entropy) for class prediction and a
            bounding box loss. The latter is defined as a linear combination of the L1 loss and the generalized
            scale-invariant IoU loss.
        loss_dict (`Dict`, *optional*):
            A dictionary containing the individual losses. Useful for logging.
        logits (`torch.FloatTensor` of shape `(batch_size, num_patches, num_queries)`):
            Classification logits (including no-object) for all queries.
        pred_boxes (`torch.FloatTensor` of shape `(batch_size, num_patches, 4)`):
            Normalized boxes coordinates for all queries, represented as (center_x, center_y, width, height). These
            values are normalized in [0, 1], relative to the size of each individual image in the batch (disregarding
            possible padding). You can use [`~OwlViTImageProcessor.post_process_object_detection`] to retrieve the
            unnormalized bounding boxes.
        text_embeds (`torch.FloatTensor` of shape `(batch_size, num_max_text_queries, output_dim`):
            The text embeddings obtained by applying the projection layer to the pooled output of [`OwlViTTextModel`].
        image_embeds (`torch.FloatTensor` of shape `(batch_size, patch_size, patch_size, output_dim`):
            Pooled output of [`OwlViTVisionModel`]. OWL-ViT represents images as a set of image patches and computes
            image embeddings for each patch.
        class_embeds (`torch.FloatTensor` of shape `(batch_size, num_patches, hidden_size)`):
            Class embeddings of all image patches. OWL-ViT represents images as a set of image patches where the total
            number of patches is (image_size / patch_size)**2.
        text_model_output (Tuple[`BaseModelOutputWithPooling`]):
            The output of the [`OwlViTTextModel`].
        vision_model_output (`BaseModelOutputWithPooling`):
            The output of the [`OwlViTVisionModel`].
    Nr+   	loss_dictr   
pred_boxesr.   r/   class_embedsr0   r1   r   c                    r2   )Nc                 3   r3   r4   r5   r8   r;   r$   r%   r=      r>   z7OwlViTObjectDetectionOutput.to_tuple.<locals>.<genexpr>r?   r;   r$   r;   r%   r7      rB   z$OwlViTObjectDetectionOutput.to_tuple)rC   rD   rE   rF   r+   r   r!   rG   rH   rc   r   r   rd   r.   r/   re   r0   r   r1   r   r   r7   r$   r$   r$   r%   rb      s   
 rb   c                   @   s   e Zd ZU dZdZeej ed< dZ	eej ed< dZ
eej ed< dZeej ed< dZeej ed< dZeej ed< dZeed	< dZeed
< dee fddZdS )&OwlViTImageGuidedObjectDetectionOutputa  
    Output type of [`OwlViTForObjectDetection.image_guided_detection`].

    Args:
        logits (`torch.FloatTensor` of shape `(batch_size, num_patches, num_queries)`):
            Classification logits (including no-object) for all queries.
        target_pred_boxes (`torch.FloatTensor` of shape `(batch_size, num_patches, 4)`):
            Normalized boxes coordinates for all queries, represented as (center_x, center_y, width, height). These
            values are normalized in [0, 1], relative to the size of each individual target image in the batch
            (disregarding possible padding). You can use [`~OwlViTImageProcessor.post_process_object_detection`] to
            retrieve the unnormalized bounding boxes.
        query_pred_boxes (`torch.FloatTensor` of shape `(batch_size, num_patches, 4)`):
            Normalized boxes coordinates for all queries, represented as (center_x, center_y, width, height). These
            values are normalized in [0, 1], relative to the size of each individual query image in the batch
            (disregarding possible padding). You can use [`~OwlViTImageProcessor.post_process_object_detection`] to
            retrieve the unnormalized bounding boxes.
        image_embeds (`torch.FloatTensor` of shape `(batch_size, patch_size, patch_size, output_dim)`):
            Pooled output of [`OwlViTVisionModel`]. OWL-ViT represents images as a set of image patches and computes
            image embeddings for each patch.
        query_image_embeds (`torch.FloatTensor` of shape `(batch_size, patch_size, patch_size, output_dim)`):
            Pooled output of [`OwlViTVisionModel`]. OWL-ViT represents images as a set of image patches and computes
            image embeddings for each patch.
        class_embeds (`torch.FloatTensor` of shape `(batch_size, num_patches, hidden_size)`):
            Class embeddings of all image patches. OWL-ViT represents images as a set of image patches where the total
            number of patches is (image_size / patch_size)**2.
        text_model_output (Tuple[`BaseModelOutputWithPooling`]):
            The output of the [`OwlViTTextModel`].
        vision_model_output (`BaseModelOutputWithPooling`):
            The output of the [`OwlViTVisionModel`].
    """

    logits: Optional[torch.FloatTensor] = None
    image_embeds: Optional[torch.FloatTensor] = None
    query_image_embeds: Optional[torch.FloatTensor] = None
    target_pred_boxes: Optional[torch.FloatTensor] = None
    query_pred_boxes: Optional[torch.FloatTensor] = None
    class_embeds: Optional[torch.FloatTensor] = None
    text_model_output: BaseModelOutputWithPooling = None
    vision_model_output: BaseModelOutputWithPooling = None

    def to_tuple(self) -> Tuple[Any]:
        return tuple(
            self[k] if k not in ["text_model_output", "vision_model_output"] else getattr(self, k).to_tuple()
            for k in self.keys()
        )

class OwlViTVisionEmbeddings(nn.Module):
    def __init__(self, config: OwlViTVisionConfig):
        super().__init__()
        self.patch_size = config.patch_size
        self.config = config
        self.embed_dim = config.hidden_size
        self.class_embedding = nn.Parameter(torch.randn(config.hidden_size))

        self.patch_embedding = nn.Conv2d(
            in_channels=config.num_channels,
            out_channels=self.embed_dim,
            kernel_size=config.patch_size,
            stride=config.patch_size,
            bias=False,
        )

        self.num_patches = (config.image_size // config.patch_size) ** 2
        self.num_positions = self.num_patches + 1
        self.position_embedding = nn.Embedding(self.num_positions, self.embed_dim)
        self.register_buffer("position_ids", torch.arange(self.num_positions).expand((1, -1)), persistent=False)

    def interpolate_pos_encoding(self, embeddings: torch.Tensor, height: int, width: int) -> torch.Tensor:
        """
        This method allows to interpolate the pre-trained position encodings, to be able to use the model on higher resolution
        images. This method is also adapted to support torch.jit tracing.

        Adapted from:
        - https://github.com/facebookresearch/dino/blob/de9ee3df6cf39fac952ab558447af1fa1365362a/vision_transformer.py#L174-L194, and
        - https://github.com/facebookresearch/dinov2/blob/e1277af2ba9496fbadf7aec6eba56e8d882d1e35/dinov2/models/vision_transformer.py#L179-L211
        """
        num_patches = embeddings.shape[1] - 1
        position_embedding = self.position_embedding.weight.unsqueeze(0)
        num_positions = position_embedding.shape[1] - 1

        # Always interpolate when tracing so the exported model works for dynamic input shapes.
        if not torch.jit.is_tracing() and num_patches == num_positions and height == width:
            return self.position_embedding(self.position_ids)

        class_pos_embed = position_embedding[:, :1]
        patch_pos_embed = position_embedding[:, 1:]

        dim = embeddings.shape[-1]

        new_height = height // self.patch_size
        new_width = width // self.patch_size

        sqrt_num_positions = torch_int(num_positions**0.5)
        patch_pos_embed = patch_pos_embed.reshape(1, sqrt_num_positions, sqrt_num_positions, dim)
        patch_pos_embed = patch_pos_embed.permute(0, 3, 1, 2)

        patch_pos_embed = nn.functional.interpolate(
            patch_pos_embed,
            size=(new_height, new_width),
            mode="bicubic",
            align_corners=False,
        )

        patch_pos_embed = patch_pos_embed.permute(0, 2, 3, 1).view(1, -1, dim)

        return torch.cat((class_pos_embed, patch_pos_embed), dim=1)

    def forward(self, pixel_values: torch.FloatTensor, interpolate_pos_encoding: bool = False) -> torch.Tensor:
        batch_size, _, height, width = pixel_values.shape
        patch_embeds = self.patch_embedding(pixel_values)  # shape = [batch_size, embed_dim, grid_h, grid_w]
        patch_embeds = patch_embeds.flatten(2).transpose(1, 2)

        class_embeds = self.class_embedding.expand(batch_size, 1, -1)
        embeddings = torch.cat([class_embeds, patch_embeds], dim=1)
        if interpolate_pos_encoding:
            embeddings = embeddings + self.interpolate_pos_encoding(embeddings, height, width)
        else:
            embeddings = embeddings + self.position_embedding(self.position_ids)

        return embeddings
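
# Illustrative sketch only (not part of the upstream module): with `interpolate_pos_encoding=True`
# the embeddings accept resolutions other than `config.image_size`; the sequence length becomes
# 1 CLS token + (height // patch_size) * (width // patch_size) patch tokens. The default config
# values assumed below (image_size=768, patch_size=32) correspond to the base patch-32 checkpoints.
def _example_interpolated_position_encoding() -> torch.Tensor:
    config = OwlViTVisionConfig()
    embeddings = OwlViTVisionEmbeddings(config)
    pixel_values = torch.randn(1, 3, 896, 896)  # larger than the pre-training resolution
    # Expected output shape: (1, 1 + (896 // 32) ** 2, config.hidden_size)
    return embeddings(pixel_values, interpolate_pos_encoding=True)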

class OwlViTTextEmbeddings(nn.Module):
    def __init__(self, config: OwlViTTextConfig):
        super().__init__()
        self.token_embedding = nn.Embedding(config.vocab_size, config.hidden_size)
        self.position_embedding = nn.Embedding(config.max_position_embeddings, config.hidden_size)

        # position_ids (1, len position emb) is contiguous in memory and exported when serialized
        self.register_buffer(
            "position_ids", torch.arange(config.max_position_embeddings).expand((1, -1)), persistent=False
        )

    def forward(
        self,
        input_ids: Optional[torch.LongTensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        inputs_embeds: Optional[torch.FloatTensor] = None,
    ) -> torch.Tensor:
        seq_length = input_ids.shape[-1] if input_ids is not None else inputs_embeds.shape[-2]

        if position_ids is None:
            position_ids = self.position_ids[:, :seq_length]

        if inputs_embeds is None:
            inputs_embeds = self.token_embedding(input_ids)

        position_embeddings = self.position_embedding(position_ids)
        embeddings = inputs_embeds + position_embeddings

        return embeddings

class OwlViTAttention(nn.Module):
    """Multi-headed attention from 'Attention Is All You Need' paper"""

    def __init__(self, config):
        super().__init__()
        self.config = config
        self.embed_dim = config.hidden_size
        self.num_heads = config.num_attention_heads
        self.head_dim = self.embed_dim // self.num_heads
        if self.head_dim * self.num_heads != self.embed_dim:
            raise ValueError(
                f"embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim} and `num_heads`:"
                f" {self.num_heads})."
            )
        self.scale = self.head_dim**-0.5
        self.dropout = config.attention_dropout

        self.k_proj = nn.Linear(self.embed_dim, self.embed_dim)
        self.v_proj = nn.Linear(self.embed_dim, self.embed_dim)
        self.q_proj = nn.Linear(self.embed_dim, self.embed_dim)
        self.out_proj = nn.Linear(self.embed_dim, self.embed_dim)

    def _shape(self, tensor: torch.Tensor, seq_len: int, bsz: int):
        return tensor.view(bsz, seq_len, self.num_heads, self.head_dim).transpose(1, 2).contiguous()

    def forward(
        self,
        hidden_states: torch.Tensor,
        attention_mask: Optional[torch.Tensor] = None,
        causal_attention_mask: Optional[torch.Tensor] = None,
        output_attentions: Optional[bool] = False,
    ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:
        """Input shape: Batch x Time x Channel"""

        bsz, tgt_len, embed_dim = hidden_states.size()

        # Project queries, keys and values
        query_states = self.q_proj(hidden_states) * self.scale
        key_states = self._shape(self.k_proj(hidden_states), -1, bsz)
        value_states = self._shape(self.v_proj(hidden_states), -1, bsz)

        proj_shape = (bsz * self.num_heads, -1, self.head_dim)
        query_states = self._shape(query_states, tgt_len, bsz).view(*proj_shape)
        key_states = key_states.view(*proj_shape)
        value_states = value_states.view(*proj_shape)

        src_len = key_states.size(1)
        attn_weights = torch.bmm(query_states, key_states.transpose(1, 2))

        if attn_weights.size() != (bsz * self.num_heads, tgt_len, src_len):
            raise ValueError(
                f"Attention weights should be of size {(bsz * self.num_heads, tgt_len, src_len)}, but is"
                f" {attn_weights.size()}"
            )

        # Apply the causal attention mask first
        if causal_attention_mask is not None:
            if causal_attention_mask.size() != (bsz, 1, tgt_len, src_len):
                raise ValueError(
                    f"Attention mask should be of size {(bsz, 1, tgt_len, src_len)}, but is"
                    f" {causal_attention_mask.size()}"
                )
            attn_weights = attn_weights.view(bsz, self.num_heads, tgt_len, src_len) + causal_attention_mask
            attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len, src_len)

        if attention_mask is not None:
            if attention_mask.size() != (bsz, 1, tgt_len, src_len):
                raise ValueError(
                    f"Attention mask should be of size {(bsz, 1, tgt_len, src_len)}, but is {attention_mask.size()}"
                )
            attn_weights = attn_weights.view(bsz, self.num_heads, tgt_len, src_len) + attention_mask
            attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len, src_len)

        attn_weights = nn.functional.softmax(attn_weights, dim=-1)

        if output_attentions:
            # Reshape twice so that the returned weights keep their gradient
            attn_weights_reshaped = attn_weights.view(bsz, self.num_heads, tgt_len, src_len)
            attn_weights = attn_weights_reshaped.view(bsz * self.num_heads, tgt_len, src_len)
        else:
            attn_weights_reshaped = None

        attn_probs = nn.functional.dropout(attn_weights, p=self.dropout, training=self.training)

        # For int8 compatibility, sometimes the `attn_probs` are in `fp32`
        attn_probs = attn_probs.to(value_states.dtype)

        attn_output = torch.bmm(attn_probs, value_states)

        if attn_output.size() != (bsz * self.num_heads, tgt_len, self.head_dim):
            raise ValueError(
                f"`attn_output` should be of size {(bsz, self.num_heads, tgt_len, self.head_dim)}, but is"
                f" {attn_output.size()}"
            )

        attn_output = attn_output.view(bsz, self.num_heads, tgt_len, self.head_dim)
        attn_output = attn_output.transpose(1, 2)
        attn_output = attn_output.reshape(bsz, tgt_len, embed_dim)

        attn_output = self.out_proj(attn_output)

        return attn_output, attn_weights_reshaped


class OwlViTMLP(nn.Module):
    def __init__(self, config):
        super().__init__()
        self.config = config
        self.activation_fn = ACT2FN[config.hidden_act]
        self.fc1 = nn.Linear(config.hidden_size, config.intermediate_size)
        self.fc2 = nn.Linear(config.intermediate_size, config.hidden_size)

    def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
        hidden_states = self.fc1(hidden_states)
        hidden_states = self.activation_fn(hidden_states)
        hidden_states = self.fc2(hidden_states)
        return hidden_states


class OwlViTEncoderLayer(nn.Module):
    def __init__(self, config: OwlViTConfig):
        super().__init__()
        self.embed_dim = config.hidden_size
        self.self_attn = OwlViTAttention(config)
        self.layer_norm1 = nn.LayerNorm(self.embed_dim, eps=config.layer_norm_eps)
        self.mlp = OwlViTMLP(config)
        self.layer_norm2 = nn.LayerNorm(self.embed_dim, eps=config.layer_norm_eps)

    def forward(
        self,
        hidden_states: torch.Tensor,
        attention_mask: torch.Tensor,
        causal_attention_mask: torch.Tensor,
        output_attentions: Optional[bool] = False,
    ) -> Tuple[torch.FloatTensor]:
        """
        Args:
            hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)`
            attention_mask (`torch.FloatTensor`): attention mask of size
                `(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values.
            output_attentions (`bool`, *optional*):
                Whether or not to return the attentions tensors of all attention layers. See `attentions` under
                returned tensors for more detail.
        """
        residual = hidden_states

        hidden_states = self.layer_norm1(hidden_states)
        hidden_states, attn_weights = self.self_attn(
            hidden_states=hidden_states,
            attention_mask=attention_mask,
            causal_attention_mask=causal_attention_mask,
            output_attentions=output_attentions,
        )
        hidden_states = residual + hidden_states

        residual = hidden_states
        hidden_states = self.layer_norm2(hidden_states)
        hidden_states = self.mlp(hidden_states)
        hidden_states = residual + hidden_states

        outputs = (hidden_states,)

        if output_attentions:
            outputs += (attn_weights,)

        return outputs


class OwlViTPreTrainedModel(PreTrainedModel):
    config_class = OwlViTConfig
    base_model_prefix = "owlvit"
    supports_gradient_checkpointing = True
    _no_split_modules = ["OwlViTEncoderLayer"]

    def _init_weights(self, module):
        """Initialize the weights"""
        factor = self.config.initializer_factor
        if isinstance(module, OwlViTTextEmbeddings):
            module.token_embedding.weight.data.normal_(mean=0.0, std=factor * 0.02)
            module.position_embedding.weight.data.normal_(mean=0.0, std=factor * 0.02)
        elif isinstance(module, OwlViTVisionEmbeddings):
            nn.init.normal_(module.class_embedding, mean=0.0, std=module.embed_dim**-0.5 * factor)
            nn.init.normal_(module.patch_embedding.weight, std=module.config.initializer_range * factor)
            nn.init.normal_(module.position_embedding.weight, std=module.config.initializer_range * factor)
        elif isinstance(module, OwlViTAttention):
            in_proj_std = (module.embed_dim**-0.5) * ((2 * module.config.num_hidden_layers) ** -0.5) * factor
            out_proj_std = (module.embed_dim**-0.5) * factor
            nn.init.normal_(module.q_proj.weight, std=in_proj_std)
            nn.init.normal_(module.k_proj.weight, std=in_proj_std)
            nn.init.normal_(module.v_proj.weight, std=in_proj_std)
            nn.init.normal_(module.out_proj.weight, std=out_proj_std)
        elif isinstance(module, OwlViTMLP):
            in_proj_std = (module.config.hidden_size**-0.5) * ((2 * module.config.num_hidden_layers) ** -0.5) * factor
            fc_std = (2 * module.config.hidden_size) ** -0.5 * factor
            nn.init.normal_(module.fc1.weight, std=fc_std)
            nn.init.normal_(module.fc2.weight, std=in_proj_std)
        elif isinstance(module, OwlViTModel):
            nn.init.normal_(
                module.text_projection.weight,
                std=module.text_embed_dim**-0.5 * self.config.initializer_factor,
            )
            nn.init.normal_(
                module.visual_projection.weight,
                std=module.vision_embed_dim**-0.5 * self.config.initializer_factor,
            )
        if isinstance(module, nn.LayerNorm):
            module.bias.data.zero_()
            module.weight.data.fill_(1.0)
        if isinstance(module, nn.Linear) and module.bias is not None:
            module.bias.data.zero_()


class OwlViTEncoder(nn.Module):
    """
    Transformer encoder consisting of `config.num_hidden_layers` self attention layers. Each layer is a
    [`OwlViTEncoderLayer`].

    Args:
        config: OwlViTConfig
    """

    def __init__(self, config: OwlViTConfig):
        super().__init__()
        self.config = config
        self.layers = nn.ModuleList([OwlViTEncoderLayer(config) for _ in range(config.num_hidden_layers)])
        self.gradient_checkpointing = False

    def forward(
        self,
        inputs_embeds,
        attention_mask: Optional[torch.Tensor] = None,
        causal_attention_mask: Optional[torch.Tensor] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[Tuple, BaseModelOutput]:
        r"""
        Args:
            inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`).
            attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
                Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
                - 1 for tokens that are **not masked**,
                - 0 for tokens that are **masked**.
                [What are attention masks?](../glossary#attention-mask)
            causal_attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
                Causal mask for the text model. Mask values selected in `[0, 1]`:
                - 1 for tokens that are **not masked**,
                - 0 for tokens that are **masked**.
                [What are attention masks?](../glossary#attention-mask)
            output_attentions (`bool`, *optional*):
                Whether or not to return the attentions tensors of all attention layers. See `attentions` under
                returned tensors for more detail.
            output_hidden_states (`bool`, *optional*):
                Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors
                for more detail.
            return_dict (`bool`, *optional*):
                Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
        """
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        encoder_states = () if output_hidden_states else None
        all_attentions = () if output_attentions else None

        hidden_states = inputs_embeds
        for encoder_layer in self.layers:
            if output_hidden_states:
                encoder_states = encoder_states + (hidden_states,)
            if self.gradient_checkpointing and self.training:
                layer_outputs = self._gradient_checkpointing_func(
                    encoder_layer.__call__,
                    hidden_states,
                    attention_mask,
                    causal_attention_mask,
                    output_attentions,
                )
            else:
                layer_outputs = encoder_layer(
                    hidden_states,
                    attention_mask,
                    causal_attention_mask,
                    output_attentions=output_attentions,
                )

            hidden_states = layer_outputs[0]

            if output_attentions:
                all_attentions = all_attentions + (layer_outputs[1],)

        if output_hidden_states:
            encoder_states = encoder_states + (hidden_states,)

        if not return_dict:
            return tuple(v for v in [hidden_states, encoder_states, all_attentions] if v is not None)
        return BaseModelOutput(
            last_hidden_state=hidden_states, hidden_states=encoder_states, attentions=all_attentions
        )


class OwlViTTextTransformer(nn.Module):
    def __init__(self, config: OwlViTTextConfig):
        super().__init__()
        self.config = config
        embed_dim = config.hidden_size
        self.embeddings = OwlViTTextEmbeddings(config)
        self.encoder = OwlViTEncoder(config)
        self.final_layer_norm = nn.LayerNorm(embed_dim, eps=config.layer_norm_eps)

    @auto_docstring
    def forward(
        self,
        input_ids: torch.Tensor,
        attention_mask: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.Tensor] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[Tuple, BaseModelOutputWithPooling]:
        r"""
        input_ids (`torch.LongTensor` of shape `(batch_size * num_max_text_queries, sequence_length)`):
            Indices of input sequence tokens in the vocabulary. Indices can be obtained using [`AutoTokenizer`]. See
            [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details. [What are input
            IDs?](../glossary#input-ids)
        """
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        input_shape = input_ids.size()
        input_ids = input_ids.view(-1, input_shape[-1])
        hidden_states = self.embeddings(input_ids=input_ids, position_ids=position_ids)

        # OWL-ViT's text model uses a causal mask; prepare it here.
        causal_attention_mask = _create_4d_causal_attention_mask(
            input_shape, hidden_states.dtype, device=hidden_states.device
        )
        # Expand attention_mask: [num_samples, seq_len] -> [num_samples, 1, tgt_seq_len, src_seq_len]
        if attention_mask is not None:
            attention_mask = _prepare_4d_attention_mask(attention_mask, hidden_states.dtype)

        encoder_outputs = self.encoder(
            inputs_embeds=hidden_states,
            attention_mask=attention_mask,
            causal_attention_mask=causal_attention_mask,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )

        last_hidden_state = encoder_outputs[0]
        last_hidden_state = self.final_layer_norm(last_hidden_state)

        # Take features from the end-of-text embedding (the end-of-text token is the highest id in each sequence).
        # Cast to torch.int for ONNX compatibility: argmax doesn't support int64 inputs with opset 14.
        pooled_output = last_hidden_state[
            torch.arange(last_hidden_state.shape[0], device=input_ids.device),
            input_ids.to(torch.int).argmax(dim=-1).to(last_hidden_state.device),
        ]

        if not return_dict:
            return (last_hidden_state, pooled_output) + encoder_outputs[1:]

        return BaseModelOutputWithPooling(
            last_hidden_state=last_hidden_state,
            pooler_output=pooled_output,
            hidden_states=encoder_outputs.hidden_states,
            attentions=encoder_outputs.attentions,
        )

class OwlViTTextModel(OwlViTPreTrainedModel):
    config_class = OwlViTTextConfig

    def __init__(self, config: OwlViTTextConfig):
        super().__init__(config)
        self.text_model = OwlViTTextTransformer(config)
        # Initialize weights and apply final processing
        self.post_init()

    def get_input_embeddings(self) -> nn.Module:
        return self.text_model.embeddings.token_embedding

    def set_input_embeddings(self, value):
        self.text_model.embeddings.token_embedding = value

    @auto_docstring
    def forward(
        self,
        input_ids: torch.Tensor,
        attention_mask: Optional[torch.Tensor] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[Tuple, BaseModelOutputWithPooling]:
        r"""
        input_ids (`torch.LongTensor` of shape `(batch_size * num_max_text_queries, sequence_length)`):
            Indices of input sequence tokens in the vocabulary. Indices can be obtained using [`AutoTokenizer`]. See
            [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details. [What are input
            IDs?](../glossary#input-ids)

        Examples:
        ```python
        >>> from transformers import AutoProcessor, OwlViTTextModel

        >>> model = OwlViTTextModel.from_pretrained("google/owlvit-base-patch32")
        >>> processor = AutoProcessor.from_pretrained("google/owlvit-base-patch32")
        >>> inputs = processor(
        ...     text=[["a photo of a cat", "a photo of a dog"], ["photo of an astronaut"]], return_tensors="pt"
        ... )
        >>> outputs = model(**inputs)
        >>> last_hidden_state = outputs.last_hidden_state
        >>> pooled_output = outputs.pooler_output  # pooled (EOS token) states
        ```"""
        # Get embeddings for all text queries in all batch samples
        return self.text_model(
            input_ids=input_ids,
            attention_mask=attention_mask,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )


class OwlViTVisionTransformer(nn.Module):
    def __init__(self, config: OwlViTVisionConfig):
        super().__init__()
        self.config = config

        self.embeddings = OwlViTVisionEmbeddings(config)
        self.pre_layernorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
        self.encoder = OwlViTEncoder(config)
        self.post_layernorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)

    @auto_docstring
    def forward(
        self,
        pixel_values: torch.FloatTensor,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        interpolate_pos_encoding: bool = False,
        return_dict: Optional[bool] = None,
    ) -> Union[Tuple, BaseModelOutputWithPooling]:
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        # Cast the input to the expected `dtype`
        expected_input_dtype = self.embeddings.patch_embedding.weight.dtype
        pixel_values = pixel_values.to(expected_input_dtype)

        hidden_states = self.embeddings(pixel_values, interpolate_pos_encoding=interpolate_pos_encoding)
        hidden_states = self.pre_layernorm(hidden_states)

        encoder_outputs = self.encoder(
            inputs_embeds=hidden_states,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )

        last_hidden_state = encoder_outputs[0]
        pooled_output = last_hidden_state[:, 0, :]

        pooled_output = self.post_layernorm(pooled_output)

        if not return_dict:
            return (last_hidden_state, pooled_output) + encoder_outputs[1:]

        return BaseModelOutputWithPooling(
            last_hidden_state=last_hidden_state,
            pooler_output=pooled_output,
            hidden_states=encoder_outputs.hidden_states,
            attentions=encoder_outputs.attentions,
        )


class OwlViTVisionModel(OwlViTPreTrainedModel):
    config_class = OwlViTVisionConfig
    main_input_name = "pixel_values"

    def __init__(self, config: OwlViTVisionConfig):
        super().__init__(config)
        self.vision_model = OwlViTVisionTransformer(config)
        # Initialize weights and apply final processing
        self.post_init()

    def get_input_embeddings(self) -> nn.Module:
        return self.vision_model.embeddings.patch_embedding

    @auto_docstring
    def forward(
        self,
        pixel_values: Optional[torch.FloatTensor] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        interpolate_pos_encoding: bool = False,
        return_dict: Optional[bool] = None,
    ) -> Union[Tuple, BaseModelOutputWithPooling]:
        r"""
        Examples:
        ```python
        >>> from PIL import Image
        >>> import requests
        >>> from transformers import AutoProcessor, OwlViTVisionModel

        >>> model = OwlViTVisionModel.from_pretrained("google/owlvit-base-patch32")
        >>> processor = AutoProcessor.from_pretrained("google/owlvit-base-patch32")
        >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
        >>> image = Image.open(requests.get(url, stream=True).raw)

        >>> inputs = processor(images=image, return_tensors="pt")

        >>> outputs = model(**inputs)
        >>> last_hidden_state = outputs.last_hidden_state
        >>> pooled_output = outputs.pooler_output  # pooled CLS states
        ```"""
        return self.vision_model(
            pixel_values=pixel_values,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            interpolate_pos_encoding=interpolate_pos_encoding,
            return_dict=return_dict,
        )


class OwlViTModel(OwlViTPreTrainedModel):
    config_class = OwlViTConfig

    def __init__(self, config: OwlViTConfig):
        super().__init__(config)

        if not isinstance(config.text_config, OwlViTTextConfig):
            raise TypeError(
                "config.text_config is expected to be of type OwlViTTextConfig but is of type"
                f" {type(config.text_config)}."
            )

        if not isinstance(config.vision_config, OwlViTVisionConfig):
            raise TypeError(
                "config.vision_config is expected to be of type OwlViTVisionConfig but is of type"
                f" {type(config.vision_config)}."
            )

        text_config = config.text_config
        vision_config = config.vision_config

        self.projection_dim = config.projection_dim
        self.text_embed_dim = text_config.hidden_size
        self.vision_embed_dim = vision_config.hidden_size

        self.text_model = OwlViTTextTransformer(text_config)
        self.vision_model = OwlViTVisionTransformer(vision_config)

        self.visual_projection = nn.Linear(self.vision_embed_dim, self.projection_dim, bias=False)
        self.text_projection = nn.Linear(self.text_embed_dim, self.projection_dim, bias=False)
        self.logit_scale = nn.Parameter(torch.tensor(config.logit_scale_init_value))

        # Initialize weights and apply final processing
        self.post_init()

    @auto_docstring
    def get_text_features(
        self,
        input_ids: Optional[torch.Tensor] = None,
        attention_mask: Optional[torch.Tensor] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> torch.FloatTensor:
        r"""
        input_ids (`torch.LongTensor` of shape `(batch_size * num_max_text_queries, sequence_length)`):
            Indices of input sequence tokens in the vocabulary. Indices can be obtained using [`AutoTokenizer`]. See
            [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details. [What are input
            IDs?](../glossary#input-ids)

        Returns:
            text_features (`torch.FloatTensor` of shape `(batch_size, output_dim)`): The text embeddings obtained by
            applying the projection layer to the pooled output of [`OwlViTTextModel`].

        Examples:
        ```python
        >>> from transformers import AutoProcessor, OwlViTModel

        >>> model = OwlViTModel.from_pretrained("google/owlvit-base-patch32")
        >>> processor = AutoProcessor.from_pretrained("google/owlvit-base-patch32")
        >>> inputs = processor(
        ...     text=[["a photo of a cat", "a photo of a dog"], ["photo of an astronaut"]], return_tensors="pt"
        ... )
        >>> text_features = model.get_text_features(**inputs)
        ```"""
        # Use OWL-ViT model's config for some fields (if specified) instead of those of vision & text components.
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        # Get embeddings for all text queries in all batch samples
        text_output = self.text_model(input_ids=input_ids, attention_mask=attention_mask, return_dict=return_dict)
        pooled_output = text_output[1]
        text_features = self.text_projection(pooled_output)

        return text_features

    @auto_docstring
    def get_image_features(
        self,
        pixel_values: Optional[torch.FloatTensor] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        interpolate_pos_encoding: bool = False,
        return_dict: Optional[bool] = None,
    ) -> torch.FloatTensor:
        r"""
        Returns:
            image_features (`torch.FloatTensor` of shape `(batch_size, output_dim)`): The image embeddings obtained by
            applying the projection layer to the pooled output of [`OwlViTVisionModel`].

        Examples:
        ```python
        >>> from PIL import Image
        >>> import requests
        >>> from transformers import AutoProcessor, OwlViTModel

        >>> model = OwlViTModel.from_pretrained("google/owlvit-base-patch32")
        >>> processor = AutoProcessor.from_pretrained("google/owlvit-base-patch32")
        >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
        >>> image = Image.open(requests.get(url, stream=True).raw)
        >>> inputs = processor(images=image, return_tensors="pt")
        >>> image_features = model.get_image_features(**inputs)
        ```"""
        # Use OWL-ViT model's config for some fields (if specified) instead of those of vision & text components.
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        vision_outputs = self.vision_model(
            pixel_values=pixel_values,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            interpolate_pos_encoding=interpolate_pos_encoding,
            return_dict=return_dict,
        )

        pooled_output = vision_outputs[1]
        image_features = self.visual_projection(pooled_output)

        return image_features

    @auto_docstring
    def forward(
        self,
        input_ids: Optional[torch.LongTensor] = None,
        pixel_values: Optional[torch.FloatTensor] = None,
        attention_mask: Optional[torch.Tensor] = None,
        return_loss: Optional[bool] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        interpolate_pos_encoding: bool = False,
        return_base_image_embeds: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[Tuple, OwlViTOutput]:
        r"""
        return_loss (`bool`, *optional*):
            Whether or not to return the contrastive loss.
        return_base_image_embeds (`bool`, *optional*):
            Whether or not to return the base image embeddings.

        Examples:
        ```python
        >>> from PIL import Image
        >>> import requests
        >>> from transformers import AutoProcessor, OwlViTModel

        >>> model = OwlViTModel.from_pretrained("google/owlvit-base-patch32")
        >>> processor = AutoProcessor.from_pretrained("google/owlvit-base-patch32")
        >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
        >>> image = Image.open(requests.get(url, stream=True).raw)
        >>> inputs = processor(text=[["a photo of a cat", "a photo of a dog"]], images=image, return_tensors="pt")
        >>> outputs = model(**inputs)
        >>> logits_per_image = outputs.logits_per_image  # this is the image-text similarity score
        >>> probs = logits_per_image.softmax(dim=1)  # we can take the softmax to get the label probabilities
        ```"""
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        vision_outputs = self.vision_model(
            pixel_values=pixel_values,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            interpolate_pos_encoding=interpolate_pos_encoding,
            return_dict=return_dict,
        )

        # Get embeddings for all text queries in all batch samples
        text_outputs = self.text_model(
            input_ids=input_ids,
            attention_mask=attention_mask,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )

        text_embeds = text_outputs[1]
        text_embeds = self.text_projection(text_embeds)
        image_embeds = vision_outputs[1]
        image_embeds = self.visual_projection(image_embeds)

        # Normalized features
        image_embeds = image_embeds / torch.linalg.norm(image_embeds, ord=2, dim=-1, keepdim=True)
        text_embeds_norm = text_embeds / torch.linalg.norm(text_embeds, ord=2, dim=-1, keepdim=True)

        # Cosine similarity as logits, scaled by the learned temperature on the correct device
        logit_scale = self.logit_scale.exp().to(image_embeds.device)

        logits_per_text = torch.matmul(text_embeds_norm, image_embeds.t()) * logit_scale
        logits_per_image = logits_per_text.t()

        loss = None
        if return_loss:
            loss = owlvit_loss(logits_per_text)

        text_embeds = text_embeds_norm

        if not return_dict:
            output = (logits_per_image, logits_per_text, text_embeds, image_embeds, text_outputs, vision_outputs)
            return ((loss,) + output) if loss is not None else output

        return OwlViTOutput(
            loss=loss,
            logits_per_image=logits_per_image,
            logits_per_text=logits_per_text,
            text_embeds=text_embeds,
            image_embeds=image_embeds,
            text_model_output=text_outputs,
            vision_model_output=vision_outputs,
        )


class OwlViTBoxPredictionHead(nn.Module):
    def __init__(self, config: OwlViTConfig, out_dim: int = 4):
        super().__init__()

        width = config.vision_config.hidden_size
        self.dense0 = nn.Linear(width, width)
        self.dense1 = nn.Linear(width, width)
        self.gelu = nn.GELU()
        self.dense2 = nn.Linear(width, out_dim)

    def forward(self, image_features: torch.Tensor) -> torch.FloatTensor:
        output = self.dense0(image_features)
        output = self.gelu(output)
        output = self.dense1(output)
        output = self.gelu(output)
        output = self.dense2(output)
        return output


class OwlViTClassPredictionHead(nn.Module):
    def __init__(self, config: OwlViTConfig):
        super().__init__()

        out_dim = config.text_config.hidden_size
        self.query_dim = config.vision_config.hidden_size

        self.dense0 = nn.Linear(self.query_dim, out_dim)
        self.logit_shift = nn.Linear(self.query_dim, 1)
        self.logit_scale = nn.Linear(self.query_dim, 1)
        self.elu = nn.ELU()

    def forward(
        self,
        image_embeds: torch.FloatTensor,
        query_embeds: Optional[torch.FloatTensor],
        query_mask: Optional[torch.Tensor],
    ) -> Tuple[torch.FloatTensor]:
        image_class_embeds = self.dense0(image_embeds)
        if query_embeds is None:
            device = image_class_embeds.device
            batch_size, num_patches = image_class_embeds.shape[:2]
            pred_logits = torch.zeros((batch_size, num_patches, self.query_dim)).to(device)
            return (pred_logits, image_class_embeds)

        # Normalize image and text features
        image_class_embeds = image_class_embeds / (torch.linalg.norm(image_class_embeds, dim=-1, keepdim=True) + 1e-6)
        query_embeds = query_embeds / (torch.linalg.norm(query_embeds, dim=-1, keepdim=True) + 1e-6)

        # Get class predictions
        pred_logits = torch.einsum("...pd,...qd->...pq", image_class_embeds, query_embeds)

        # Apply a learnable shift and scale to logits
        logit_shift = self.logit_shift(image_embeds)
        logit_scale = self.logit_scale(image_embeds)
        logit_scale = self.elu(logit_scale) + 1
        pred_logits = (pred_logits + logit_shift) * logit_scale

        if query_mask is not None:
            if query_mask.ndim > 1:
                query_mask = torch.unsqueeze(query_mask, dim=-2)

            pred_logits = torch.where(query_mask == 0, torch.finfo(pred_logits.dtype).min, pred_logits)
            pred_logits = pred_logits.to(torch.float32)

        return (pred_logits, image_class_embeds)


class OwlViTForObjectDetection(OwlViTPreTrainedModel):
    config_class = OwlViTConfig

    def __init__(self, config: OwlViTConfig):
        super().__init__(config)

        self.owlvit = OwlViTModel(config)
        self.class_head = OwlViTClassPredictionHead(config)
        self.box_head = OwlViTBoxPredictionHead(config)

        self.layer_norm = nn.LayerNorm(config.vision_config.hidden_size, eps=config.vision_config.layer_norm_eps)
        self.sigmoid = nn.Sigmoid()

        self.config = config
        self.num_patches_height = self.config.vision_config.image_size // self.config.vision_config.patch_size
        self.num_patches_width = self.config.vision_config.image_size // self.config.vision_config.patch_size
        self.box_bias = self.compute_box_bias(self.num_patches_height, self.num_patches_width)

    @staticmethod
    def normalize_grid_corner_coordinates(num_patches_height: int, num_patches_width: int) -> torch.Tensor:
        # Create grid coordinates using torch
        x_coordinates = torch.arange(1, num_patches_width + 1, dtype=torch.float32)
        y_coordinates = torch.arange(1, num_patches_height + 1, dtype=torch.float32)
        xx, yy = torch.meshgrid(x_coordinates, y_coordinates, indexing="xy")

        # Stack the coordinates and normalize by the respective patch counts
        box_coordinates = torch.stack((xx, yy), dim=-1)
        box_coordinates[..., 0] /= num_patches_width
        box_coordinates[..., 1] /= num_patches_height

        # Flatten (num_patches_height, num_patches_width, 2) -> (num_patches_height * num_patches_width, 2)
        box_coordinates = box_coordinates.view(-1, 2)

        return box_coordinates

    @lru_cache(maxsize=2)
    def compute_box_bias(
        self, num_patches_height: int, num_patches_width: int, feature_map: Optional[torch.FloatTensor] = None
    ) -> torch.Tensor:
        if feature_map is not None:
            raise ValueError("feature_map has been deprecated as an input. Please pass in num_patches instead")
        # The box center is biased to its position on the feature grid
        box_coordinates = self.normalize_grid_corner_coordinates(num_patches_height, num_patches_width)
        box_coordinates = torch.clip(box_coordinates, 0.0, 1.0)

        # Unnormalize xy
        box_coord_bias = torch.log(box_coordinates + 1e-4) - torch.log1p(-box_coordinates + 1e-4)

        # The box size is biased to the patch size
        box_size = torch.full_like(box_coord_bias, 1.0)
        box_size[..., 0] /= num_patches_width
        box_size[..., 1] /= num_patches_height
        box_size_bias = torch.log(box_size + 1e-4) - torch.log1p(-box_size + 1e-4)

        # Compute box bias
        box_bias = torch.cat([box_coord_bias, box_size_bias], dim=-1)
        return box_bias

    def box_predictor(
        self,
        image_feats: torch.FloatTensor,
        feature_map: torch.FloatTensor,
        interpolate_pos_encoding: bool = False,
    ) -> torch.FloatTensor:
        """
        Args:
            image_feats:
                Features extracted from the image, returned by the `image_text_embedder` method.
            feature_map:
                A spatial re-arrangement of image_features, also returned by the `image_text_embedder` method.
            interpolate_pos_encoding:
                Whether to interpolate the pre-trained position encodings.
        Returns:
            pred_boxes:
                List of predicted boxes (cxcywh normalized to 0, 1) nested within a dictionary.
        """
        # Bounding box detection head [batch_size, num_boxes, 4].
        pred_boxes = self.box_head(image_feats)

        # Compute the location of each token on the grid and use it to compute a bias for the bbox prediction
        if interpolate_pos_encoding:
            _, num_patches_height, num_patches_width, _ = feature_map.shape
            box_bias = self.compute_box_bias(num_patches_height, num_patches_width)
        else:
            box_bias = self.box_bias
        box_bias = box_bias.to(feature_map.device)
        pred_boxes += box_bias
        pred_boxes = self.sigmoid(pred_boxes)
        return pred_boxes

    def class_predictor(
        self,
        image_feats: torch.FloatTensor,
        query_embeds: Optional[torch.FloatTensor] = None,
        query_mask: Optional[torch.Tensor] = None,
    ) -> Tuple[torch.FloatTensor]:
        """
        Args:
            image_feats:
                Features extracted from the `image_text_embedder`.
            query_embeds:
                Text query embeddings.
            query_mask:
                Must be provided with query_embeddings. A mask indicating which query embeddings are valid.
        """
        (pred_logits, image_class_embeds) = self.class_head(image_feats, query_embeds, query_mask)

        return (pred_logits, image_class_embeds)

    def image_text_embedder(
        self,
        input_ids: torch.Tensor,
        pixel_values: torch.FloatTensor,
        attention_mask: torch.Tensor,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        interpolate_pos_encoding: bool = False,
    ) -> Tuple[torch.FloatTensor]:
        # Encode text and image
        outputs = self.owlvit(
            pixel_values=pixel_values,
            input_ids=input_ids,
            attention_mask=attention_mask,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            interpolate_pos_encoding=interpolate_pos_encoding,
            return_dict=True,
        )

        if interpolate_pos_encoding:
            _, _, height, width = pixel_values.shape
            num_patches_height = height // self.config.vision_config.patch_size
            num_patches_width = width // self.config.vision_config.patch_size
        else:
            num_patches_height = self.num_patches_height
            num_patches_width = self.num_patches_width

        # Get image embeddings
        last_hidden_state = outputs.vision_model_output[0]
        image_embeds = self.owlvit.vision_model.post_layernorm(last_hidden_state)

        # Resize class token
        class_token_out = torch.broadcast_to(image_embeds[:, :1, :], image_embeds[:, :-1].shape)

        # Merge image embedding with class tokens
        image_embeds = image_embeds[:, 1:, :] * class_token_out
        image_embeds = self.layer_norm(image_embeds)

        # Resize to [batch_size, num_patches_height, num_patches_width, hidden_size]
        new_size = (
            image_embeds.shape[0],
            num_patches_height,
            num_patches_width,
            image_embeds.shape[-1],
        )
        image_embeds = image_embeds.reshape(new_size)
        text_embeds = outputs[-4]

        return (text_embeds, image_embeds, outputs)

    def image_embedder(
        self,
        pixel_values: torch.FloatTensor,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        interpolate_pos_encoding: bool = False,
    ) -> Tuple[torch.FloatTensor]:
        # Get OwlViTModel vision embeddings (same as CLIP)
        vision_outputs = self.owlvit.vision_model(
            pixel_values=pixel_values,
            interpolate_pos_encoding=interpolate_pos_encoding,
            return_dict=True,
        )

        if interpolate_pos_encoding:
            _, _, height, width = pixel_values.shape
            num_patches_height = height // self.config.vision_config.patch_size
            num_patches_width = width // self.config.vision_config.patch_size
        else:
            num_patches_height = self.num_patches_height
            num_patches_width = self.num_patches_width

        # Apply post_layernorm to last_hidden_state, return non-projected output
        last_hidden_state = vision_outputs[0]
        image_embeds = self.owlvit.vision_model.post_layernorm(last_hidden_state)

        # Resize class token
        class_token_out = torch.broadcast_to(image_embeds[:, :1, :], image_embeds[:, :-1].shape)

        # Merge image embedding with class tokens
        image_embeds = image_embeds[:, 1:, :] * class_token_out
        image_embeds = self.layer_norm(image_embeds)

        # Resize to [batch_size, num_patches_height, num_patches_width, hidden_size]
        new_size = (
            image_embeds.shape[0],
            num_patches_height,
            num_patches_width,
            image_embeds.shape[-1],
        )
        image_embeds = image_embeds.reshape(new_size)

        return (image_embeds, vision_outputs)

    def embed_image_query(
        self,
        query_image_features: torch.FloatTensor,
        query_feature_map: torch.FloatTensor,
        interpolate_pos_encoding: bool = False,
    ) -> torch.FloatTensor:
        _, class_embeds = self.class_predictor(query_image_features)
        pred_boxes = self.box_predictor(query_image_features, query_feature_map, interpolate_pos_encoding)
        pred_boxes_as_corners = center_to_corners_format(pred_boxes)

        # Loop over query images
        best_class_embeds = []
        best_box_indices = []
        pred_boxes_device = pred_boxes_as_corners.device

        for i in range(query_image_features.shape[0]):
            each_query_box = torch.tensor([[0, 0, 1, 1]], device=pred_boxes_device)
            each_query_pred_boxes = pred_boxes_as_corners[i]
            ious, _ = box_iou(each_query_box, each_query_pred_boxes)

            # If there are no overlapping boxes, fall back to generalized IoU
            if torch.all(ious[0] == 0.0):
                ious = generalized_box_iou(each_query_box, each_query_pred_boxes)

            # Use an adaptive threshold to include all boxes within 80% of the best IoU
            iou_threshold = torch.max(ious) * 0.8

            selected_inds = (ious[0] >= iou_threshold).nonzero()
            if selected_inds.numel():
                selected_embeddings = class_embeds[i][selected_inds.squeeze(1)]
                mean_embeds = torch.mean(class_embeds[i], axis=0)
                mean_sim = torch.einsum("d,id->i", mean_embeds, selected_embeddings)
                best_box_ind = selected_inds[torch.argmin(mean_sim)]
                best_class_embeds.append(class_embeds[i][best_box_ind])
                best_box_indices.append(best_box_ind)

        if best_class_embeds:
            query_embeds = torch.stack(best_class_embeds)
            box_indices = torch.stack(best_box_indices)
        else:
            query_embeds, box_indices = None, None

        return query_embeds, box_indices, pred_boxes

    @auto_docstring
    def image_guided_detection(
        self,
        pixel_values: torch.FloatTensor,
        query_pixel_values: Optional[torch.FloatTensor] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        interpolate_pos_encoding: bool = False,
        return_dict: Optional[bool] = None,
    ) -> OwlViTImageGuidedObjectDetectionOutput:
        r"""
        query_pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
            Pixel values of query image(s) to be detected. Pass in one query image per target image.

        Examples:
        ```python
        >>> import requests
        >>> from PIL import Image
        >>> import torch
        >>> from transformers import AutoProcessor, OwlViTForObjectDetection

        >>> processor = AutoProcessor.from_pretrained("google/owlvit-base-patch16")
        >>> model = OwlViTForObjectDetection.from_pretrained("google/owlvit-base-patch16")
        >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
        >>> image = Image.open(requests.get(url, stream=True).raw)
        >>> query_url = "http://images.cocodataset.org/val2017/000000001675.jpg"
        >>> query_image = Image.open(requests.get(query_url, stream=True).raw)
        >>> inputs = processor(images=image, query_images=query_image, return_tensors="pt")
        >>> with torch.no_grad():
        ...     outputs = model.image_guided_detection(**inputs)
        >>> # Target image sizes (height, width) to rescale box predictions [batch_size, 2]
        >>> target_sizes = torch.Tensor([image.size[::-1]])
        >>> # Convert outputs (bounding boxes and class logits) to Pascal VOC format (xmin, ymin, xmax, ymax)
        >>> results = processor.post_process_image_guided_detection(
        ...     outputs=outputs, threshold=0.6, nms_threshold=0.3, target_sizes=target_sizes
        ... )
        >>> i = 0  # Retrieve predictions for the first image
        >>> boxes, scores = results[i]["boxes"], results[i]["scores"]
        >>> for box, score in zip(boxes, scores):
        ...     box = [round(i, 2) for i in box.tolist()]
        ...     print(f"Detected similar object with confidence {round(score.item(), 3)} at location {box}")
        Detected similar object with confidence 0.856 at location [10.94, 50.4, 315.8, 471.39]
        Detected similar object with confidence 1.0 at location [334.84, 25.33, 636.16, 374.71]
        ```"""
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.return_dict

        # Compute feature maps for the input and query images
        query_feature_map = self.image_embedder(
            pixel_values=query_pixel_values, interpolate_pos_encoding=interpolate_pos_encoding
        )[0]
        feature_map, vision_outputs = self.image_embedder(
            pixel_values=pixel_values,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            interpolate_pos_encoding=interpolate_pos_encoding,
        )

        batch_size, num_patches_height, num_patches_width, hidden_dim = feature_map.shape
        image_feats = torch.reshape(feature_map, (batch_size, num_patches_height * num_patches_width, hidden_dim))

        batch_size, num_patches_height, num_patches_width, hidden_dim = query_feature_map.shape
        query_image_feats = torch.reshape(
            query_feature_map, (batch_size, num_patches_height * num_patches_width, hidden_dim)
        )
        # Get top class embedding and best box index for each query image in batch
        query_embeds, best_box_indices, query_pred_boxes = self.embed_image_query(
            query_image_feats, query_feature_map, interpolate_pos_encoding
        )

        # Predict object classes [batch_size, num_patches, num_queries+1]
        (pred_logits, class_embeds) = self.class_predictor(image_feats=image_feats, query_embeds=query_embeds)

        # Predict object boxes
        target_pred_boxes = self.box_predictor(image_feats, feature_map, interpolate_pos_encoding)

        if not return_dict:
            output = (
                feature_map,
                query_feature_map,
                target_pred_boxes,
                query_pred_boxes,
                pred_logits,
                class_embeds,
                vision_outputs.to_tuple(),
            )
            output = tuple(x for x in output if x is not None)
            return output

        return OwlViTImageGuidedObjectDetectionOutput(
            image_embeds=feature_map,
            query_image_embeds=query_feature_map,
            target_pred_boxes=target_pred_boxes,
            query_pred_boxes=query_pred_boxes,
            logits=pred_logits,
            class_embeds=class_embeds,
            text_model_output=None,
            vision_model_output=vision_outputs,
        )

    @auto_docstring
    def forward(
        self,
        input_ids: torch.Tensor,
        pixel_values: torch.FloatTensor,
        attention_mask: Optional[torch.Tensor] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        interpolate_pos_encoding: bool = False,
        return_dict: Optional[bool] = None,
    ) -> OwlViTObjectDetectionOutput:
        r"""
        input_ids (`torch.LongTensor` of shape `(batch_size * num_max_text_queries, sequence_length)`, *optional*):
            Indices of input sequence tokens in the vocabulary. Indices can be obtained using [`AutoTokenizer`]. See
            [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details. [What are input
            IDs?](../glossary#input-ids).
        output_hidden_states (`bool`, *optional*):
            Whether or not to return the last hidden state. See `text_model_last_hidden_state` and
            `vision_model_last_hidden_state` under returned tensors for more detail.

        Examples:
        ```python
        >>> import requests
        >>> from PIL import Image
        >>> import torch

        >>> from transformers import OwlViTProcessor, OwlViTForObjectDetection

        >>> processor = OwlViTProcessor.from_pretrained("google/owlvit-base-patch32")
        >>> model = OwlViTForObjectDetection.from_pretrained("google/owlvit-base-patch32")

        >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
        >>> image = Image.open(requests.get(url, stream=True).raw)
        >>> text_labels = [["a photo of a cat", "a photo of a dog"]]
        >>> inputs = processor(text=text_labels, images=image, return_tensors="pt")
        >>> outputs = model(**inputs)

        >>> # Target image sizes (height, width) to rescale box predictions [batch_size, 2]
        >>> target_sizes = torch.tensor([(image.height, image.width)])
        >>> # Convert outputs (bounding boxes and class logits) to Pascal VOC format (xmin, ymin, xmax, ymax)
        >>> results = processor.post_process_grounded_object_detection(
        ...     outputs=outputs, target_sizes=target_sizes, threshold=0.1, text_labels=text_labels
        ... )
        >>> # Retrieve predictions for the first image for the corresponding text queries
        >>> result = results[0]
        >>> boxes, scores, text_labels = result["boxes"], result["scores"], result["text_labels"]
        >>> for box, score, text_label in zip(boxes, scores, text_labels):
        ...     box = [round(i, 2) for i in box.tolist()]
        ...     print(f"Detected {text_label} with confidence {round(score.item(), 3)} at location {box}")
        Detected a photo of a cat with confidence 0.707 at location [324.97, 20.44, 640.58, 373.29]
        Detected a photo of a cat with confidence 0.717 at location [1.46, 55.26, 315.55, 472.17]
        ```"""
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.return_dict

        # Embed images and text queries
        query_embeds, feature_map, outputs = self.image_text_embedder(
            input_ids=input_ids,
            pixel_values=pixel_values,
            attention_mask=attention_mask,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            interpolate_pos_encoding=interpolate_pos_encoding,
        )

        # Text and vision model outputs
        text_outputs = outputs.text_model_output
        vision_outputs = outputs.vision_model_output

        batch_size, num_patches_height, num_patches_width, hidden_dim = feature_map.shape
        image_feats = torch.reshape(feature_map, (batch_size, num_patches_height * num_patches_width, hidden_dim))

        # Reshape from [batch_size * max_text_queries, hidden_dim] -> [batch_size, max_text_queries, hidden_dim]
        max_text_queries = input_ids.shape[0] // batch_size
        query_embeds = query_embeds.reshape(batch_size, max_text_queries, query_embeds.shape[-1])

        # If first token is 0, then this is a padded query [batch_size, num_queries].
        input_ids = input_ids.reshape(batch_size, max_text_queries, input_ids.shape[-1])
        query_mask = input_ids[..., 0] > 0

        # Predict object classes [batch_size, num_patches, num_queries+1]
        (pred_logits, class_embeds) = self.class_predictor(image_feats, query_embeds, query_mask)

        # Predict object boxes
        pred_boxes = self.box_predictor(image_feats, feature_map, interpolate_pos_encoding)

        if not return_dict:
            output = (
                pred_logits,
                pred_boxes,
                query_embeds,
                feature_map,
                class_embeds,
                text_outputs.to_tuple(),
                vision_outputs.to_tuple(),
            )
            output = tuple(x for x in output if x is not None)
            return output

        return OwlViTObjectDetectionOutput(
            image_embeds=feature_map,
            text_embeds=query_embeds,
            pred_boxes=pred_boxes,
            logits=pred_logits,
            class_embeds=class_embeds,
            text_model_output=text_outputs,
            vision_model_output=vision_outputs,
        )

__all__ = ["OwlViTModel", "OwlViTPreTrainedModel", "OwlViTTextModel", "OwlViTVisionModel", "OwlViTForObjectDetection"]