"""PyTorch OWLv2 model."""

from dataclasses import dataclass
from functools import lru_cache
from typing import Any, Dict, Optional, Tuple, Union

import torch
import torch.utils.checkpoint
from torch import Tensor, nn

from ...activations import ACT2FN
from ...modeling_attn_mask_utils import _create_4d_causal_attention_mask, _prepare_4d_attention_mask
from ...modeling_outputs import BaseModelOutput, BaseModelOutputWithPooling
from ...modeling_utils import PreTrainedModel
from ...utils import ModelOutput, auto_docstring, is_vision_available, logging, torch_int
from .configuration_owlv2 import Owlv2Config, Owlv2TextConfig, Owlv2VisionConfig


if is_vision_available():
    from transformers.image_transforms import center_to_corners_format


logger = logging.get_logger(__name__)


def contrastive_loss(logits: torch.Tensor) -> torch.Tensor:
    # CLIP-style contrastive loss: each row of the similarity matrix should match its diagonal entry.
    return nn.functional.cross_entropy(logits, torch.arange(len(logits), device=logits.device))


def owlv2_loss(similarity: torch.Tensor) -> torch.Tensor:
    # Symmetric contrastive loss over the image-text similarity matrix.
    caption_loss = contrastive_loss(similarity)
    image_loss = contrastive_loss(similarity.t())
    return (caption_loss + image_loss) / 2.0


@dataclass
class Owlv2Output(ModelOutput):
    """
    Args:
        loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `return_loss` is `True`):
            Contrastive loss for image-text similarity.
        logits_per_image (`torch.FloatTensor` of shape `(image_batch_size, text_batch_size)`):
            The scaled dot product scores between `image_embeds` and `text_embeds`. This represents the image-text
            similarity scores.
        logits_per_text (`torch.FloatTensor` of shape `(text_batch_size, image_batch_size)`):
            The scaled dot product scores between `text_embeds` and `image_embeds`. This represents the text-image
            similarity scores.
        text_embeds (`torch.FloatTensor` of shape `(batch_size * num_max_text_queries, output_dim)`):
            The text embeddings obtained by applying the projection layer to the pooled output of [`Owlv2TextModel`].
        image_embeds (`torch.FloatTensor` of shape `(batch_size, output_dim)`):
            The image embeddings obtained by applying the projection layer to the pooled output of
            [`Owlv2VisionModel`].
        text_model_output (Tuple[`BaseModelOutputWithPooling`]):
            The output of the [`Owlv2TextModel`].
        vision_model_output (`BaseModelOutputWithPooling`):
            The output of the [`Owlv2VisionModel`].
    """

    loss: Optional[torch.FloatTensor] = None
    logits_per_image: Optional[torch.FloatTensor] = None
    logits_per_text: Optional[torch.FloatTensor] = None
    text_embeds: Optional[torch.FloatTensor] = None
    image_embeds: Optional[torch.FloatTensor] = None
    text_model_output: BaseModelOutputWithPooling = None
    vision_model_output: BaseModelOutputWithPooling = None

    def to_tuple(self) -> Tuple[Any]:
        return tuple(
            self[k] if k not in ["text_model_output", "vision_model_output"] else getattr(self, k).to_tuple()
            for k in self.keys()
        )


def _upcast(t: Tensor) -> Tensor:
    # Protects from numerical overflows in multiplications by upcasting to the equivalent higher type
    if t.is_floating_point():
        return t if t.dtype in (torch.float32, torch.float64) else t.float()
    else:
        return t if t.dtype in (torch.int32, torch.int64) else t.int()
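# Illustrative sketch (not part of the original module): how `owlv2_loss` above behaves on a toy
# 2x2 similarity matrix. The tensor values are made up for demonstration only.
def _demo_owlv2_loss() -> torch.Tensor:
    # Rows index text queries, columns index images; the diagonal holds the matching pairs.
    similarity = torch.tensor([[4.0, 0.1], [0.2, 3.5]])
    # Averages the text->image and image->text cross-entropy terms; a strongly diagonal
    # matrix like this one yields a loss close to zero.
    return owlv2_loss(similarity)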
def box_area(boxes: Tensor) -> Tensor:
    """
    Computes the area of a set of bounding boxes, which are specified by its (x1, y1, x2, y2) coordinates.

    Args:
        boxes (`torch.FloatTensor` of shape `(number_of_boxes, 4)`):
            Boxes for which the area will be computed. They are expected to be in (x1, y1, x2, y2) format with `0 <= x1
            < x2` and `0 <= y1 < y2`.

    Returns:
        `torch.FloatTensor`: a tensor containing the area for each box.
    """
    boxes = _upcast(boxes)
    return (boxes[:, 2] - boxes[:, 0]) * (boxes[:, 3] - boxes[:, 1])


def box_iou(boxes1, boxes2):
    area1 = box_area(boxes1)
    area2 = box_area(boxes2)

    left_top = torch.max(boxes1[:, None, :2], boxes2[:, :2])  # [N, M, 2]
    right_bottom = torch.min(boxes1[:, None, 2:], boxes2[:, 2:])  # [N, M, 2]

    width_height = (right_bottom - left_top).clamp(min=0)  # [N, M, 2]
    inter = width_height[:, :, 0] * width_height[:, :, 1]  # [N, M]

    union = area1[:, None] + area2 - inter

    iou = inter / union
    return iou, union
def generalized_box_iou(boxes1, boxes2):
    """
    Generalized IoU from https://giou.stanford.edu/. The boxes should be in [x0, y0, x1, y1] (corner) format.

    Returns:
        `torch.FloatTensor`: a [N, M] pairwise matrix, where N = len(boxes1) and M = len(boxes2)
    """
    # Degenerate boxes give inf / nan results, so do an early check.
    if not (boxes1[:, 2:] >= boxes1[:, :2]).all():
        raise ValueError(f"boxes1 must be in [x0, y0, x1, y1] (corner) format, but got {boxes1}")
    if not (boxes2[:, 2:] >= boxes2[:, :2]).all():
        raise ValueError(f"boxes2 must be in [x0, y0, x1, y1] (corner) format, but got {boxes2}")
    iou, union = box_iou(boxes1, boxes2)

    top_left = torch.min(boxes1[:, None, :2], boxes2[:, :2])
    bottom_right = torch.max(boxes1[:, None, 2:], boxes2[:, 2:])

    width_height = (bottom_right - top_left).clamp(min=0)  # [N, M, 2]
    area = width_height[:, :, 0] * width_height[:, :, 1]

    return iou - (area - union) / area


@dataclass
class Owlv2ObjectDetectionOutput(ModelOutput):
    """
    Output type of [`Owlv2ForObjectDetection`].

    Args:
        loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` are provided):
            Total loss as a linear combination of a negative log-likelihood (cross-entropy) for class prediction and a
            bounding box loss. The latter is defined as a linear combination of the L1 loss and the generalized
            scale-invariant IoU loss.
        loss_dict (`Dict`, *optional*):
            A dictionary containing the individual losses. Useful for logging.
        logits (`torch.FloatTensor` of shape `(batch_size, num_patches, num_queries)`):
            Classification logits (including no-object) for all queries.
        objectness_logits (`torch.FloatTensor` of shape `(batch_size, num_patches, 1)`):
            The objectness logits of all image patches. OWL-ViT represents images as a set of image patches where the
            total number of patches is (image_size / patch_size)**2.
        pred_boxes (`torch.FloatTensor` of shape `(batch_size, num_patches, 4)`):
            Normalized boxes coordinates for all queries, represented as (center_x, center_y, width, height). These
            values are normalized in [0, 1], relative to the size of each individual image in the batch (disregarding
            possible padding). You can use [`~Owlv2ImageProcessor.post_process_object_detection`] to retrieve the
            unnormalized bounding boxes.
        text_embeds (`torch.FloatTensor` of shape `(batch_size, num_max_text_queries, output_dim)`):
            The text embeddings obtained by applying the projection layer to the pooled output of [`Owlv2TextModel`].
        image_embeds (`torch.FloatTensor` of shape `(batch_size, patch_size, patch_size, output_dim)`):
            Pooled output of [`Owlv2VisionModel`]. OWLv2 represents images as a set of image patches and computes image
            embeddings for each patch.
        class_embeds (`torch.FloatTensor` of shape `(batch_size, num_patches, hidden_size)`):
            Class embeddings of all image patches. OWLv2 represents images as a set of image patches where the total
            number of patches is (image_size / patch_size)**2.
        text_model_output (Tuple[`BaseModelOutputWithPooling`]):
            The output of the [`Owlv2TextModel`].
        vision_model_output (`BaseModelOutputWithPooling`):
            The output of the [`Owlv2VisionModel`].
    """

    loss: Optional[torch.FloatTensor] = None
    loss_dict: Optional[Dict] = None
    logits: Optional[torch.FloatTensor] = None
    objectness_logits: Optional[torch.FloatTensor] = None
    pred_boxes: Optional[torch.FloatTensor] = None
    text_embeds: Optional[torch.FloatTensor] = None
    image_embeds: Optional[torch.FloatTensor] = None
    class_embeds: Optional[torch.FloatTensor] = None
    text_model_output: BaseModelOutputWithPooling = None
    vision_model_output: BaseModelOutputWithPooling = None

    def to_tuple(self) -> Tuple[Any]:
        return tuple(
            self[k] if k not in ["text_model_output", "vision_model_output"] else getattr(self, k).to_tuple()
            for k in self.keys()
        )


@dataclass
class Owlv2ImageGuidedObjectDetectionOutput(ModelOutput):
    """
    Output type of [`Owlv2ForObjectDetection.image_guided_detection`].

    Args:
        logits (`torch.FloatTensor` of shape `(batch_size, num_patches, num_queries)`):
            Classification logits (including no-object) for all queries.
        target_pred_boxes (`torch.FloatTensor` of shape `(batch_size, num_patches, 4)`):
            Normalized boxes coordinates for all queries, represented as (center_x, center_y, width, height). These
            values are normalized in [0, 1], relative to the size of each individual target image in the batch
            (disregarding possible padding). You can use [`~Owlv2ImageProcessor.post_process_object_detection`] to
            retrieve the unnormalized bounding boxes.
        query_pred_boxes (`torch.FloatTensor` of shape `(batch_size, num_patches, 4)`):
            Normalized boxes coordinates for all queries, represented as (center_x, center_y, width, height). These
            values are normalized in [0, 1], relative to the size of each individual query image in the batch
            (disregarding possible padding). You can use [`~Owlv2ImageProcessor.post_process_object_detection`] to
            retrieve the unnormalized bounding boxes.
        image_embeds (`torch.FloatTensor` of shape `(batch_size, patch_size, patch_size, output_dim)`):
            Pooled output of [`Owlv2VisionModel`]. OWLv2 represents images as a set of image patches and computes
            image embeddings for each patch.
        query_image_embeds (`torch.FloatTensor` of shape `(batch_size, patch_size, patch_size, output_dim)`):
            Pooled output of [`Owlv2VisionModel`]. OWLv2 represents images as a set of image patches and computes
            image embeddings for each patch.
        class_embeds (`torch.FloatTensor` of shape `(batch_size, num_patches, hidden_size)`):
            Class embeddings of all image patches. OWLv2 represents images as a set of image patches where the total
            number of patches is (image_size / patch_size)**2.
        text_model_output (Tuple[`BaseModelOutputWithPooling`]):
            The output of the [`Owlv2TextModel`].
        vision_model_output (`BaseModelOutputWithPooling`):
            The output of the [`Owlv2VisionModel`].
    """

    logits: Optional[torch.FloatTensor] = None
    image_embeds: Optional[torch.FloatTensor] = None
    query_image_embeds: Optional[torch.FloatTensor] = None
    target_pred_boxes: Optional[torch.FloatTensor] = None
    query_pred_boxes: Optional[torch.FloatTensor] = None
    class_embeds: Optional[torch.FloatTensor] = None
    text_model_output: BaseModelOutputWithPooling = None
    vision_model_output: BaseModelOutputWithPooling = None

    def to_tuple(self) -> Tuple[Any]:
        return tuple(
            self[k] if k not in ["text_model_output", "vision_model_output"] else getattr(self, k).to_tuple()
            for k in self.keys()
        )


class Owlv2VisionEmbeddings(nn.Module):
    def __init__(self, config: Owlv2VisionConfig):
        super().__init__()
        self.patch_size = config.patch_size
        self.config = config
        self.embed_dim = config.hidden_size
        self.class_embedding = nn.Parameter(torch.randn(config.hidden_size))

        self.patch_embedding = nn.Conv2d(
            in_channels=config.num_channels,
            out_channels=self.embed_dim,
            kernel_size=config.patch_size,
            stride=config.patch_size,
            bias=False,
        )

        self.num_patches = (config.image_size // config.patch_size) ** 2
        self.num_positions = self.num_patches + 1
        self.position_embedding = nn.Embedding(self.num_positions, self.embed_dim)
        self.register_buffer("position_ids", torch.arange(self.num_positions).expand((1, -1)), persistent=False)

    def interpolate_pos_encoding(self, embeddings: torch.Tensor, height: int, width: int) -> torch.Tensor:
        """
        This method allows to interpolate the pre-trained position encodings, to be able to use the model on higher resolution
        images. This method is also adapted to support torch.jit tracing.

        Adapted from:
        - https://github.com/facebookresearch/dino/blob/de9ee3df6cf39fac952ab558447af1fa1365362a/vision_transformer.py#L174-L194, and
        - https://github.com/facebookresearch/dinov2/blob/e1277af2ba9496fbadf7aec6eba56e8d882d1e35/dinov2/models/vision_transformer.py#L179-L211
        """
        num_patches = embeddings.shape[1] - 1
        position_embedding = self.position_embedding.weight.unsqueeze(0)
        num_positions = position_embedding.shape[1] - 1

        # Always interpolate when tracing so the exported model works for dynamic input shapes.
        if not torch.jit.is_tracing() and num_patches == num_positions and height == width:
            return self.position_embedding(self.position_ids)

        class_pos_embed = position_embedding[:, :1]
        patch_pos_embed = position_embedding[:, 1:]

        dim = embeddings.shape[-1]

        new_height = height // self.patch_size
        new_width = width // self.patch_size

        sqrt_num_positions = torch_int(num_positions**0.5)
        patch_pos_embed = patch_pos_embed.reshape(1, sqrt_num_positions, sqrt_num_positions, dim)
        patch_pos_embed = patch_pos_embed.permute(0, 3, 1, 2)

        patch_pos_embed = nn.functional.interpolate(
            patch_pos_embed,
            size=(new_height, new_width),
            mode="bicubic",
            align_corners=False,
        )

        patch_pos_embed = patch_pos_embed.permute(0, 2, 3, 1).view(1, -1, dim)

        return torch.cat((class_pos_embed, patch_pos_embed), dim=1)

    def forward(self, pixel_values: torch.FloatTensor, interpolate_pos_encoding: bool = False) -> torch.Tensor:
        batch_size, _, height, width = pixel_values.shape
        patch_embeds = self.patch_embedding(pixel_values)  # shape = [batch_size, embed_dim, grid, grid]
        patch_embeds = patch_embeds.flatten(2).transpose(1, 2)

        class_embeds = self.class_embedding.expand(batch_size, 1, -1)
        embeddings = torch.cat([class_embeds, patch_embeds], dim=1)

        if interpolate_pos_encoding:
            embeddings = embeddings + self.interpolate_pos_encoding(embeddings, height, width)
        else:
            embeddings = embeddings + self.position_embedding(self.position_ids)

        return embeddings


class Owlv2TextEmbeddings(nn.Module):
    def __init__(self, config: Owlv2TextConfig):
        super().__init__()
        self.token_embedding = nn.Embedding(config.vocab_size, config.hidden_size)
        self.position_embedding = nn.Embedding(config.max_position_embeddings, config.hidden_size)

        self.register_buffer(
            "position_ids", torch.arange(config.max_position_embeddings).expand((1, -1)), persistent=False
        )

    def forward(
        self,
        input_ids: Optional[torch.LongTensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        inputs_embeds: Optional[torch.FloatTensor] = None,
    ) -> torch.Tensor:
        seq_length = input_ids.shape[-1] if input_ids is not None else inputs_embeds.shape[-2]

        if position_ids is None:
            position_ids = self.position_ids[:, :seq_length]

        if inputs_embeds is None:
            inputs_embeds = self.token_embedding(input_ids)

        position_embeddings = self.position_embedding(position_ids)
        embeddings = inputs_embeds + position_embeddings

        return embeddings


class Owlv2Attention(nn.Module):
    """Multi-headed attention from 'Attention Is All You Need' paper"""

    def __init__(self, config):
        super().__init__()
        self.config = config
        self.embed_dim = config.hidden_size
        self.num_heads = config.num_attention_heads
        self.head_dim = self.embed_dim // self.num_heads
        if self.head_dim * self.num_heads != self.embed_dim:
            raise ValueError(
                f"embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim} and `num_heads`:"
                f" {self.num_heads})."
            )
        self.scale = self.head_dim**-0.5
        self.dropout = config.attention_dropout

        self.k_proj = nn.Linear(self.embed_dim, self.embed_dim)
        self.v_proj = nn.Linear(self.embed_dim, self.embed_dim)
        self.q_proj = nn.Linear(self.embed_dim, self.embed_dim)
        self.out_proj = nn.Linear(self.embed_dim, self.embed_dim)

    def _shape(self, tensor: torch.Tensor, seq_len: int, bsz: int):
        return tensor.view(bsz, seq_len, self.num_heads, self.head_dim).transpose(1, 2).contiguous()

    def forward(
        self,
        hidden_states: torch.Tensor,
        attention_mask: Optional[torch.Tensor] = None,
        causal_attention_mask: Optional[torch.Tensor] = None,
        output_attentions: Optional[bool] = False,
    ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:
        """Input shape: Batch x Time x Channel"""

        bsz, tgt_len, embed_dim = hidden_states.size()

        # get query proj
        query_states = self.q_proj(hidden_states) * self.scale
        key_states = self._shape(self.k_proj(hidden_states), -1, bsz)
        value_states = self._shape(self.v_proj(hidden_states), -1, bsz)

        proj_shape = (bsz * self.num_heads, -1, self.head_dim)
        query_states = self._shape(query_states, tgt_len, bsz).view(*proj_shape)
        key_states = key_states.view(*proj_shape)
        value_states = value_states.view(*proj_shape)

        src_len = key_states.size(1)
        attn_weights = torch.bmm(query_states, key_states.transpose(1, 2))

        if attn_weights.size() != (bsz * self.num_heads, tgt_len, src_len):
            raise ValueError(
                f"Attention weights should be of size {(bsz * self.num_heads, tgt_len, src_len)}, but is"
                f" {attn_weights.size()}"
            )

        # apply the causal attention mask first
        if causal_attention_mask is not None:
            if causal_attention_mask.size() != (bsz, 1, tgt_len, src_len):
                raise ValueError(
                    f"Attention mask should be of size {(bsz, 1, tgt_len, src_len)}, but is"
                    f" {causal_attention_mask.size()}"
                )
            attn_weights = attn_weights.view(bsz, self.num_heads, tgt_len, src_len) + causal_attention_mask
            attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len, src_len)

        if attention_mask is not None:
            if attention_mask.size() != (bsz, 1, tgt_len, src_len):
                raise ValueError(
                    f"Attention mask should be of size {(bsz, 1, tgt_len, src_len)}, but is {attention_mask.size()}"
                )
            attn_weights = attn_weights.view(bsz, self.num_heads, tgt_len, src_len) + attention_mask
            attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len, src_len)

        attn_weights = nn.functional.softmax(attn_weights, dim=-1)

        if output_attentions:
            # Reshape twice so that attn_weights keeps its gradient and can be returned.
            attn_weights_reshaped = attn_weights.view(bsz, self.num_heads, tgt_len, src_len)
            attn_weights = attn_weights_reshaped.view(bsz * self.num_heads, tgt_len, src_len)
        else:
            attn_weights_reshaped = None

        attn_probs = nn.functional.dropout(attn_weights, p=self.dropout, training=self.training)

        # For int8 compatibility, sometimes the `attn_probs` are in `fp32`
        attn_probs = attn_probs.to(value_states.dtype)

        attn_output = torch.bmm(attn_probs, value_states)

        if attn_output.size() != (bsz * self.num_heads, tgt_len, self.head_dim):
            raise ValueError(
                f"`attn_output` should be of size {(bsz, self.num_heads, tgt_len, self.head_dim)}, but is"
                f" {attn_output.size()}"
            )

        attn_output = attn_output.view(bsz, self.num_heads, tgt_len, self.head_dim)
        attn_output = attn_output.transpose(1, 2)
        attn_output = attn_output.reshape(bsz, tgt_len, embed_dim)

        attn_output = self.out_proj(attn_output)

        return attn_output, attn_weights_reshaped


class Owlv2MLP(nn.Module):
    def __init__(self, config):
        super().__init__()
        self.config = config
        self.activation_fn = ACT2FN[config.hidden_act]
        self.fc1 = nn.Linear(config.hidden_size, config.intermediate_size)
        self.fc2 = nn.Linear(config.intermediate_size, config.hidden_size)

    def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
        hidden_states = self.fc1(hidden_states)
        hidden_states = self.activation_fn(hidden_states)
        hidden_states = self.fc2(hidden_states)
        return hidden_states


class Owlv2EncoderLayer(nn.Module):
    def __init__(self, config: Owlv2Config):
        super().__init__()
        self.embed_dim = config.hidden_size
        self.self_attn = Owlv2Attention(config)
        self.layer_norm1 = nn.LayerNorm(self.embed_dim, eps=config.layer_norm_eps)
        self.mlp = Owlv2MLP(config)
        self.layer_norm2 = nn.LayerNorm(self.embed_dim, eps=config.layer_norm_eps)

    def forward(
        self,
        hidden_states: torch.Tensor,
        attention_mask: torch.Tensor,
        causal_attention_mask: torch.Tensor,
        output_attentions: Optional[bool] = False,
    ) -> Tuple[torch.FloatTensor]:
        """
        Args:
            hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)`
            attention_mask (`torch.FloatTensor`): attention mask of size
                `(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values.
                `(config.encoder_attention_heads,)`.
            output_attentions (`bool`, *optional*):
                Whether or not to return the attentions tensors of all attention layers. See `attentions` under
                returned tensors for more detail.
        """
        residual = hidden_states

        hidden_states = self.layer_norm1(hidden_states)
        hidden_states, attn_weights = self.self_attn(
            hidden_states=hidden_states,
            attention_mask=attention_mask,
            causal_attention_mask=causal_attention_mask,
            output_attentions=output_attentions,
        )
        hidden_states = residual + hidden_states

        residual = hidden_states
        hidden_states = self.layer_norm2(hidden_states)
        hidden_states = self.mlp(hidden_states)
        hidden_states = residual + hidden_states

        outputs = (hidden_states,)

        if output_attentions:
            outputs += (attn_weights,)

        return outputs


@auto_docstring
class Owlv2PreTrainedModel(PreTrainedModel):
    config_class = Owlv2Config
    base_model_prefix = "owlv2"
    supports_gradient_checkpointing = True
    _no_split_modules = ["Owlv2EncoderLayer"]

    def _init_weights(self, module):
        """Initialize the weights"""
        factor = self.config.initializer_factor
        if isinstance(module, Owlv2TextEmbeddings):
            module.token_embedding.weight.data.normal_(mean=0.0, std=factor * 0.02)
            module.position_embedding.weight.data.normal_(mean=0.0, std=factor * 0.02)
        elif isinstance(module, Owlv2VisionEmbeddings):
            factor = self.config.initializer_factor
            nn.init.normal_(module.class_embedding, mean=0.0, std=module.embed_dim**-0.5 * factor)
            nn.init.normal_(module.patch_embedding.weight, std=module.config.initializer_range * factor)
            nn.init.normal_(module.position_embedding.weight, std=module.config.initializer_range * factor)
        elif isinstance(module, Owlv2Attention):
            factor = self.config.initializer_factor
            in_proj_std = (module.embed_dim**-0.5) * ((2 * module.config.num_hidden_layers) ** -0.5) * factor
            out_proj_std = (module.embed_dim**-0.5) * factor
            nn.init.normal_(module.q_proj.weight, std=in_proj_std)
            nn.init.normal_(module.k_proj.weight, std=in_proj_std)
            nn.init.normal_(module.v_proj.weight, std=in_proj_std)
            nn.init.normal_(module.out_proj.weight, std=out_proj_std)
        elif isinstance(module, Owlv2MLP):
            factor = self.config.initializer_factor
            in_proj_std = (module.config.hidden_size**-0.5) * ((2 * module.config.num_hidden_layers) ** -0.5) * factor
            fc_std = (2 * module.config.hidden_size) ** -0.5 * factor
            nn.init.normal_(module.fc1.weight, std=fc_std)
            nn.init.normal_(module.fc2.weight, std=in_proj_std)
        elif isinstance(module, Owlv2Model):
            nn.init.normal_(
                module.text_projection.weight,
                std=module.text_embed_dim**-0.5 * self.config.initializer_factor,
            )
            nn.init.normal_(
                module.visual_projection.weight,
                std=module.vision_embed_dim**-0.5 * self.config.initializer_factor,
            )
        if isinstance(module, nn.LayerNorm):
            module.bias.data.zero_()
            module.weight.data.fill_(1.0)
        if isinstance(module, nn.Linear) and module.bias is not None:
            module.bias.data.zero_()


class Owlv2Encoder(nn.Module):
    """
    Transformer encoder consisting of `config.num_hidden_layers` self attention layers. Each layer is a
    [`Owlv2EncoderLayer`].

    Args:
        config: Owlv2Config
    """

    def __init__(self, config: Owlv2Config):
        super().__init__()
        self.layers = nn.ModuleList([Owlv2EncoderLayer(config) for _ in range(config.num_hidden_layers)])
        self.gradient_checkpointing = False

    def forward(
        self,
        inputs_embeds,
        attention_mask: Optional[torch.Tensor] = None,
        causal_attention_mask: Optional[torch.Tensor] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[Tuple, BaseModelOutput]:
        """
        Args:
            inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`).
            attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
                Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
                - 1 for tokens that are **not masked**,
                - 0 for tokens that are **masked**.
                [What are attention masks?](../glossary#attention-mask)
            causal_attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
                Causal mask for the text model. Mask values selected in `[0, 1]`:
                - 1 for tokens that are **not masked**,
                - 0 for tokens that are **masked**.
                [What are attention masks?](../glossary#attention-mask)
            output_attentions (`bool`, *optional*):
                Whether or not to return the attentions tensors of all attention layers. See `attentions` under
                returned tensors for more detail.
            output_hidden_states (`bool`, *optional*):
                Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors
                for more detail.
            return_dict (`bool`, *optional*):
                Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
        """
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        encoder_states = () if output_hidden_states else None
        all_attentions = () if output_attentions else None

        hidden_states = inputs_embeds
        for encoder_layer in self.layers:
            if output_hidden_states:
                encoder_states = encoder_states + (hidden_states,)
            if self.gradient_checkpointing and self.training:
                layer_outputs = self._gradient_checkpointing_func(
                    encoder_layer.__call__,
                    hidden_states,
                    attention_mask,
                    causal_attention_mask,
                    output_attentions,
                )
            else:
                layer_outputs = encoder_layer(
                    hidden_states,
                    attention_mask,
                    causal_attention_mask,
                    output_attentions=output_attentions,
                )

            hidden_states = layer_outputs[0]

            if output_attentions:
                all_attentions = all_attentions + (layer_outputs[1],)

        if output_hidden_states:
            encoder_states = encoder_states + (hidden_states,)

        if not return_dict:
            return tuple(v for v in [hidden_states, encoder_states, all_attentions] if v is not None)
        return BaseModelOutput(
            last_hidden_state=hidden_states, hidden_states=encoder_states, attentions=all_attentions
        )


class Owlv2TextTransformer(nn.Module):
    def __init__(self, config: Owlv2TextConfig):
        super().__init__()
        self.config = config
        embed_dim = config.hidden_size
        self.embeddings = Owlv2TextEmbeddings(config)
        self.encoder = Owlv2Encoder(config)
        self.final_layer_norm = nn.LayerNorm(embed_dim, eps=config.layer_norm_eps)

    @auto_docstring
    def forward(
        self,
        input_ids: torch.Tensor,
        attention_mask: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.Tensor] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[Tuple, BaseModelOutputWithPooling]:
        """
        input_ids (`torch.LongTensor` of shape `(batch_size * num_max_text_queries, sequence_length)`):
            Indices of input sequence tokens in the vocabulary. Indices can be obtained using [`AutoTokenizer`]. See
            [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details. [What are input
            IDs?](../glossary#input-ids)
        """
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        input_shape = input_ids.size()
        input_ids = input_ids.view(-1, input_shape[-1])
        hidden_states = self.embeddings(input_ids=input_ids, position_ids=position_ids)

        # OWLv2's text model uses a causal mask, prepare it here.
        causal_attention_mask = _create_4d_causal_attention_mask(
            input_shape, hidden_states.dtype, device=hidden_states.device
        )
        # expand attention_mask from [num_samples, seq_len] to [num_samples, 1, tgt_seq_len, src_seq_len]
        if attention_mask is not None:
            attention_mask = _prepare_4d_attention_mask(attention_mask, hidden_states.dtype)

        encoder_outputs = self.encoder(
            inputs_embeds=hidden_states,
            attention_mask=attention_mask,
            causal_attention_mask=causal_attention_mask,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )

        last_hidden_state = encoder_outputs[0]
        last_hidden_state = self.final_layer_norm(last_hidden_state)

        # take features from the end-of-text token embedding (the end-of-text token has the highest id in each sequence)
        pooled_output = last_hidden_state[
            torch.arange(last_hidden_state.shape[0], device=last_hidden_state.device),
            input_ids.to(torch.int).argmax(dim=-1).to(last_hidden_state.device),
        ]

        if not return_dict:
            return (last_hidden_state, pooled_output) + encoder_outputs[1:]

        return BaseModelOutputWithPooling(
            last_hidden_state=last_hidden_state,
            pooler_output=pooled_output,
            hidden_states=encoder_outputs.hidden_states,
            attentions=encoder_outputs.attentions,
        )


class Owlv2TextModel(Owlv2PreTrainedModel):
    config_class = Owlv2TextConfig

    def __init__(self, config: Owlv2TextConfig):
        super().__init__(config)
        self.text_model = Owlv2TextTransformer(config)
        # Initialize weights and apply final processing
        self.post_init()

    def get_input_embeddings(self) -> nn.Module:
        return self.text_model.embeddings.token_embedding

    def set_input_embeddings(self, value):
        self.text_model.embeddings.token_embedding = value

    @auto_docstring
    def forward(
        self,
        input_ids: torch.Tensor,
        attention_mask: Optional[torch.Tensor] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[Tuple, BaseModelOutputWithPooling]:
        """
        input_ids (`torch.LongTensor` of shape `(batch_size * num_max_text_queries, sequence_length)`):
            Indices of input sequence tokens in the vocabulary. Indices can be obtained using [`AutoTokenizer`]. See
            [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details. [What are input
            IDs?](../glossary#input-ids)

        Examples:
        ```python
        >>> from transformers import AutoProcessor, Owlv2TextModel

        >>> model = Owlv2TextModel.from_pretrained("google/owlv2-base-patch16")
        >>> processor = AutoProcessor.from_pretrained("google/owlv2-base-patch16")
        >>> inputs = processor(
        ...     text=[["a photo of a cat", "a photo of a dog"], ["photo of an astronaut"]], return_tensors="pt"
        ... )
        >>> outputs = model(**inputs)
        >>> last_hidden_state = outputs.last_hidden_state
        >>> pooled_output = outputs.pooler_output  # pooled (EOS token) states
        ```"""
        # Get embeddings for all text queries in all batch samples
        return self.text_model(
            input_ids=input_ids,
            attention_mask=attention_mask,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )


class Owlv2VisionTransformer(nn.Module):
    def __init__(self, config: Owlv2VisionConfig):
        super().__init__()
        self.config = config

        self.embeddings = Owlv2VisionEmbeddings(config)
        self.pre_layernorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
        self.encoder = Owlv2Encoder(config)
        self.post_layernorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)

    @auto_docstring
    def forward(
        self,
        pixel_values: torch.FloatTensor,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        interpolate_pos_encoding: bool = False,
        return_dict: Optional[bool] = None,
    ) -> Union[Tuple, BaseModelOutputWithPooling]:
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        # Cast the input to the expected `dtype`
        expected_input_dtype = self.embeddings.patch_embedding.weight.dtype
        pixel_values = pixel_values.to(expected_input_dtype)

        hidden_states = self.embeddings(pixel_values, interpolate_pos_encoding=interpolate_pos_encoding)
        hidden_states = self.pre_layernorm(hidden_states)

        encoder_outputs = self.encoder(
            inputs_embeds=hidden_states,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )

        last_hidden_state = encoder_outputs[0]
        pooled_output = last_hidden_state[:, 0, :]

        pooled_output = self.post_layernorm(pooled_output)

        if not return_dict:
            return (last_hidden_state, pooled_output) + encoder_outputs[1:]

        return BaseModelOutputWithPooling(
            last_hidden_state=last_hidden_state,
            pooler_output=pooled_output,
            hidden_states=encoder_outputs.hidden_states,
            attentions=encoder_outputs.attentions,
        )


class Owlv2VisionModel(Owlv2PreTrainedModel):
    config_class = Owlv2VisionConfig
    main_input_name = "pixel_values"

    def __init__(self, config: Owlv2VisionConfig):
        super().__init__(config)
        self.vision_model = Owlv2VisionTransformer(config)
        # Initialize weights and apply final processing
        self.post_init()

    def get_input_embeddings(self) -> nn.Module:
        return self.vision_model.embeddings.patch_embedding

    @auto_docstring
    def forward(
        self,
        pixel_values: Optional[torch.FloatTensor] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        interpolate_pos_encoding: bool = False,
        return_dict: Optional[bool] = None,
    ) -> Union[Tuple, BaseModelOutputWithPooling]:
        """
        Examples:
        ```python
        >>> from PIL import Image
        >>> import requests
        >>> from transformers import AutoProcessor, Owlv2VisionModel

        >>> model = Owlv2VisionModel.from_pretrained("google/owlv2-base-patch16")
        >>> processor = AutoProcessor.from_pretrained("google/owlv2-base-patch16")
        >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
        >>> image = Image.open(requests.get(url, stream=True).raw)

        >>> inputs = processor(images=image, return_tensors="pt")

        >>> outputs = model(**inputs)
        >>> last_hidden_state = outputs.last_hidden_state
        >>> pooled_output = outputs.pooler_output  # pooled CLS states
        ```"""
        return self.vision_model(
            pixel_values=pixel_values,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            interpolate_pos_encoding=interpolate_pos_encoding,
            return_dict=return_dict,
        )


@auto_docstring
class Owlv2Model(Owlv2PreTrainedModel):
    config_class = Owlv2Config

    def __init__(self, config: Owlv2Config):
        super().__init__(config)

        if not isinstance(config.text_config, Owlv2TextConfig):
            raise TypeError(
                "config.text_config is expected to be of type Owlv2TextConfig but is of type"
                f" {type(config.text_config)}."
            )

        if not isinstance(config.vision_config, Owlv2VisionConfig):
            raise TypeError(
                "config.vision_config is expected to be of type Owlv2VisionConfig but is of type"
                f" {type(config.vision_config)}."
            )

        text_config = config.text_config
        vision_config = config.vision_config

        self.projection_dim = config.projection_dim
        self.text_embed_dim = text_config.hidden_size
        self.vision_embed_dim = vision_config.hidden_size

        self.text_model = Owlv2TextTransformer(text_config)
        self.vision_model = Owlv2VisionTransformer(vision_config)

        self.visual_projection = nn.Linear(self.vision_embed_dim, self.projection_dim, bias=False)
        self.text_projection = nn.Linear(self.text_embed_dim, self.projection_dim, bias=False)
        self.logit_scale = nn.Parameter(torch.tensor(config.logit_scale_init_value))

        # Initialize weights and apply final processing
        self.post_init()

    @auto_docstring
    def get_text_features(
        self,
        input_ids: Optional[torch.Tensor] = None,
        attention_mask: Optional[torch.Tensor] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> torch.FloatTensor:
        """
        input_ids (`torch.LongTensor` of shape `(batch_size * num_max_text_queries, sequence_length)`):
            Indices of input sequence tokens in the vocabulary. Indices can be obtained using [`AutoTokenizer`]. See
            [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details. [What are input
            IDs?](../glossary#input-ids)

        Returns:
            text_features (`torch.FloatTensor` of shape `(batch_size, output_dim)`): The text embeddings obtained by
            applying the projection layer to the pooled output of [`Owlv2TextModel`].

        Examples:
        ```python
        >>> from transformers import AutoProcessor, Owlv2Model

        >>> model = Owlv2Model.from_pretrained("google/owlv2-base-patch16-ensemble")
        >>> processor = AutoProcessor.from_pretrained("google/owlv2-base-patch16-ensemble")
        >>> inputs = processor(
        ...     text=[["a photo of a cat", "a photo of a dog"], ["photo of an astronaut"]], return_tensors="pt"
        ... )
        >>> text_features = model.get_text_features(**inputs)
        ```"""
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        text_output = self.text_model(input_ids=input_ids, attention_mask=attention_mask, return_dict=return_dict)
        pooled_output = text_output[1]
        text_features = self.text_projection(pooled_output)

        return text_features

    @auto_docstring
    def get_image_features(
        self,
        pixel_values: Optional[torch.FloatTensor] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        interpolate_pos_encoding: bool = False,
        return_dict: Optional[bool] = None,
    ) -> torch.FloatTensor:
        """
        Returns:
            image_features (`torch.FloatTensor` of shape `(batch_size, output_dim)`): The image embeddings obtained by
            applying the projection layer to the pooled output of [`Owlv2VisionModel`].

        Examples:
        ```python
        >>> from PIL import Image
        >>> import requests
        >>> from transformers import AutoProcessor, Owlv2Model

        >>> model = Owlv2Model.from_pretrained("google/owlv2-base-patch16-ensemble")
        >>> processor = AutoProcessor.from_pretrained("google/owlv2-base-patch16-ensemble")
        >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
        >>> image = Image.open(requests.get(url, stream=True).raw)
        >>> inputs = processor(images=image, return_tensors="pt")
        >>> image_features = model.get_image_features(**inputs)
        ```"""
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        vision_outputs = self.vision_model(
            pixel_values=pixel_values,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            interpolate_pos_encoding=interpolate_pos_encoding,
            return_dict=return_dict,
        )
        pooled_output = vision_outputs[1]
        image_features = self.visual_projection(pooled_output)

        return image_features

    @auto_docstring
    def forward(
        self,
        input_ids: Optional[torch.LongTensor] = None,
        pixel_values: Optional[torch.FloatTensor] = None,
        attention_mask: Optional[torch.Tensor] = None,
        return_loss: Optional[bool] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        interpolate_pos_encoding: bool = False,
        return_base_image_embeds: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[Tuple, Owlv2Output]:
        """
        return_loss (`bool`, *optional*):
            Whether or not to return the contrastive loss.
        return_base_image_embeds (`bool`, *optional*):
            Whether or not to return the base image embeddings.

        Examples:
        ```python
        >>> from PIL import Image
        >>> import requests
        >>> from transformers import AutoProcessor, Owlv2Model

        >>> model = Owlv2Model.from_pretrained("google/owlv2-base-patch16-ensemble")
        >>> processor = AutoProcessor.from_pretrained("google/owlv2-base-patch16-ensemble")
        >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
        >>> image = Image.open(requests.get(url, stream=True).raw)
        >>> inputs = processor(text=[["a photo of a cat", "a photo of a dog"]], images=image, return_tensors="pt")
        >>> outputs = model(**inputs)
        >>> logits_per_image = outputs.logits_per_image  # this is the image-text similarity score
        >>> probs = logits_per_image.softmax(dim=1)  # we can take the softmax to get the label probabilities
        ```"""
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        vision_outputs = self.vision_model(
            pixel_values=pixel_values,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            interpolate_pos_encoding=interpolate_pos_encoding,
            return_dict=return_dict,
        )

        # Get embeddings for all text queries in all batch samples
        text_outputs = self.text_model(
            input_ids=input_ids,
            attention_mask=attention_mask,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )

        text_embeds = text_outputs[1]
        text_embeds = self.text_projection(text_embeds)
        image_embeds = vision_outputs[1]
        image_embeds = self.visual_projection(image_embeds)

        # normalized features
        image_embeds = image_embeds / torch.linalg.norm(image_embeds, ord=2, dim=-1, keepdim=True)
        text_embeds_norm = text_embeds / torch.linalg.norm(text_embeds, ord=2, dim=-1, keepdim=True)

        # cosine similarity as logits, computed on the correct device
        logit_scale = self.logit_scale.exp().to(image_embeds.device)

        logits_per_text = torch.matmul(text_embeds_norm, image_embeds.t()) * logit_scale
        logits_per_image = logits_per_text.t()

        loss = None
        if return_loss:
            loss = owlv2_loss(logits_per_text)

        text_embeds = text_embeds_norm

        if not return_dict:
            output = (logits_per_image, logits_per_text, text_embeds, image_embeds, text_outputs, vision_outputs)
            return ((loss,) + output) if loss is not None else output

        return Owlv2Output(
            loss=loss,
            logits_per_image=logits_per_image,
            logits_per_text=logits_per_text,
            text_embeds=text_embeds,
            image_embeds=image_embeds,
            text_model_output=text_outputs,
            vision_model_output=vision_outputs,
        )


class Owlv2BoxPredictionHead(nn.Module):
    def __init__(self, config: Owlv2Config, out_dim: int = 4):
        super().__init__()

        width = config.vision_config.hidden_size
        self.dense0 = nn.Linear(width, width)
        self.dense1 = nn.Linear(width, width)
        self.gelu = nn.GELU()
        self.dense2 = nn.Linear(width, out_dim)

    def forward(self, image_features: torch.Tensor) -> torch.FloatTensor:
        output = self.dense0(image_features)
        output = self.gelu(output)
        output = self.dense1(output)
        output = self.gelu(output)
        output = self.dense2(output)
        return output


class Owlv2ClassPredictionHead(nn.Module):
    def __init__(self, config: Owlv2Config):
        super().__init__()

        out_dim = config.text_config.hidden_size
        self.query_dim = config.vision_config.hidden_size

        self.dense0 = nn.Linear(self.query_dim, out_dim)
        self.logit_shift = nn.Linear(self.query_dim, 1)
        self.logit_scale = nn.Linear(self.query_dim, 1)
        self.elu = nn.ELU()

    def forward(
        self,
        image_embeds: torch.FloatTensor,
        query_embeds: Optional[torch.FloatTensor],
        query_mask: Optional[torch.Tensor],
    ) -> Tuple[torch.FloatTensor]:
        image_class_embeds = self.dense0(image_embeds)
        if query_embeds is None:
            device = image_class_embeds.device
            batch_size, num_patches = image_class_embeds.shape[:2]
            pred_logits = torch.zeros((batch_size, num_patches, self.query_dim)).to(device)
            return (pred_logits, image_class_embeds)

        # Normalize image and text features
        image_class_embeds = image_class_embeds / (torch.linalg.norm(image_class_embeds, dim=-1, keepdim=True) + 1e-6)
        query_embeds = query_embeds / (torch.linalg.norm(query_embeds, dim=-1, keepdim=True) + 1e-6)

        # Get class predictions
        pred_logits = torch.einsum("...pd,...qd->...pq", image_class_embeds, query_embeds)

        # Apply a learnable shift and scale to logits
        logit_shift = self.logit_shift(image_embeds)
        logit_scale = self.logit_scale(image_embeds)
        logit_scale = self.elu(logit_scale) + 1
        pred_logits = (pred_logits + logit_shift) * logit_scale

        if query_mask is not None:
            if query_mask.ndim > 1:
                query_mask = torch.unsqueeze(query_mask, dim=-2)

            pred_logits = torch.where(query_mask == 0, torch.finfo(pred_logits.dtype).min, pred_logits)
            pred_logits = pred_logits.to(torch.float32)

        return (pred_logits, image_class_embeds)


class Owlv2ForObjectDetection(Owlv2PreTrainedModel):
    config_class = Owlv2Config

    def __init__(self, config: Owlv2Config):
        super().__init__(config)

        self.owlv2 = Owlv2Model(config)
        self.class_head = Owlv2ClassPredictionHead(config)
        self.box_head = Owlv2BoxPredictionHead(config)
        self.objectness_head = Owlv2BoxPredictionHead(config, out_dim=1)

        self.layer_norm = nn.LayerNorm(config.vision_config.hidden_size, eps=config.vision_config.layer_norm_eps)
        self.sigmoid = nn.Sigmoid()

        self.config = config
        self.num_patches_height = self.config.vision_config.image_size // self.config.vision_config.patch_size
        self.num_patches_width = self.config.vision_config.image_size // self.config.vision_config.patch_size
        self.box_bias = self.compute_box_bias(self.num_patches_height, self.num_patches_width)

    @staticmethod
    def normalize_grid_corner_coordinates(num_patches_height: int, num_patches_width: int) -> torch.Tensor:
        # Create grid coordinates using torch
        x_coordinates = torch.arange(1, num_patches_width + 1, dtype=torch.float32)
        y_coordinates = torch.arange(1, num_patches_height + 1, dtype=torch.float32)
        xx, yy = torch.meshgrid(x_coordinates, y_coordinates, indexing="xy")

        # Stack the coordinates and divide by their respective patch counts
        box_coordinates = torch.stack((xx, yy), dim=-1)
        box_coordinates[..., 0] /= num_patches_width
        box_coordinates[..., 1] /= num_patches_height

        # Flatten (num_patches_height, num_patches_width, 2) -> (num_patches_height * num_patches_width, 2)
        box_coordinates = box_coordinates.view(-1, 2)

        return box_coordinates

    def objectness_predictor(self, image_features: torch.FloatTensor) -> torch.FloatTensor:
        """Predicts the probability that each image feature token is an object.

        Args:
            image_features (`torch.FloatTensor` of shape `(batch_size, num_patches, hidden_dim)`):
                Features extracted from the image.
        Returns:
            Objectness scores.
        """
        image_features = image_features.detach()
        objectness_logits = self.objectness_head(image_features)
        objectness_logits = objectness_logits[..., 0]
        return objectness_logits

    @lru_cache(maxsize=2)
    def compute_box_bias(
        self, num_patches_height: int, num_patches_width: int, feature_map: Optional[torch.FloatTensor] = None
    ) -> torch.Tensor:
        if feature_map is not None:
            raise ValueError("feature_map has been deprecated as an input. Please pass in num_patches instead")
        # The box center is biased to its position on the feature grid
        box_coordinates = self.normalize_grid_corner_coordinates(num_patches_height, num_patches_width)
        box_coordinates = torch.clip(box_coordinates, 0.0, 1.0)

        # Unnormalize xy
        box_coord_bias = torch.log(box_coordinates + 1e-4) - torch.log1p(-box_coordinates + 1e-4)

        # The box size is biased to the patch size
        box_size = torch.full_like(box_coord_bias, 1.0)
        box_size[..., 0] /= num_patches_width
        box_size[..., 1] /= num_patches_height
        box_size_bias = torch.log(box_size + 1e-4) - torch.log1p(-box_size + 1e-4)

        # Compute box bias
        box_bias = torch.cat([box_coord_bias, box_size_bias], dim=-1)
        return box_bias

    def box_predictor(
        self,
        image_feats: torch.FloatTensor,
        feature_map: torch.FloatTensor,
        interpolate_pos_encoding: bool = False,
    ) -> torch.FloatTensor:
        """
        Args:
            image_feats:
                Features extracted from the image, returned by the `image_text_embedder` method.
            feature_map:
                A spatial re-arrangement of image_features, also returned by the `image_text_embedder` method.
            interpolate_pos_encoding:
                Whether to interpolate the pre-trained position encodings.
        Returns:
            pred_boxes:
                List of predicted boxes (cxcywh normalized to 0, 1) nested within a dictionary.
        """
        # Bounding box detection head [batch_size, num_boxes, 4].
        pred_boxes = self.box_head(image_feats)

        # Compute the location of each token on the grid and use it to compute a bias for the bbox prediction
        if interpolate_pos_encoding:
            _, num_patches_height, num_patches_width, _ = feature_map.shape
            box_bias = self.compute_box_bias(num_patches_height, num_patches_width)
        else:
            box_bias = self.box_bias

        box_bias = box_bias.to(feature_map.device)
        pred_boxes += box_bias
        pred_boxes = self.sigmoid(pred_boxes)
        return pred_boxes

    def class_predictor(
        self,
        image_feats: torch.FloatTensor,
        query_embeds: Optional[torch.FloatTensor] = None,
        query_mask: Optional[torch.Tensor] = None,
    ) -> Tuple[torch.FloatTensor]:
        """
        Args:
            image_feats:
                Features extracted from the `image_text_embedder`.
            query_embeds:
                Text query embeddings.
            query_mask:
                Must be provided with query_embeddings. A mask indicating which query embeddings are valid.
        """
        (pred_logits, image_class_embeds) = self.class_head(image_feats, query_embeds, query_mask)

        return (pred_logits, image_class_embeds)

    def image_text_embedder(
        self,
        input_ids: torch.Tensor,
        pixel_values: torch.FloatTensor,
        attention_mask: torch.Tensor,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        interpolate_pos_encoding: bool = False,
    ) -> Tuple[torch.FloatTensor]:
        # Encode text and image
        outputs = self.owlv2(
            pixel_values=pixel_values,
            input_ids=input_ids,
            attention_mask=attention_mask,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            interpolate_pos_encoding=interpolate_pos_encoding,
            return_dict=True,
        )

        if interpolate_pos_encoding:
            _, _, height, width = pixel_values.shape
            num_patches_height = height // self.config.vision_config.patch_size
            num_patches_width = width // self.config.vision_config.patch_size
        else:
            num_patches_height = self.num_patches_height
            num_patches_width = self.num_patches_width

        # Get image embeddings
        last_hidden_state = outputs.vision_model_output[0]
        image_embeds = self.owlv2.vision_model.post_layernorm(last_hidden_state)

        # Resize class token
        class_token_out = torch.broadcast_to(image_embeds[:, :1, :], image_embeds[:, :-1].shape)

        # Merge image embedding with class tokens
        image_embeds = image_embeds[:, 1:, :] * class_token_out
        image_embeds = self.layer_norm(image_embeds)

        # Resize to [batch_size, num_patches_height, num_patches_width, hidden_size]
        new_size = (
            image_embeds.shape[0],
            num_patches_height,
            num_patches_width,
            image_embeds.shape[-1],
        )
        image_embeds = image_embeds.reshape(new_size)
        text_embeds = outputs[-4]

        return (text_embeds, image_embeds, outputs)

    def image_embedder(
        self,
        pixel_values: torch.FloatTensor,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        interpolate_pos_encoding: bool = False,
    ) -> Tuple[torch.FloatTensor]:
        # Get Owlv2Model vision embeddings (same as CLIP)
        vision_outputs = self.owlv2.vision_model(
            pixel_values=pixel_values, interpolate_pos_encoding=interpolate_pos_encoding, return_dict=True
        )

        if interpolate_pos_encoding:
            _, _, height, width = pixel_values.shape
            num_patches_height = height // self.config.vision_config.patch_size
            num_patches_width = width // self.config.vision_config.patch_size
        else:
            num_patches_height = self.num_patches_height
            num_patches_width = self.num_patches_width

        # Apply post_layernorm to last_hidden_state, return non-projected output
        last_hidden_state = vision_outputs[0]
        image_embeds = self.owlv2.vision_model.post_layernorm(last_hidden_state)

        # Resize class token
        class_token_out = torch.broadcast_to(image_embeds[:, :1, :], image_embeds[:, :-1].shape)

        # Merge image embedding with class tokens
        image_embeds = image_embeds[:, 1:, :] * class_token_out
        image_embeds = self.layer_norm(image_embeds)

        # Resize to [batch_size, num_patches_height, num_patches_width, hidden_size]
        new_size = (
            image_embeds.shape[0],
            num_patches_height,
            num_patches_width,
            image_embeds.shape[-1],
        )
        image_embeds = image_embeds.reshape(new_size)

        return (image_embeds, vision_outputs)

    def embed_image_query(
        self,
        query_image_features: torch.FloatTensor,
        query_feature_map: torch.FloatTensor,
        interpolate_pos_encoding: bool = False,
    ) -> torch.FloatTensor:
        _, class_embeds = self.class_predictor(query_image_features)
        pred_boxes = self.box_predictor(query_image_features, query_feature_map, interpolate_pos_encoding)
        pred_boxes_as_corners = center_to_corners_format(pred_boxes)

        # Loop over query images
        best_class_embeds = []
        best_box_indices = []
        pred_boxes_device = pred_boxes_as_corners.device

        for i in range(query_image_features.shape[0]):
            each_query_box = torch.tensor([[0, 0, 1, 1]], device=pred_boxes_device)
            each_query_pred_boxes = pred_boxes_as_corners[i]
            ious, _ = box_iou(each_query_box, each_query_pred_boxes)

            # If there are no overlapping boxes, fall back to generalized IoU
            if torch.all(ious[0] == 0.0):
                ious = generalized_box_iou(each_query_box, each_query_pred_boxes)

            # Use an adaptive threshold to include all boxes within 80% of the best IoU
            iou_threshold = torch.max(ious) * 0.8

            selected_inds = (ious[0] >= iou_threshold).nonzero()
            if selected_inds.numel():
                selected_embeddings = class_embeds[i][selected_inds.squeeze(1)]
                mean_embeds = torch.mean(class_embeds[i], axis=0)
                mean_sim = torch.einsum("d,id->i", mean_embeds, selected_embeddings)
                best_box_ind = selected_inds[torch.argmin(mean_sim)]
                best_class_embeds.append(class_embeds[i][best_box_ind])
                best_box_indices.append(best_box_ind)

        if best_class_embeds:
            query_embeds = torch.stack(best_class_embeds)
            box_indices = torch.stack(best_box_indices)
        else:
            query_embeds, box_indices = None, None

        return query_embeds, box_indices, pred_boxes

    @auto_docstring
    def image_guided_detection(
        self,
        pixel_values: torch.FloatTensor,
        query_pixel_values: Optional[torch.FloatTensor] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        interpolate_pos_encoding: bool = False,
        return_dict: Optional[bool] = None,
    ) -> Owlv2ImageGuidedObjectDetectionOutput:
        """
        query_pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
            Pixel values of query image(s) to be detected. Pass in one query image per target image.

        Examples:
        ```python
        >>> import requests
        >>> from PIL import Image
        >>> import torch
        >>> from transformers import AutoProcessor, Owlv2ForObjectDetection

        >>> processor = AutoProcessor.from_pretrained("google/owlv2-base-patch16-ensemble")
        >>> model = Owlv2ForObjectDetection.from_pretrained("google/owlv2-base-patch16-ensemble")

        >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
        >>> image = Image.open(requests.get(url, stream=True).raw)
        >>> query_url = "http://images.cocodataset.org/val2017/000000001675.jpg"
        >>> query_image = Image.open(requests.get(query_url, stream=True).raw)
        >>> inputs = processor(images=image, query_images=query_image, return_tensors="pt")

        >>> # forward pass
        >>> with torch.no_grad():
        ...     outputs = model.image_guided_detection(**inputs)

        >>> target_sizes = torch.Tensor([image.size[::-1]])

        >>> # Convert outputs (bounding boxes and class logits) to Pascal VOC format (xmin, ymin, xmax, ymax)
        >>> results = processor.post_process_image_guided_detection(
        ...     outputs=outputs, threshold=0.9, nms_threshold=0.3, target_sizes=target_sizes
        ... )
        >>> i = 0  # Retrieve predictions for the first image
        >>> boxes, scores = results[i]["boxes"], results[i]["scores"]
        >>> for box, score in zip(boxes, scores):
        ...     box = [round(i, 2) for i in box.tolist()]
        ...     print(f"Detected similar object with confidence {round(score.item(), 3)} at location {box}")
        Detected similar object with confidence 0.938 at location [327.31, 54.94, 547.39, 268.06]
        Detected similar object with confidence 0.959 at location [5.78, 360.65, 619.12, 366.39]
        Detected similar object with confidence 0.902 at location [2.85, 360.01, 627.63, 380.8]
        Detected similar object with confidence 0.985 at location [176.98, -29.45, 672.69, 182.83]
        Detected similar object with confidence 1.0 at location [6.53, 14.35, 624.87, 470.82]
        Detected similar object with confidence 0.998 at location [579.98, 29.14, 615.49, 489.05]
        Detected similar object with confidence 0.985 at location [206.15, 10.53, 247.74, 466.01]
        Detected similar object with confidence 0.947 at location [18.62, 429.72, 646.5, 457.72]
        Detected similar object with confidence 0.996 at location [523.88, 20.69, 586.84, 483.18]
        Detected similar object with confidence 0.998 at location [3.39, 360.59, 617.29, 499.21]
        Detected similar object with confidence 0.969 at location [4.47, 449.05, 614.5, 474.76]
        Detected similar object with confidence 0.966 at location [31.44, 463.65, 654.66, 471.07]
        Detected similar object with confidence 0.924 at location [30.93, 468.07, 635.35, 475.39]
        ```"""
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.return_dict

        # Compute feature maps for the input and query images
        query_feature_map = self.image_embedder(
            pixel_values=query_pixel_values, interpolate_pos_encoding=interpolate_pos_encoding
        )[0]
        feature_map, vision_outputs = self.image_embedder(
            pixel_values=pixel_values,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            interpolate_pos_encoding=interpolate_pos_encoding,
        )

        batch_size, num_patches_height, num_patches_width, hidden_dim = feature_map.shape
        image_feats = torch.reshape(feature_map, (batch_size, num_patches_height * num_patches_width, hidden_dim))

        batch_size, num_patches_height, num_patches_width, hidden_dim = query_feature_map.shape
        query_image_feats = torch.reshape(
            query_feature_map, (batch_size, num_patches_height * num_patches_width, hidden_dim)
        )
        # Get top class embedding and best box index for each query image in batch
        query_embeds, best_box_indices, query_pred_boxes = self.embed_image_query(
            query_image_feats, query_feature_map, interpolate_pos_encoding
        )

        # Predict object classes [batch_size, num_patches, num_queries+1]
        (pred_logits, class_embeds) = self.class_predictor(image_feats=image_feats, query_embeds=query_embeds)

        # Predict object boxes
        target_pred_boxes = self.box_predictor(image_feats, feature_map, interpolate_pos_encoding)

        if not return_dict:
            output = (
                feature_map,
                query_feature_map,
                target_pred_boxes,
                query_pred_boxes,
                pred_logits,
                class_embeds,
                vision_outputs.to_tuple(),
            )
            output = tuple(x for x in output if x is not None)
            return output

        return Owlv2ImageGuidedObjectDetectionOutput(
            image_embeds=feature_map,
            query_image_embeds=query_feature_map,
            target_pred_boxes=target_pred_boxes,
            query_pred_boxes=query_pred_boxes,
            logits=pred_logits,
            class_embeds=class_embeds,
            text_model_output=None,
            vision_model_output=vision_outputs,
        )

    @auto_docstring
    def forward(
        self,
        input_ids: torch.Tensor,
        pixel_values: torch.FloatTensor,
        attention_mask: Optional[torch.Tensor] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        interpolate_pos_encoding: bool = False,
        return_dict: Optional[bool] = None,
    ) -> Owlv2ObjectDetectionOutput:
        """
        input_ids (`torch.LongTensor` of shape `(batch_size * num_max_text_queries, sequence_length)`, *optional*):
            Indices of input sequence tokens in the vocabulary. Indices can be obtained using [`AutoTokenizer`]. See
            [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details. [What are input
            IDs?](../glossary#input-ids).
        output_hidden_states (`bool`, *optional*):
            Whether or not to return the last hidden state. See `text_model_last_hidden_state` and
            `vision_model_last_hidden_state` under returned tensors for more detail.

        Examples:
        ```python
        >>> import requests
        >>> from PIL import Image
        >>> import torch

        >>> from transformers import Owlv2Processor, Owlv2ForObjectDetection

        >>> processor = Owlv2Processor.from_pretrained("google/owlv2-base-patch16-ensemble")
        >>> model = Owlv2ForObjectDetection.from_pretrained("google/owlv2-base-patch16-ensemble")

        >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
        >>> image = Image.open(requests.get(url, stream=True).raw)
        >>> text_labels = [["a photo of a cat", "a photo of a dog"]]
        >>> inputs = processor(text=text_labels, images=image, return_tensors="pt")
        >>> outputs = model(**inputs)

        >>> # Target image sizes (height, width) to rescale box predictions [batch_size, 2]
        >>> target_sizes = torch.tensor([(image.height, image.width)])
        >>> # Convert outputs (bounding boxes and class logits) to Pascal VOC format (xmin, ymin, xmax, ymax)
        >>> results = processor.post_process_grounded_object_detection(
        ...     outputs=outputs, target_sizes=target_sizes, threshold=0.1, text_labels=text_labels
        ... )
        >>> # Retrieve predictions for the first image for the corresponding text queries
        >>> result = results[0]
        >>> boxes, scores, text_labels = result["boxes"], result["scores"], result["text_labels"]
        >>> for box, score, text_label in zip(boxes, scores, text_labels):
        ...     box = [round(i, 2) for i in box.tolist()]
        ...     print(f"Detected {text_label} with confidence {round(score.item(), 3)} at location {box}")
        Detected a photo of a cat with confidence 0.614 at location [341.67, 23.39, 642.32, 371.35]
        Detected a photo of a cat with confidence 0.665 at location [6.75, 51.96, 326.62, 473.13]
        ```"""
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.return_dict

        # Embed images and text queries
        query_embeds, feature_map, outputs = self.image_text_embedder(
            input_ids=input_ids,
            pixel_values=pixel_values,
            attention_mask=attention_mask,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            interpolate_pos_encoding=interpolate_pos_encoding,
        )

        # Text and vision model outputs
        text_outputs = outputs.text_model_output
        vision_outputs = outputs.vision_model_output

        batch_size, num_patches_height, num_patches_width, hidden_dim = feature_map.shape
        image_feats = torch.reshape(feature_map, (batch_size, num_patches_height * num_patches_width, hidden_dim))

        # Reshape from [batch_size * max_text_queries, hidden_dim] -> [batch_size, max_text_queries, hidden_dim]
        max_text_queries = input_ids.shape[0] // batch_size
        query_embeds = query_embeds.reshape(batch_size, max_text_queries, query_embeds.shape[-1])

        # If the first token is 0, then this is a padded query [batch_size, num_queries].
        input_ids = input_ids.reshape(batch_size, max_text_queries, input_ids.shape[-1])
        query_mask = input_ids[..., 0] > 0

        # Predict object classes [batch_size, num_patches, num_queries+1]
        (pred_logits, class_embeds) = self.class_predictor(image_feats, query_embeds, query_mask)

        # Predict objectness
        objectness_logits = self.objectness_predictor(image_feats)

        # Predict object boxes
        pred_boxes = self.box_predictor(image_feats, feature_map, interpolate_pos_encoding)

        if not return_dict:
            output = (
                pred_logits,
                objectness_logits,
                pred_boxes,
                query_embeds,
                feature_map,
                class_embeds,
                text_outputs.to_tuple(),
                vision_outputs.to_tuple(),
            )
            output = tuple(x for x in output if x is not None)
            return output

        return Owlv2ObjectDetectionOutput(
            image_embeds=feature_map,
            text_embeds=query_embeds,
            pred_boxes=pred_boxes,
            logits=pred_logits,
            objectness_logits=objectness_logits,
            class_embeds=class_embeds,
            text_model_output=text_outputs,
            vision_model_output=vision_outputs,
        )


__all__ = ["Owlv2Model", "Owlv2PreTrainedModel", "Owlv2TextModel", "Owlv2VisionModel", "Owlv2ForObjectDetection"]
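# Illustrative sketch (not part of the original module): the resizing step inside
# `Owlv2VisionEmbeddings.interpolate_pos_encoding`, shown on a stand-in position-embedding table.
# The sizes below (a 60x60 pre-trained patch grid, i.e. 960/16, resized for a 1024x768 input) are
# assumptions for this example only.
def _demo_interpolate_patch_pos_embed() -> torch.Tensor:
    dim, old_grid = 8, 60
    patch_pos_embed = torch.randn(1, old_grid * old_grid, dim)
    new_height, new_width = 1024 // 16, 768 // 16  # 64 x 48 patches
    # Same reshape -> bicubic resize -> flatten-back-to-sequence pattern used by the real method.
    grid = patch_pos_embed.reshape(1, old_grid, old_grid, dim).permute(0, 3, 1, 2)
    grid = nn.functional.interpolate(grid, size=(new_height, new_width), mode="bicubic", align_corners=False)
    return grid.permute(0, 2, 3, 1).view(1, -1, dim)  # shape: (1, 64 * 48, dim)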