"""PyTorch FocalNet model."""

import collections.abc
import math
from dataclasses import dataclass
from typing import Optional, Tuple, Union

import torch
import torch.utils.checkpoint
from torch import nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss

from ...activations import ACT2FN
from ...modeling_outputs import BackboneOutput
from ...modeling_utils import PreTrainedModel
from ...utils import ModelOutput, auto_docstring, logging
from ...utils.backbone_utils import BackboneMixin
from .configuration_focalnet import FocalNetConfig


logger = logging.get_logger(__name__)


@dataclass
class FocalNetEncoderOutput(ModelOutput):
    """
    FocalNet encoder's outputs, with potential hidden states.

    Args:
        last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`):
            Sequence of hidden-states at the output of the last layer of the model.
        hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
            Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each stage) of
            shape `(batch_size, sequence_length, hidden_size)`.

            Hidden-states of the model at the output of each layer plus the initial embedding outputs.

        reshaped_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
            Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each stage) of
            shape `(batch_size, hidden_size, height, width)`.

            Hidden-states of the model at the output of each layer plus the initial embedding outputs reshaped to
            include the spatial dimensions.
    """

    last_hidden_state: Optional[torch.FloatTensor] = None
    hidden_states: Optional[Tuple[torch.FloatTensor]] = None
    reshaped_hidden_states: Optional[Tuple[torch.FloatTensor]] = None


@dataclass
class FocalNetModelOutput(ModelOutput):
    """
    FocalNet model's outputs that also contains a pooling of the last hidden states.

    Args:
        last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`):
            Sequence of hidden-states at the output of the last layer of the model.
        pooler_output (`torch.FloatTensor` of shape `(batch_size, hidden_size)`, *optional*, returned when `add_pooling_layer=True` is passed):
            Average pooling of the last layer hidden-state.
        hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
            Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each stage) of
            shape `(batch_size, sequence_length, hidden_size)`.

            Hidden-states of the model at the output of each layer plus the initial embedding outputs.
        reshaped_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
            Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each stage) of
            shape `(batch_size, hidden_size, height, width)`.

            Hidden-states of the model at the output of each layer plus the initial embedding outputs reshaped to
            include the spatial dimensions.
    """

    last_hidden_state: Optional[torch.FloatTensor] = None
    pooler_output: Optional[torch.FloatTensor] = None
    hidden_states: Optional[Tuple[torch.FloatTensor]] = None
    reshaped_hidden_states: Optional[Tuple[torch.FloatTensor]] = None


@dataclass
class FocalNetMaskedImageModelingOutput(ModelOutput):
    """
    FocalNet masked image model outputs.

    Args:
        loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `bool_masked_pos` is provided):
            Masked image modeling (MLM) loss.
        reconstruction (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
            Reconstructed pixel values.
        hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
            Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each stage) of
            shape `(batch_size, sequence_length, hidden_size)`.

            Hidden-states of the model at the output of each layer plus the initial embedding outputs.
        reshaped_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
            Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each stage) of
            shape `(batch_size, hidden_size, height, width)`.

            Hidden-states of the model at the output of each layer plus the initial embedding outputs reshaped to
            include the spatial dimensions.
    """

    loss: Optional[torch.FloatTensor] = None
    reconstruction: Optional[torch.FloatTensor] = None
    hidden_states: Optional[Tuple[torch.FloatTensor]] = None
    reshaped_hidden_states: Optional[Tuple[torch.FloatTensor]] = None


@dataclass
class FocalNetImageClassifierOutput(ModelOutput):
    """
    FocalNet outputs for image classification.

    Args:
        loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided):
            Classification (or regression if config.num_labels==1) loss.
        logits (`torch.FloatTensor` of shape `(batch_size, config.num_labels)`):
            Classification (or regression if config.num_labels==1) scores (before SoftMax).
        hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
            Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each stage) of
            shape `(batch_size, sequence_length, hidden_size)`.

            Hidden-states of the model at the output of each layer plus the initial embedding outputs.
        reshaped_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
            Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each stage) of
            shape `(batch_size, hidden_size, height, width)`.

            Hidden-states of the model at the output of each layer plus the initial embedding outputs reshaped to
            include the spatial dimensions.
    """

    loss: Optional[torch.FloatTensor] = None
    logits: Optional[torch.FloatTensor] = None
    hidden_states: Optional[Tuple[torch.FloatTensor]] = None
    reshaped_hidden_states: Optional[Tuple[torch.FloatTensor]] = None


class FocalNetEmbeddings(nn.Module):
    """
    Construct the patch embeddings and layernorm. Optionally, also the mask token.
    """

    def __init__(self, config, use_mask_token=False):
        super().__init__()

        self.patch_embeddings = FocalNetPatchEmbeddings(
            config=config,
            image_size=config.image_size,
            patch_size=config.patch_size,
            num_channels=config.num_channels,
            embed_dim=config.embed_dim,
            use_conv_embed=config.use_conv_embed,
            is_stem=True,
        )
        self.patch_grid = self.patch_embeddings.grid_size
        self.mask_token = nn.Parameter(torch.zeros(1, 1, config.embed_dim)) if use_mask_token else None

        self.norm = nn.LayerNorm(config.embed_dim, eps=config.layer_norm_eps)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)

    def forward(
        self, pixel_values: Optional[torch.FloatTensor], bool_masked_pos: Optional[torch.BoolTensor] = None
    ) -> Tuple[torch.Tensor]:
        embeddings, output_dimensions = self.patch_embeddings(pixel_values)
        embeddings = self.norm(embeddings)
        batch_size, seq_len, _ = embeddings.size()

        if bool_masked_pos is not None:
            mask_tokens = self.mask_token.expand(batch_size, seq_len, -1)
            # replace the masked visual tokens by mask_tokens
            mask = bool_masked_pos.unsqueeze(-1).type_as(mask_tokens)
            embeddings = embeddings * (1.0 - mask) + mask_tokens * mask

        embeddings = self.dropout(embeddings)
        return embeddings, output_dimensions


class FocalNetPatchEmbeddings(nn.Module):
    def __init__(
        self,
        config,
        image_size,
        patch_size,
        num_channels,
        embed_dim,
        add_norm=False,
        use_conv_embed=False,
        is_stem=False,
    ):
        super().__init__()
        image_size = image_size if isinstance(image_size, collections.abc.Iterable) else (image_size, image_size)
        patch_size = patch_size if isinstance(patch_size, collections.abc.Iterable) else (patch_size, patch_size)
        num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.num_patches = num_patches
        self.grid_size = (image_size[0] // patch_size[0], image_size[1] // patch_size[1])

        if use_conv_embed:
            # if we choose to use conv embedding, then we treat the stem and non-stem differently
            if is_stem:
                kernel_size = 7
                padding = 2
                stride = 4
            else:
                kernel_size = 3
                padding = 1
                stride = 2
            self.projection = nn.Conv2d(
                num_channels, embed_dim, kernel_size=kernel_size, stride=stride, padding=padding
            )
        else:
            self.projection = nn.Conv2d(num_channels, embed_dim, kernel_size=patch_size, stride=patch_size)

        if add_norm:
            self.norm = nn.LayerNorm(embed_dim, eps=config.layer_norm_eps)
        else:
            self.norm = None

    def maybe_pad(self, pixel_values, height, width):
        if width % self.patch_size[1] != 0:
            pad_values = (0, self.patch_size[1] - width % self.patch_size[1])
            pixel_values = nn.functional.pad(pixel_values, pad_values)
        if height % self.patch_size[0] != 0:
            pad_values = (0, 0, 0, self.patch_size[0] - height % self.patch_size[0])
            pixel_values = nn.functional.pad(pixel_values, pad_values)
        return pixel_values

    def forward(self, pixel_values: Optional[torch.FloatTensor]) -> Tuple[torch.Tensor, Tuple[int]]:
        _, num_channels, height, width = pixel_values.shape
        if num_channels != self.num_channels:
            raise ValueError(
                "Make sure that the channel dimension of the pixel values match with the one set in the configuration."
            )
        # pad the input to be divisible by self.patch_size, if needed
        pixel_values = self.maybe_pad(pixel_values, height, width)
        embeddings = self.projection(pixel_values)
        _, _, height, width = embeddings.shape
        output_dimensions = (height, width)
        embeddings = embeddings.flatten(2).transpose(1, 2)

        if self.norm is not None:
            embeddings = self.norm(embeddings)

        return embeddings, output_dimensions


def drop_path(input: torch.Tensor, drop_prob: float = 0.0, training: bool = False) -> torch.Tensor:
    """
    Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks).

    Comment by Ross Wightman: This is the same as the DropConnect impl I created for EfficientNet, etc networks,
    however, the original name is misleading as 'Drop Connect' is a different form of dropout in a separate paper...
    See discussion: https://github.com/tensorflow/tpu/issues/494#issuecomment-532968956 ... I've opted for changing the
    layer and argument names to 'drop path' rather than mix DropConnect as a layer name and use 'survival rate' as the
    argument.
    """
    if drop_prob == 0.0 or not training:
        return input
    keep_prob = 1 - drop_prob
    shape = (input.shape[0],) + (1,) * (input.ndim - 1)  # work with diff dim tensors, not just 2D ConvNets
    random_tensor = keep_prob + torch.rand(shape, dtype=input.dtype, device=input.device)
    random_tensor.floor_()  # binarize
    output = input.div(keep_prob) * random_tensor
    return output


class FocalNetDropPath(nn.Module):
    """Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks)."""

    def __init__(self, drop_prob: Optional[float] = None) -> None:
        super().__init__()
        self.drop_prob = drop_prob

    def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
        return drop_path(hidden_states, self.drop_prob, self.training)

    def extra_repr(self) -> str:
        return "p={}".format(self.drop_prob)


class FocalNetModulation(nn.Module):
    def __init__(self, config, index, dim, focal_factor=2, bias=True, projection_dropout=0.0):
        super().__init__()

        self.dim = dim
        self.focal_window = config.focal_windows[index]
        self.focal_level = config.focal_levels[index]
        self.focal_factor = focal_factor
        self.use_post_layernorm_in_modulation = config.use_post_layernorm_in_modulation
        self.normalize_modulator = config.normalize_modulator

        self.projection_in = nn.Linear(dim, 2 * dim + (self.focal_level + 1), bias=bias)
        self.projection_context = nn.Conv2d(dim, dim, kernel_size=1, stride=1, bias=bias)

        self.activation = nn.GELU()
        self.projection_out = nn.Linear(dim, dim)
        self.projection_dropout = nn.Dropout(projection_dropout)
        self.focal_layers = nn.ModuleList()

        self.kernel_sizes = []
        for k in range(self.focal_level):
            kernel_size = self.focal_factor * k + self.focal_window
            self.focal_layers.append(
                nn.Sequential(
                    nn.Conv2d(
                        dim, dim, kernel_size=kernel_size, stride=1, groups=dim, padding=kernel_size // 2, bias=False
                    ),
                    nn.GELU(),
                )
            )
            self.kernel_sizes.append(kernel_size)
        if self.use_post_layernorm_in_modulation:
            self.layernorm = nn.LayerNorm(dim, eps=config.layer_norm_eps)

    def forward(self, hidden_state):
        """
        Args:
            hidden_state:
                Input features with shape of (batch_size, height, width, num_channels)
        """
        num_channels = hidden_state.shape[-1]

        # pre linear projection
        x = self.projection_in(hidden_state).permute(0, 3, 1, 2).contiguous()
        q, ctx, self.gates = torch.split(x, (num_channels, num_channels, self.focal_level + 1), 1)

        # context aggregation
        ctx_all = 0
        for level in range(self.focal_level):
            ctx = self.focal_layers[level](ctx)
            ctx_all = ctx_all + ctx * self.gates[:, level : level + 1]
        ctx_global = self.activation(ctx.mean(2, keepdim=True).mean(3, keepdim=True))
        ctx_all = ctx_all + ctx_global * self.gates[:, self.focal_level :]

        # normalize context
        if self.normalize_modulator:
            ctx_all = ctx_all / (self.focal_level + 1)

        # focal modulation
        self.modulator = self.projection_context(ctx_all)
        x_out = q * self.modulator
        x_out = x_out.permute(0, 2, 3, 1).contiguous()
        if self.use_post_layernorm_in_modulation:
            x_out = self.layernorm(x_out)

        # post linear projection
        x_out = self.projection_out(x_out)
        x_out = self.projection_dropout(x_out)
        return x_out


class FocalNetMlp(nn.Module):
    def __init__(self, config, in_features, hidden_features=None, out_features=None, drop=0.0):
        super().__init__()
        out_features = out_features or in_features
        hidden_features = hidden_features or in_features
        self.fc1 = nn.Linear(in_features, hidden_features)
        self.activation = ACT2FN[config.hidden_act]
        self.fc2 = nn.Linear(hidden_features, out_features)
        self.drop = nn.Dropout(drop)

    def forward(self, hidden_state):
        hidden_state = self.fc1(hidden_state)
        hidden_state = self.activation(hidden_state)
        hidden_state = self.drop(hidden_state)
        hidden_state = self.fc2(hidden_state)
        hidden_state = self.drop(hidden_state)
        return hidden_state


class FocalNetLayer(nn.Module):
    r"""Focal Modulation Network layer (block).

    Args:
        config (`FocalNetConfig`):
            Model config.
        index (`int`):
            Layer index.
        dim (`int`):
            Number of input channels.
        input_resolution (`Tuple[int]`):
            Input resolution.
        drop_path (`float`, *optional*, defaults to 0.0):
            Stochastic depth rate.
    ro   c                    s   t    || _|| _|| _|j| _|j| _tj	||j
d| _t|||| jd| _|dkr1t|nt | _tj	||j
d| _t||j }t|||| jd| _d| _d| _|jrwtj|jt| dd| _tj|jt| dd| _d S d S )Nr2   )r+   r   r   r   ro   )r+   r   r   r   rI   T)Zrequires_grad)r3   r4   r+   r   input_resolutionr?   r   use_post_layernormr   r;   r<   norm1r   
modulationry   Identityrx   norm2rn   Z	mlp_ratior   mlpgamma_1gamma_2use_layerscaler9   layerscale_valuer   Zones)rA   r+   r   r   r   rx   Zmlp_hidden_dimrC   r   r    r4     s.   
 zFocalNetLayer.__init__c           	   	   C   s   |\}}|j \}}}|}| jr|n| |}|||||}| |||| |}| js/|n| |}|| | j|  }|| | j| jrN| | 	|n| 	| |  }|S rS   )
rj   r   r   viewr   rx   r   r   r   r   )	rA   r   input_dimensionsrg   rh   rO   rP   r.   Zshortcutr   r   r    rR     s   $zFocalNetLayer.forward)ro   )r   r   r   r   r4   rR   rV   r   r   rC   r    r     s     r   c                       sB   e Zd Z fddZdejdeeef deej fddZ  Z	S )FocalNetStagec              
      s"  t     | _t j| _ fddt| jD }| | jd k r+|d  nd }| jd k r6tnd }dd tj	d j
t jddD }|t jd  t jd d   t fddt j D | _|d ur| d	|d
 jdd| _nd | _d| _d S )Nc                    s   g | ]	} j d |  qS )rX   )r/   .0i)r+   r   r    
<listcomp>  s    z*FocalNetStage.__init__.<locals>.<listcomp>r   c                 S   s   g | ]}|  qS r   )item)r   r   r   r   r    r     s    r   cpu)rt   c              
      s0   g | ]}t  ttr| nd qS ))r+   r   r   r   rx   )r   r]   listr   r+   r   rx   r   r   r   r    r     s    rX   TF)r+   r,   r-   r.   r/   rd   r0   r1   )r3   r4   r+   lendepths
num_stagesr   r5   r   ZlinspaceZdrop_path_ratesumr   r   layersr0   
downsampleZpointing)rA   r+   r   r   r/   Zout_dimr   ZdprrC   r   r    r4     s6   
$,

zFocalNetStage.__init__r   r   rG   c           	      C   s|   |\}}| j D ]}|||}q|}| jd ur1|\}}|dd|jd d||}| |\}}n||||f}|||f}|S )Nr   rX   r   rH   )r   r   rm   reshaperj   )	rA   r   r   rg   rh   Zlayer_module!hidden_states_before_downsamplingrN   stage_outputsr   r   r    rR     s   


zFocalNetStage.forward)
r   r   r   r4   r   rU   r   rn   rR   rV   r   r   rC   r    r     s    .,r   c                       sd   e Zd Z fddZ			ddejdeeef dee	 dee	 d	ee	 d
e
eef fddZ  ZS )FocalNetEncoderc                    sH   t    t j| _ | _t fddt| jD | _	d| _
d S )Nc              	      s6   g | ]}t  |d  d|  d d|  fdqS )r   rX   r   )r+   r   r   )r   )r   Zi_layerr+   r7   r   r    r     s    z,FocalNetEncoder.__init__.<locals>.<listcomp>F)r3   r4   r   r   r   r+   r   r   r   stagesgradient_checkpointing)rA   r+   r7   rC   r   r    r4     s   

zFocalNetEncoder.__init__FTr   r   output_hidden_states(output_hidden_states_before_downsamplingreturn_dictrG   c                 C   s  |rdnd }|r
dnd }|r1|j \}}	}
|j|g||
R  }|dddd}||f7 }||f7 }t| jD ]\}}| jrI| jrI| |j||}n|||}|d }|d }|d }|d |d f}|r|r|j \}}	}
|j|g|d |d f|
R  }|dddd}||f7 }||f7 }q6|r|s|j \}}	}
|j|g||
R  }|dddd}||f7 }||f7 }q6|st	dd	 ||fD S t
|||d
S )Nr   r   r
   r   rX   rH   c                 s   s    | ]	}|d ur|V  qd S rS   r   )r   vr   r   r    	<genexpr>U  s    z*FocalNetEncoder.forward.<locals>.<genexpr>)r   r   r   )rj   r   r   	enumerater   r   rr   Z_gradient_checkpointing_func__call__tupler   )rA   r   r   r   r   r   Zall_hidden_statesZall_reshaped_hidden_statesrO   rP   Zhidden_sizeZreshaped_hidden_stater   Zstage_moduler   r   rN   r   r   r    rR     s\   





zFocalNetEncoder.forward)FFT)r   r   r   r4   r   rU   r   rn   r   boolr   r   rR   rV   r   r   rC   r    r     s$    

r   c                   @   s*   e Zd ZeZdZdZdZdgZdd Z	dS )FocalNetPreTrainedModelfocalnetrE   Tr   c                 C   s   t |tjtjfr#|jjjd| jjd |j	dur!|j	j
  dS dS t |tjr8|j	j
  |jjd dS t |trL|jdurJ|jj
  dS dS t |tri| jjrk|jj| jj |jj| jj dS dS dS )zInitialize the weightsro   )r   ZstdNrI   )r]   r   r   rb   weightdataZnormal_r+   Zinitializer_ranger   Zzero_r;   Zfill_r*   r:   r   r   r   r   r   )rA   moduler   r   r    _init_weightsf  s$   



z%FocalNetPreTrainedModel._init_weightsN)
r   r   r   r   Zconfig_classZbase_model_prefixZmain_input_nameZsupports_gradient_checkpointingZ_no_split_modulesr   r   r   r   r    r   ^  s    r   c                       sn   e Zd Zd fdd	Zdd Ze				ddeej d	eej	 d
ee
 dee
 deeef f
ddZ  ZS )FocalNetModelTFc                    s   t  | || _t|j| _t|jd| jd   | _t	||d| _
t|| j
j| _tj| j|jd| _|r<tdnd| _|   dS )z
        add_pooling_layer (bool, *optional*, defaults to `True`):
            Whether to add a pooling layer
        use_mask_token (`bool`, *optional*, defaults to `False`):
            Whether to use a mask token for masked image modeling.
        """
        super().__init__(config)
        self.config = config
        self.num_stages = len(config.depths)
        self.num_features = int(config.embed_dim * 2 ** (self.num_stages - 1))

        self.embeddings = FocalNetEmbeddings(config, use_mask_token=use_mask_token)
        self.encoder = FocalNetEncoder(config, self.embeddings.patch_grid)

        self.layernorm = nn.LayerNorm(self.num_features, eps=config.layer_norm_eps)
        self.pooler = nn.AdaptiveAvgPool1d(1) if add_pooling_layer else None

        # Initialize weights and apply final processing
        self.post_init()

    def get_input_embeddings(self):
        return self.embeddings.patch_embeddings

    @auto_docstring
    def forward(
        self,
        pixel_values: Optional[torch.FloatTensor] = None,
        bool_masked_pos: Optional[torch.BoolTensor] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[Tuple, FocalNetModelOutput]:
        r"""
        bool_masked_pos (`torch.BoolTensor` of shape `(batch_size, num_patches)`):
            Boolean masked positions. Indicates which patches are masked (1) and which aren't (0).
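
        Example (an illustrative sketch, not part of the original docstring; it assumes the publicly
        available `microsoft/focalnet-tiny` checkpoint, but any FocalNet checkpoint works):

        ```python
        >>> from transformers import AutoImageProcessor, FocalNetModel
        >>> import torch
        >>> from PIL import Image
        >>> import requests

        >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
        >>> image = Image.open(requests.get(url, stream=True).raw)

        >>> image_processor = AutoImageProcessor.from_pretrained("microsoft/focalnet-tiny")
        >>> model = FocalNetModel.from_pretrained("microsoft/focalnet-tiny")

        >>> inputs = image_processor(images=image, return_tensors="pt")
        >>> with torch.no_grad():
        ...     outputs = model(**inputs)
        >>> last_hidden_state = outputs.last_hidden_state
        ```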
        """
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        if pixel_values is None:
            raise ValueError("You have to specify pixel_values")

        embedding_output, input_dimensions = self.embeddings(pixel_values, bool_masked_pos=bool_masked_pos)

        encoder_outputs = self.encoder(
            embedding_output,
            input_dimensions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )

        sequence_output = encoder_outputs[0]
        sequence_output = self.layernorm(sequence_output)

        pooled_output = None
        if self.pooler is not None:
            pooled_output = self.pooler(sequence_output.transpose(1, 2))
            pooled_output = torch.flatten(pooled_output, 1)

        if not return_dict:
            output = (sequence_output, pooled_output) + encoder_outputs[1:]
            return output

        return FocalNetModelOutput(
            last_hidden_state=sequence_output,
            pooler_output=pooled_output,
            hidden_states=encoder_outputs.hidden_states,
            reshaped_hidden_states=encoder_outputs.reshaped_hidden_states,
        )


@auto_docstring(
    custom_intro="""
    FocalNet Model with a decoder on top for masked image modeling.

    This follows the same implementation as in [SimMIM](https://arxiv.org/abs/2111.09886).

    <Tip>

    Note that we provide a script to pre-train this model on custom data in our [examples
    directory](https://github.com/huggingface/transformers/tree/main/examples/pytorch/image-pretraining).

    </Tip>
    """
)
class FocalNetForMaskedImageModeling(FocalNetPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)

        self.focalnet = FocalNetModel(config, add_pooling_layer=False, use_mask_token=True)

        self.num_stages = len(config.depths)
        num_features = int(config.embed_dim * 2 ** (self.num_stages - 1))
        self.decoder = nn.Sequential(
            nn.Conv2d(
                in_channels=num_features, out_channels=config.encoder_stride**2 * config.num_channels, kernel_size=1
            ),
            nn.PixelShuffle(config.encoder_stride),
        )

        # Initialize weights and apply final processing
        self.post_init()

    @auto_docstring
    def forward(
        self,
        pixel_values: Optional[torch.FloatTensor] = None,
        bool_masked_pos: Optional[torch.BoolTensor] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[Tuple, FocalNetMaskedImageModelingOutput]:
        r"""
        bool_masked_pos (`torch.BoolTensor` of shape `(batch_size, num_patches)`):
            Boolean masked positions. Indicates which patches are masked (1) and which aren't (0).

        Examples:
        ```python
        >>> from transformers import AutoImageProcessor, FocalNetConfig, FocalNetForMaskedImageModeling
        >>> import torch
        >>> from PIL import Image
        >>> import requests

        >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
        >>> image = Image.open(requests.get(url, stream=True).raw)

        >>> image_processor = AutoImageProcessor.from_pretrained("microsoft/focalnet-base-simmim-window6-192")
        >>> config = FocalNetConfig()
        >>> model = FocalNetForMaskedImageModeling(config)

        >>> num_patches = (model.config.image_size // model.config.patch_size) ** 2
        >>> pixel_values = image_processor(images=image, return_tensors="pt").pixel_values
        >>> # create random boolean mask of shape (batch_size, num_patches)
        >>> bool_masked_pos = torch.randint(low=0, high=2, size=(1, num_patches)).bool()

        >>> outputs = model(pixel_values, bool_masked_pos=bool_masked_pos)
        >>> loss, reconstructed_pixel_values = outputs.loss, outputs.reconstruction
        >>> list(reconstructed_pixel_values.shape)
        [1, 3, 192, 192]
        ```
        """
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        outputs = self.focalnet(
            pixel_values,
            bool_masked_pos=bool_masked_pos,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )

        sequence_output = outputs[0]
        # Reshape to (batch_size, num_channels, height, width)
        sequence_output = sequence_output.transpose(1, 2)
        batch_size, num_channels, sequence_length = sequence_output.shape
        height = width = math.floor(sequence_length**0.5)
        sequence_output = sequence_output.reshape(batch_size, num_channels, height, width)

        # Reconstruct pixel values
        reconstructed_pixel_values = self.decoder(sequence_output)

        masked_im_loss = None
        if bool_masked_pos is not None:
            size = self.config.image_size // self.config.patch_size
            bool_masked_pos = bool_masked_pos.reshape(-1, size, size)
            mask = (
                bool_masked_pos.repeat_interleave(self.config.patch_size, 1)
                .repeat_interleave(self.config.patch_size, 2)
                .unsqueeze(1)
                .contiguous()
            )
            reconstruction_loss = nn.functional.l1_loss(pixel_values, reconstructed_pixel_values, reduction="none")
            masked_im_loss = (reconstruction_loss * mask).sum() / (mask.sum() + 1e-5) / self.config.num_channels

        if not return_dict:
            output = (reconstructed_pixel_values,) + outputs[2:]
            return ((masked_im_loss,) + output) if masked_im_loss is not None else output

        return FocalNetMaskedImageModelingOutput(
            loss=masked_im_loss,
            reconstruction=reconstructed_pixel_values,
            hidden_states=outputs.hidden_states,
            reshaped_hidden_states=outputs.reshaped_hidden_states,
        )


@auto_docstring(
    custom_intro="""
    FocalNet Model with an image classification head on top (a linear layer on top of the pooled output) e.g. for
    ImageNet.
    """
)
class FocalNetForImageClassification(FocalNetPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)

        self.num_labels = config.num_labels
        self.focalnet = FocalNetModel(config)

        # Classifier head
        self.classifier = (
            nn.Linear(self.focalnet.num_features, config.num_labels) if config.num_labels > 0 else nn.Identity()
        )

        # Initialize weights and apply final processing
        self.post_init()

    @auto_docstring
    def forward(
        self,
        pixel_values: Optional[torch.FloatTensor] = None,
        labels: Optional[torch.LongTensor] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[Tuple, FocalNetImageClassifierOutput]:
        r"""
        labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
            Labels for computing the image classification/regression loss. Indices should be in `[0, ...,
            config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If
            `config.num_labels > 1` a classification loss is computed (Cross-Entropy).
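
        Example (an illustrative sketch, not taken from the original source; it assumes the
        `microsoft/focalnet-tiny` ImageNet-classification checkpoint):

        ```python
        >>> from transformers import AutoImageProcessor, FocalNetForImageClassification
        >>> import torch
        >>> from PIL import Image
        >>> import requests

        >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
        >>> image = Image.open(requests.get(url, stream=True).raw)

        >>> image_processor = AutoImageProcessor.from_pretrained("microsoft/focalnet-tiny")
        >>> model = FocalNetForImageClassification.from_pretrained("microsoft/focalnet-tiny")

        >>> inputs = image_processor(images=image, return_tensors="pt")
        >>> with torch.no_grad():
        ...     logits = model(**inputs).logits
        >>> predicted_label = logits.argmax(-1).item()
        >>> print(model.config.id2label[predicted_label])
        ```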
        """
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        outputs = self.focalnet(
            pixel_values,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )

        pooled_output = outputs[1]
        logits = self.classifier(pooled_output)

        loss = None
        if labels is not None:
            if self.config.problem_type is None:
                if self.num_labels == 1:
                    self.config.problem_type = "regression"
                elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
                    self.config.problem_type = "single_label_classification"
                else:
                    self.config.problem_type = "multi_label_classification"

            if self.config.problem_type == "regression":
                loss_fct = MSELoss()
                if self.num_labels == 1:
                    loss = loss_fct(logits.squeeze(), labels.squeeze())
                else:
                    loss = loss_fct(logits, labels)
            elif self.config.problem_type == "single_label_classification":
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
            elif self.config.problem_type == "multi_label_classification":
                loss_fct = BCEWithLogitsLoss()
                loss = loss_fct(logits, labels)

        if not return_dict:
            output = (logits,) + outputs[2:]
            return ((loss,) + output) if loss is not None else output

        return FocalNetImageClassifierOutput(
            loss=loss,
            logits=logits,
            hidden_states=outputs.hidden_states,
            reshaped_hidden_states=outputs.reshaped_hidden_states,
        )


@auto_docstring(
    custom_intro="""
    FocalNet backbone, to be used with frameworks like X-Decoder.
    """
)
class FocalNetBackbone(FocalNetPreTrainedModel, BackboneMixin):
    def __init__(self, config: FocalNetConfig):
        super().__init__(config)
        super()._init_backbone(config)

        self.num_features = [config.embed_dim] + config.hidden_sizes
        self.focalnet = FocalNetModel(config)

        # initialize weights and apply final processing
        self.post_init()

    @auto_docstring
    def forward(
        self,
        pixel_values: torch.Tensor,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> BackboneOutput:
        r"""
        Examples:

        ```python
        >>> from transformers import AutoImageProcessor, AutoBackbone
        >>> import torch
        >>> from PIL import Image
        >>> import requests

        >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
        >>> image = Image.open(requests.get(url, stream=True).raw)

        >>> processor = AutoImageProcessor.from_pretrained("microsoft/focalnet-tiny-lrf")
        >>> model = AutoBackbone.from_pretrained("microsoft/focalnet-tiny-lrf")

        >>> inputs = processor(image, return_tensors="pt")
        >>> outputs = model(**inputs)
        ```
        """
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )

        outputs = self.focalnet(pixel_values, output_hidden_states=True, return_dict=True)

        hidden_states = outputs.reshaped_hidden_states

        feature_maps = ()
        for idx, stage in enumerate(self.stage_names):
            if stage in self.out_features:
                feature_maps += (hidden_states[idx],)

        if not return_dict:
            output = (feature_maps,)
            if output_hidden_states:
                output += (outputs.hidden_states,)
            return output

        return BackboneOutput(
            feature_maps=feature_maps,
            hidden_states=outputs.hidden_states if output_hidden_states else None,
            attentions=None,
        )


__all__ = [
    "FocalNetForImageClassification",
    "FocalNetForMaskedImageModeling",
    "FocalNetBackbone",
    "FocalNetModel",
    "FocalNetPreTrainedModel",
]