a
    hZ                     @   s  U d dl Z d dlZd dlmZ d dlmZ d dlmZ d dlm	Z	m
Z
mZmZ d dlZd dlmZmZ d dlmZ dd	lmZmZ dd
lmZmZ ddlmZ ddlmZmZmZ ddlmZ ddl m!Z!m"Z"m#Z# g dZ$eG dd dZ%G dd de%Z&G dd de%Z'G dd dej(Z)G dd dej(Z*G dd dej(Z+eee&e'f  e,ee- ee e.e	e+dddZ/e0e	e1eee&e'f  ee- f d d!d"Z2d#eiZ3e4e0e	f e5d$< i e3d%d&d'Z6i e3d(d)d'Z7G d*d+ d+eZ8G d,d- d-eZ9G d.d/ d/eZ:G d0d1 d1eZ;G d2d3 d3eZ<G d4d5 d5eZ=G d6d7 d7eZ>G d8d9 d9eZ?G d:d; d;eZ@G d<d= d=eZAG d>d? d?eZBe e#d@e8jCfdAddBdCee8 e.e	e+dDdEdFZDe e#d@e9jCfdAddBdCee9 e.e	e+dDdGdHZEe e#d@e:jCfdAddBdCee: e.e	e+dDdIdJZFe e#d@e;jCfdAddBdCee; e.e	e+dDdKdLZGe e#d@e<jCfdAddBdCee< e.e	e+dDdMdNZHe e#d@e=jCfdAddBdCee= e.e	e+dDdOdPZIe e#d@e>jCfdAddBdCee> e.e	e+dDdQdRZJe e#d@e?jCfdAddBdCee? e.e	e+dDdSdTZKe e#d@e@jCfdAddBdCee@ e.e	e+dDdUdVZLe e#d@eAjCfdAddBdCeeA e.e	e+dDdWdXZMe e#d@eBjCfdAddBdCeeB e.e	e+dDdYdZZNdS )[    N)Sequence)	dataclass)partial)AnyCallableOptionalUnion)nnTensor)StochasticDepth   )Conv2dNormActivationSqueezeExcitation)ImageClassificationInterpolationMode)_log_api_usage_once   )register_modelWeightsWeightsEnum)_IMAGENET_CATEGORIES)_make_divisible_ovewrite_named_paramhandle_legacy_interface)EfficientNetEfficientNet_B0_WeightsEfficientNet_B1_WeightsEfficientNet_B2_WeightsEfficientNet_B3_WeightsEfficientNet_B4_WeightsEfficientNet_B5_WeightsEfficientNet_B6_WeightsEfficientNet_B7_WeightsEfficientNet_V2_S_WeightsEfficientNet_V2_M_WeightsEfficientNet_V2_L_Weightsefficientnet_b0efficientnet_b1efficientnet_b2efficientnet_b3efficientnet_b4efficientnet_b5efficientnet_b6efficientnet_b7efficientnet_v2_sefficientnet_v2_mefficientnet_v2_lc                   @   sn   e Zd ZU eed< eed< eed< eed< eed< eed< edejf ed< e	deee
e ed
ddZd	S )_MBConvConfigexpand_ratiokernelstrideinput_channelsout_channels
num_layers.blockN)channels
width_mult	min_valuereturnc                 C   s   t | | d|S )N   )r   )r9   r:   r;    r>   M/var/www/auris/lib/python3.9/site-packages/torchvision/models/efficientnet.pyadjust_channels9   s    z_MBConvConfig.adjust_channels)N)__name__
__module____qualname__float__annotations__intr   r	   Modulestaticmethodr   r@   r>   r>   r>   r?   r1   /   s   
r1   c                       sX   e Zd Zd
eeeeeeeeeedejf  dd
 fddZ	e
eeddd	Z  ZS )MBConvConfig      ?N.)
r2   r3   r4   r5   r6   r7   r:   
depth_multr8   r<   c
           
   	      sL   |  ||}|  ||}| ||}|	d u r0t}	t |||||||	 d S N)r@   adjust_depthMBConvsuper__init__)
selfr2   r3   r4   r5   r6   r7   r:   rK   r8   	__class__r>   r?   rP   @   s    zMBConvConfig.__init__r7   rK   c                 C   s   t t| | S rL   )rF   mathceilrT   r>   r>   r?   rM   S   s    zMBConvConfig.adjust_depth)rJ   rJ   N)rA   rB   rC   rD   rF   r   r   r	   rG   rP   rH   rM   __classcell__r>   r>   rR   r?   rI   >   s"   
   rI   c                       s@   e Zd Zdeeeeeeeedejf  dd fddZ	  Z
S )FusedMBConvConfigN.)r2   r3   r4   r5   r6   r7   r8   r<   c              	      s(   |d u rt }t ||||||| d S rL   )FusedMBConvrO   rP   )rQ   r2   r3   r4   r5   r6   r7   r8   rR   r>   r?   rP   Z   s    
zFusedMBConvConfig.__init__)N)rA   rB   rC   rD   rF   r   r   r	   rG   rP   rW   r>   r>   rR   r?   rX   X   s   
 rX   c                       sR   e Zd Zefeeedejf edejf dd fddZ	e
e
dddZ  ZS )	rN   .N)cnfstochastic_depth_prob
norm_layerse_layerr<   c           	         s  t    d|j  kr dks*n td|jdko>|j|jk| _g }tj}|	|j|j
}||jkr|t|j|d||d |t|||j|j|||d td|jd }||||ttjddd	 |t||jd|d d tj| | _t|d
| _|j| _d S )Nr   r   illegal stride valuekernel_sizer\   activation_layer)r`   r4   groupsr\   ra      T)inplace)Z
activationrow)rO   rP   r4   
ValueErrorr5   r6   use_res_connectr	   SiLUr@   r2   appendr   r3   maxr   
Sequentialr8   r   stochastic_depth)	rQ   rZ   r[   r\   r]   layersra   expanded_channelsZsqueeze_channelsrR   r>   r?   rP   j   sL    

zMBConv.__init__inputr<   c                 C   s&   |  |}| jr"| |}||7 }|S rL   r8   rg   rl   rQ   rp   resultr>   r>   r?   forward   s
    

zMBConv.forward)rA   rB   rC   r   rI   rD   r   r	   rG   rP   r
   rt   rW   r>   r>   rR   r?   rN   i   s   :rN   c                       sB   e Zd Zeeedejf dd fddZe	e	dddZ
  ZS )	rY   .N)rZ   r[   r\   r<   c              
      s   t    d|j  kr dks*n td|jdko>|j|jk| _g }tj}|	|j|j
}||jkr|t|j||j|j||d |t||jd|d d n"|t|j|j|j|j||d tj| | _t|d| _|j| _d S )Nr   r   r^   r`   r4   r\   ra   r_   re   )rO   rP   r4   rf   r5   r6   rg   r	   rh   r@   r2   ri   r   r3   rk   r8   r   rl   )rQ   rZ   r[   r\   rm   ra   rn   rR   r>   r?   rP      sH    

zFusedMBConv.__init__ro   c                 C   s&   |  |}| jr"| |}||7 }|S rL   rq   rr   r>   r>   r?   rt      s
    

zFusedMBConv.forward)rA   rB   rC   rX   rD   r   r	   rG   rP   r
   rt   rW   r>   r>   rR   r?   rY      s   4rY   c                	       sn   e Zd Zdeeeef  eeee	e
dejf  e	e dd fddZeedd	d
ZeedddZ  ZS )r   皙?  N.)inverted_residual_settingdropoutr[   num_classesr\   last_channelr<   c              
      s<  t    t|  |s tdn$t|tr<tdd |D sDtd|du rRtj	}g }|d j
}|td|dd|tjd	 td
d |D }	d}
|D ]p}g }t|jD ]L}t|}|r|j|_
d|_|t|
 |	 }||||| |
d7 }
q|tj|  q|d j}|dur |nd| }|t||d|tjd tj| | _td| _ttj|ddt||| _|  D ]}t|tjrtjj |j!dd |j"dur4tj#|j" nrt|tj	tj$frtj%|j! tj#|j" n@t|tjrdt&'|j( }tj)|j!| | tj#|j" qdS )a  
        EfficientNet V1 and V2 main class

        Args:
            inverted_residual_setting (Sequence[Union[MBConvConfig, FusedMBConvConfig]]): Network structure
            dropout (float): The droupout probability
            stochastic_depth_prob (float): The stochastic depth probability
            num_classes (int): Number of classes
            norm_layer (Optional[Callable[..., nn.Module]]): Module specifying the normalization layer to use
            last_channel (int): The number of channels on the penultimate layer
        z1The inverted_residual_setting should not be emptyc                 S   s   g | ]}t |tqS r>   )
isinstancer1   ).0sr>   r>   r?   
<listcomp>      z)EfficientNet.__init__.<locals>.<listcomp>z:The inverted_residual_setting should be List[MBConvConfig]Nr      r   ru   c                 s   s   | ]}|j V  qd S rL   )r7   )r}   rZ   r>   r>   r?   	<genexpr>  r   z(EfficientNet.__init__.<locals>.<genexpr>r   rc   r_   T)prd   Zfan_out)moderJ   )*rO   rP   r   rf   r|   r   all	TypeErrorr	   BatchNorm2dr5   ri   r   rh   sumranger7   copyr6   r4   rD   r8   rk   featuresZAdaptiveAvgPool2davgpoolZDropoutZLinear
classifiermodulesZConv2dinitZkaiming_normal_ZweightZbiasZzeros_Z	GroupNormZones_rU   sqrtZout_featuresZuniform_)rQ   rx   ry   r[   rz   r\   r{   rm   Zfirstconv_output_channelsZtotal_stage_blocksZstage_block_idrZ   Zstage_Z	block_cnfZsd_probZlastconv_input_channelsZlastconv_output_channelsmZ
init_rangerR   r>   r?   rP      sx    







zEfficientNet.__init__)xr<   c                 C   s.   |  |}| |}t|d}| |}|S )Nr   )r   r   torchflattenr   rQ   r   r>   r>   r?   _forward_implM  s
    


zEfficientNet._forward_implc                 C   s
   |  |S rL   )r   r   r>   r>   r?   rt   W  s    zEfficientNet.forward)rv   rw   NN)rA   rB   rC   r   r   rI   rX   rD   rF   r   r   r	   rG   rP   r
   r   rt   rW   r>   r>   rR   r?   r      s       c
r   )rx   ry   r{   weightsprogresskwargsr<   c                 K   sT   |d urt |dt|jd  t| |fd|i|}|d urP||j|dd |S )Nrz   
categoriesr{   T)r   Z
check_hash)r   lenmetar   Zload_state_dictZget_state_dict)rx   ry   r{   r   r   r   modelr>   r>   r?   _efficientnet[  s    r   )archr   r<   c                 K   s:  |  drtt|d|dd}|dddddd|d	dd
ddd
|d	dd
ddd
|d	dd
ddd|d	ddddd|d	dd
ddd|d	dddddg}d }n|  drtdddddd
tddd
dddtddd
dddtddd
ddd	td	dddddtd	dd
dddg}d}n|  drtddddddtddd
dddtddd
dddtddd
dddtd	dddddtd	dd
dd d!td	ddd d"dg}d}n|  d#r$tddddddtddd
dddtddd
dd$dtddd
d$dd%td	dddd&d'td	dd
d&d(d)td	ddd(d*dg}d}ntd+|  ||fS ),NZefficientnet_br:   rK   r:   rK   r   r             r         (   P   p      rc   @  r.   0   @         	         i   r/            i0     i   r0   `   
              i  zUnsupported model type )
startswithr   rI   poprX   rf   )r   r   Z
bneck_confrx   r{   r>   r>   r?   _efficientnet_confn  sT    
			r   r   _COMMON_META)r   r   zUhttps://github.com/pytorch/vision/tree/main/references/classification#efficientnet-v1)Zmin_sizerecipe)!   r   zUhttps://github.com/pytorch/vision/tree/main/references/classification#efficientnet-v2c                
   @   sJ   e Zd Zedeeddejdi eddddd	id
ddddZ	e	Z
dS )r   zJhttps://download.pytorch.org/models/efficientnet_b0_rwightman-7f5810bc.pthr   r   	crop_sizeresize_sizeinterpolationidP ImageNet-1Kg?5^IlS@g5^IbW@zacc@1zacc@5gNbX9?g~jts4@1These weights are ported from the original paper.
num_params_metrics_ops
_file_size_docsurlZ
transformsr   NrA   rB   rC   r   r   r   r   BICUBIC_COMMON_META_V1IMAGENET1K_V1DEFAULTr>   r>   r>   r?   r     s(   
r   c                   @   s   e Zd Zedeeddejdi eddddd	id
ddddZ	edeeddej
di edddddd	id
ddddZeZdS )r   zJhttps://download.pytorch.org/models/efficientnet_b1_rwightman-bac287d4.pth   r   r   iv r   g+S@gClW@r   gCl?gM">@r   r   r   z@https://download.pytorch.org/models/efficientnet_b1-c27df63c.pth   zOhttps://github.com/pytorch/vision/issues/3995#new-recipe-with-lr-wd-crop-tuninggʡS@gƻW@gA`">@$  
                These weights improve upon the results of the original paper by using a modified version of TorchVision's
                `new training recipe
                <https://pytorch.org/blog/how-to-train-state-of-the-art-models-using-torchvision-latest-primitives/>`_.
            )r   r   r   r   r   r   N)rA   rB   rC   r   r   r   r   r   r   r   BILINEARZIMAGENET1K_V2r   r>   r>   r>   r?   r     sP   

r   c                
   @   sJ   e Zd Zedeeddejdi edddddid	d
dddZ	e	Z
dS )r   zJhttps://download.pytorch.org/models/efficientnet_b2_rwightman-c35c1473.pthi   r   i r   gx&T@gp=
W@r   g rh?gʡEA@r   r   r   Nr   r>   r>   r>   r?   r      s(   
r   c                
   @   sJ   e Zd Zedeeddejdi eddddd	id
ddddZ	e	Z
dS )r   zJhttps://download.pytorch.org/models/efficientnet_b3_rwightman-b3899882.pthi,  r   r   i r   gnT@g~jtX@r   gZd;?gd;OG@r   r   r   Nr   r>   r>   r>   r?   r     s(   
r   c                
   @   sJ   e Zd Zedeeddejdi eddddd	id
ddddZ	e	Z
dS )r   zJhttps://download.pytorch.org/models/efficientnet_b4_rwightman-23ab8bcd.pthi|  r   r   i0!'r   gjtT@gt&X@r   g~jt@gKR@r   r   r   Nr   r>   r>   r>   r?   r   0  s(   
r   c                
   @   sJ   e Zd Zedeeddejdi edddddid	d
dddZ	e	Z
dS )r    zJhttps://download.pytorch.org/models/efficientnet_b5_lukemelas-1a07897c.pthi  r   ir   g#~jT@gx&1(X@r   gx&1$@gK7]@r   r   r   Nr   r>   r>   r>   r?   r    H  s(   
r    c                
   @   sJ   e Zd Zedeeddejdi edddddid	d
dddZ	e	Z
dS )r!   zJhttps://download.pytorch.org/models/efficientnet_b6_lukemelas-24a108a5.pthi  r   ir   gn U@gv:X@r   g rh3@g$d@r   r   r   Nr   r>   r>   r>   r?   r!   `  s(   
r!   c                
   @   sJ   e Zd Zedeeddejdi edddddid	d
dddZ	e	Z
dS )r"   zJhttps://download.pytorch.org/models/efficientnet_b7_lukemelas-c5b4e57e.pthiX  r   icr   g+U@g'1:X@r   gsh|B@go@r   r   r   Nr   r>   r>   r>   r?   r"   x  s(   
r"   c                
   @   sJ   e Zd Zedeeddejdi edddddid	d
dddZ	e	Z
dS )r#   zBhttps://download.pytorch.org/models/efficientnet_v2_s-dd5fe13b.pthr   r   i8nGr   g;OU@gx&18X@r   gZd @gVT@r   r   r   NrA   rB   rC   r   r   r   r   r   _COMMON_META_V2r   r   r>   r>   r>   r?   r#     s.   r#   c                
   @   sJ   e Zd Zedeeddejdi edddddid	d
dddZ	e	Z
dS )r$   zBhttps://download.pytorch.org/models/efficientnet_v2_m-dc08266a.pth  r   i:r   gI+GU@gDlIX@r   gE8@gQ j@r   r   r   Nr   r>   r>   r>   r?   r$     s.   r$   c                
   @   sN   e Zd Zedeeddejdddi eddddd	id
ddddZ	e	Z
dS )r%   zBhttps://download.pytorch.org/models/efficientnet_v2_l-59c71312.pthr   )      ?r   r   )r   r   r   meanZstdiHfr   gʡEsU@gOnrX@r   g
ףp=
L@gI+i|@r   r   r   N)rA   rB   rC   r   r   r   r   r   r   r   r   r>   r>   r>   r?   r%     s2   r%   Z
pretrained)r   T)r   r   )r   r   r   r<   c                 K   s<   t | } tdddd\}}t||dd|| |fi |S )a  EfficientNet B0 model architecture from the `EfficientNet: Rethinking Model Scaling for Convolutional
    Neural Networks <https://arxiv.org/abs/1905.11946>`_ paper.

    Args:
        weights (:class:`~torchvision.models.EfficientNet_B0_Weights`, optional): The
            pretrained weights to use. See
            :class:`~torchvision.models.EfficientNet_B0_Weights` below for
            more details, and possible values. By default, no pre-trained
            weights are used.
        progress (bool, optional): If True, displays a progress bar of the
            download to stderr. Default is True.
        **kwargs: parameters passed to the ``torchvision.models.efficientnet.EfficientNet``
            base class. Please refer to the `source code
            <https://github.com/pytorch/vision/blob/main/torchvision/models/efficientnet.py>`_
            for more details about this class.
    .. autoclass:: torchvision.models.EfficientNet_B0_Weights
        :members:
    r&   rJ   r   ry   rv   )r   verifyr   r   r   r   r   r   rx   r{   r>   r>   r?   r&     s    
r&   c                 K   s<   t | } tdddd\}}t||dd|| |fi |S )a  EfficientNet B1 model architecture from the `EfficientNet: Rethinking Model Scaling for Convolutional
    Neural Networks <https://arxiv.org/abs/1905.11946>`_ paper.

    Args:
        weights (:class:`~torchvision.models.EfficientNet_B1_Weights`, optional): The
            pretrained weights to use. See
            :class:`~torchvision.models.EfficientNet_B1_Weights` below for
            more details, and possible values. By default, no pre-trained
            weights are used.
        progress (bool, optional): If True, displays a progress bar of the
            download to stderr. Default is True.
        **kwargs: parameters passed to the ``torchvision.models.efficientnet.EfficientNet``
            base class. Please refer to the `source code
            <https://github.com/pytorch/vision/blob/main/torchvision/models/efficientnet.py>`_
            for more details about this class.
    .. autoclass:: torchvision.models.EfficientNet_B1_Weights
        :members:
    r'   rJ   皙?r   ry   rv   )r   r   r   r   r   r   r>   r>   r?   r'     s    
r'   c                 K   s<   t | } tdddd\}}t||dd|| |fi |S )a  EfficientNet B2 model architecture from the `EfficientNet: Rethinking Model Scaling for Convolutional
    Neural Networks <https://arxiv.org/abs/1905.11946>`_ paper.

    Args:
        weights (:class:`~torchvision.models.EfficientNet_B2_Weights`, optional): The
            pretrained weights to use. See
            :class:`~torchvision.models.EfficientNet_B2_Weights` below for
            more details, and possible values. By default, no pre-trained
            weights are used.
        progress (bool, optional): If True, displays a progress bar of the
            download to stderr. Default is True.
        **kwargs: parameters passed to the ``torchvision.models.efficientnet.EfficientNet``
            base class. Please refer to the `source code
            <https://github.com/pytorch/vision/blob/main/torchvision/models/efficientnet.py>`_
            for more details about this class.
    .. autoclass:: torchvision.models.EfficientNet_B2_Weights
        :members:
    r(   r   333333?r   ry   333333?)r   r   r   r   r   r   r>   r>   r?   r(   '  s    
r(   c                 K   s<   t | } tdddd\}}t||dd|| |fi |S )a  EfficientNet B3 model architecture from the `EfficientNet: Rethinking Model Scaling for Convolutional
    Neural Networks <https://arxiv.org/abs/1905.11946>`_ paper.

    Args:
        weights (:class:`~torchvision.models.EfficientNet_B3_Weights`, optional): The
            pretrained weights to use. See
            :class:`~torchvision.models.EfficientNet_B3_Weights` below for
            more details, and possible values. By default, no pre-trained
            weights are used.
        progress (bool, optional): If True, displays a progress bar of the
            download to stderr. Default is True.
        **kwargs: parameters passed to the ``torchvision.models.efficientnet.EfficientNet``
            base class. Please refer to the `source code
            <https://github.com/pytorch/vision/blob/main/torchvision/models/efficientnet.py>`_
            for more details about this class.
    .. autoclass:: torchvision.models.EfficientNet_B3_Weights
        :members:
    r)   r   ffffff?r   ry   r   )r   r   r   r   r   r   r>   r>   r?   r)   F  s    

r)   c                 K   s<   t | } tdddd\}}t||dd|| |fi |S )a  EfficientNet B4 model architecture from the `EfficientNet: Rethinking Model Scaling for Convolutional
    Neural Networks <https://arxiv.org/abs/1905.11946>`_ paper.

    Args:
        weights (:class:`~torchvision.models.EfficientNet_B4_Weights`, optional): The
            pretrained weights to use. See
            :class:`~torchvision.models.EfficientNet_B4_Weights` below for
            more details, and possible values. By default, no pre-trained
            weights are used.
        progress (bool, optional): If True, displays a progress bar of the
            download to stderr. Default is True.
        **kwargs: parameters passed to the ``torchvision.models.efficientnet.EfficientNet``
            base class. Please refer to the `source code
            <https://github.com/pytorch/vision/blob/main/torchvision/models/efficientnet.py>`_
            for more details about this class.
    .. autoclass:: torchvision.models.EfficientNet_B4_Weights
        :members:
    r*   r   ?r   ry   皙?)r   r   r   r   r   r   r>   r>   r?   r*   j  s    

r*   c                 K   sL   t | } tdddd\}}t||dd|| |fdttjdd	d
i|S )a  EfficientNet B5 model architecture from the `EfficientNet: Rethinking Model Scaling for Convolutional
    Neural Networks <https://arxiv.org/abs/1905.11946>`_ paper.

    Args:
        weights (:class:`~torchvision.models.EfficientNet_B5_Weights`, optional): The
            pretrained weights to use. See
            :class:`~torchvision.models.EfficientNet_B5_Weights` below for
            more details, and possible values. By default, no pre-trained
            weights are used.
        progress (bool, optional): If True, displays a progress bar of the
            download to stderr. Default is True.
        **kwargs: parameters passed to the ``torchvision.models.efficientnet.EfficientNet``
            base class. Please refer to the `source code
            <https://github.com/pytorch/vision/blob/main/torchvision/models/efficientnet.py>`_
            for more details about this class.
    .. autoclass:: torchvision.models.EfficientNet_B5_Weights
        :members:
    r+   g?g@r   ry   r   r\   MbP?{Gz?epsZmomentum)r    r   r   r   r   r   r	   r   r   r>   r>   r?   r+     s    

r+   c                 K   sL   t | } tdddd\}}t||dd|| |fdttjdd	d
i|S )a  EfficientNet B6 model architecture from the `EfficientNet: Rethinking Model Scaling for Convolutional
    Neural Networks <https://arxiv.org/abs/1905.11946>`_ paper.

    Args:
        weights (:class:`~torchvision.models.EfficientNet_B6_Weights`, optional): The
            pretrained weights to use. See
            :class:`~torchvision.models.EfficientNet_B6_Weights` below for
            more details, and possible values. By default, no pre-trained
            weights are used.
        progress (bool, optional): If True, displays a progress bar of the
            download to stderr. Default is True.
        **kwargs: parameters passed to the ``torchvision.models.efficientnet.EfficientNet``
            base class. Please refer to the `source code
            <https://github.com/pytorch/vision/blob/main/torchvision/models/efficientnet.py>`_
            for more details about this class.
    .. autoclass:: torchvision.models.EfficientNet_B6_Weights
        :members:
    r,   r   g@r   ry   r   r\   r   r   r   )r!   r   r   r   r   r   r	   r   r   r>   r>   r?   r,     s    

r,   c                 K   sL   t | } tdddd\}}t||dd|| |fdttjdd	d
i|S )a  EfficientNet B7 model architecture from the `EfficientNet: Rethinking Model Scaling for Convolutional
    Neural Networks <https://arxiv.org/abs/1905.11946>`_ paper.

    Args:
        weights (:class:`~torchvision.models.EfficientNet_B7_Weights`, optional): The
            pretrained weights to use. See
            :class:`~torchvision.models.EfficientNet_B7_Weights` below for
            more details, and possible values. By default, no pre-trained
            weights are used.
        progress (bool, optional): If True, displays a progress bar of the
            download to stderr. Default is True.
        **kwargs: parameters passed to the ``torchvision.models.efficientnet.EfficientNet``
            base class. Please refer to the `source code
            <https://github.com/pytorch/vision/blob/main/torchvision/models/efficientnet.py>`_
            for more details about this class.
    .. autoclass:: torchvision.models.EfficientNet_B7_Weights
        :members:
    r-   g       @g@r   ry   r   r\   r   r   r   )r"   r   r   r   r   r   r	   r   r   r>   r>   r?   r-     s    

r-   c                 K   sD   t | } td\}}t||dd|| |fdttjddi|S )a  
    Constructs an EfficientNetV2-S architecture from
    `EfficientNetV2: Smaller Models and Faster Training <https://arxiv.org/abs/2104.00298>`_.

    Args:
        weights (:class:`~torchvision.models.EfficientNet_V2_S_Weights`, optional): The
            pretrained weights to use. See
            :class:`~torchvision.models.EfficientNet_V2_S_Weights` below for
            more details, and possible values. By default, no pre-trained
            weights are used.
        progress (bool, optional): If True, displays a progress bar of the
            download to stderr. Default is True.
        **kwargs: parameters passed to the ``torchvision.models.efficientnet.EfficientNet``
            base class. Please refer to the `source code
            <https://github.com/pytorch/vision/blob/main/torchvision/models/efficientnet.py>`_
            for more details about this class.
    .. autoclass:: torchvision.models.EfficientNet_V2_S_Weights
        :members:
    r.   ry   rv   r\   r   r   )r#   r   r   r   r   r   r	   r   r   r>   r>   r?   r.     s    

r.   c                 K   sD   t | } td\}}t||dd|| |fdttjddi|S )a  
    Constructs an EfficientNetV2-M architecture from
    `EfficientNetV2: Smaller Models and Faster Training <https://arxiv.org/abs/2104.00298>`_.

    Args:
        weights (:class:`~torchvision.models.EfficientNet_V2_M_Weights`, optional): The
            pretrained weights to use. See
            :class:`~torchvision.models.EfficientNet_V2_M_Weights` below for
            more details, and possible values. By default, no pre-trained
            weights are used.
        progress (bool, optional): If True, displays a progress bar of the
            download to stderr. Default is True.
        **kwargs: parameters passed to the ``torchvision.models.efficientnet.EfficientNet``
            base class. Please refer to the `source code
            <https://github.com/pytorch/vision/blob/main/torchvision/models/efficientnet.py>`_
            for more details about this class.
    .. autoclass:: torchvision.models.EfficientNet_V2_M_Weights
        :members:
    r/   ry   r   r\   r   r   )r$   r   r   r   r   r   r	   r   r   r>   r>   r?   r/   #  s    

r/   c                 K   sD   t | } td\}}t||dd|| |fdttjddi|S )a  
    Constructs an EfficientNetV2-L architecture from
    `EfficientNetV2: Smaller Models and Faster Training <https://arxiv.org/abs/2104.00298>`_.

    Args:
        weights (:class:`~torchvision.models.EfficientNet_V2_L_Weights`, optional): The
            pretrained weights to use. See
            :class:`~torchvision.models.EfficientNet_V2_L_Weights` below for
            more details, and possible values. By default, no pre-trained
            weights are used.
        progress (bool, optional): If True, displays a progress bar of the
            download to stderr. Default is True.
        **kwargs: parameters passed to the ``torchvision.models.efficientnet.EfficientNet``
            base class. Please refer to the `source code
            <https://github.com/pytorch/vision/blob/main/torchvision/models/efficientnet.py>`_
            for more details about this class.
    .. autoclass:: torchvision.models.EfficientNet_V2_L_Weights
        :members:
    r0   ry   r   r\   r   r   )r%   r   r   r   r   r   r	   r   r   r>   r>   r?   r0   I  s    

r0   )Or   rU   collections.abcr   Zdataclassesr   	functoolsr   typingr   r   r   r   r   r	   r
   Ztorchvision.opsr   Zops.miscr   r   Ztransforms._presetsr   r   utilsr   Z_apir   r   r   Z_metar   _utilsr   r   r   __all__r1   rI   rX   rG   rN   rY   r   rD   rF   boolr   strtupler   r   dictrE   r   r   r   r   r   r   r   r    r!   r"   r#   r$   r%   r   r&   r'   r(   r)   r*   r+   r,   r-   r.   r/   r0   r>   r>   r>   r?   <module>   s  
C=s80



"
"
#
#
#
$
$
