from functools import partial
from typing import Any, Callable, Optional

import torch
import torch.nn as nn
from torch import Tensor

from ..transforms._presets import ImageClassification
from ..utils import _log_api_usage_once
from ._api import register_model, Weights, WeightsEnum
from ._meta import _IMAGENET_CATEGORIES
from ._utils import _ovewrite_named_param, handle_legacy_interface


__all__ = [
    "ShuffleNetV2",
    "ShuffleNet_V2_X0_5_Weights",
    "ShuffleNet_V2_X1_0_Weights",
    "ShuffleNet_V2_X1_5_Weights",
    "ShuffleNet_V2_X2_0_Weights",
    "shufflenet_v2_x0_5",
    "shufflenet_v2_x1_0",
    "shufflenet_v2_x1_5",
    "shufflenet_v2_x2_0",
]


def channel_shuffle(x: Tensor, groups: int) -> Tensor:
    batchsize, num_channels, height, width = x.size()
    channels_per_group = num_channels // groups

    # reshape
    x = x.view(batchsize, groups, channels_per_group, height, width)

    x = torch.transpose(x, 1, 2).contiguous()

    # flatten
    x = x.view(batchsize, num_channels, height, width)

    return x
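

# Added shape note (a reader-facing sketch, not part of the original torchvision source):
# channel_shuffle above reshapes an (N, C, H, W) tensor to (N, groups, C // groups, H, W),
# swaps the group and per-group channel axes, and flattens back to (N, C, H, W), so that
# channels coming from different groups end up interleaved. C must be divisible by groups.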


class InvertedResidual(nn.Module):
    def __init__(self, inp: int, oup: int, stride: int) -> None:
        super().__init__()

        if not (1 <= stride <= 3):
            raise ValueError("illegal stride value")
        self.stride = stride

        branch_features = oup // 2
        if (self.stride == 1) and (inp != branch_features << 1):
            raise ValueError(
                f"Invalid combination of stride {stride}, inp {inp} and oup {oup} values. If stride == 1 then inp should be equal to oup // 2 << 1."
            )

        if self.stride > 1:
            self.branch1 = nn.Sequential(
                self.depthwise_conv(inp, inp, kernel_size=3, stride=self.stride, padding=1),
                nn.BatchNorm2d(inp),
                nn.Conv2d(inp, branch_features, kernel_size=1, stride=1, padding=0, bias=False),
                nn.BatchNorm2d(branch_features),
                nn.ReLU(inplace=True),
            )
        else:
            self.branch1 = nn.Sequential()

        self.branch2 = nn.Sequential(
            nn.Conv2d(
                inp if (self.stride > 1) else branch_features,
                branch_features,
                kernel_size=1,
                stride=1,
                padding=0,
                bias=False,
            ),
            nn.BatchNorm2d(branch_features),
            nn.ReLU(inplace=True),
            self.depthwise_conv(branch_features, branch_features, kernel_size=3, stride=self.stride, padding=1),
            nn.BatchNorm2d(branch_features),
            nn.Conv2d(branch_features, branch_features, kernel_size=1, stride=1, padding=0, bias=False),
            nn.BatchNorm2d(branch_features),
            nn.ReLU(inplace=True),
        )

    @staticmethod
    def depthwise_conv(
        i: int, o: int, kernel_size: int, stride: int = 1, padding: int = 0, bias: bool = False
    ) -> nn.Conv2d:
        return nn.Conv2d(i, o, kernel_size, stride, padding, bias=bias, groups=i)

    def forward(self, x: Tensor) -> Tensor:
        if self.stride == 1:
            x1, x2 = x.chunk(2, dim=1)
            out = torch.cat((x1, self.branch2(x2)), dim=1)
        else:
            out = torch.cat((self.branch1(x), self.branch2(x)), dim=1)

        out = channel_shuffle(out, 2)

        return out


class ShuffleNetV2(nn.Module):
    def __init__(
        self,
        stages_repeats: list[int],
        stages_out_channels: list[int],
        num_classes: int = 1000,
        inverted_residual: Callable[..., nn.Module] = InvertedResidual,
    ) -> None:
        super().__init__()
        _log_api_usage_once(self)

        if len(stages_repeats) != 3:
            raise ValueError("expected stages_repeats as list of 3 positive ints")
        if len(stages_out_channels) != 5:
            raise ValueError("expected stages_out_channels as list of 5 positive ints")
        self._stage_out_channels = stages_out_channels

        input_channels = 3
        output_channels = self._stage_out_channels[0]
        self.conv1 = nn.Sequential(
            nn.Conv2d(input_channels, output_channels, 3, 2, 1, bias=False),
            nn.BatchNorm2d(output_channels),
            nn.ReLU(inplace=True),
        )
        input_channels = output_channels

        self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)

        # Static annotations for mypy
        self.stage2: nn.Sequential
        self.stage3: nn.Sequential
        self.stage4: nn.Sequential
        stage_names = [f"stage{i}" for i in [2, 3, 4]]
        for name, repeats, output_channels in zip(stage_names, stages_repeats, self._stage_out_channels[1:]):
            seq = [inverted_residual(input_channels, output_channels, 2)]
            for i in range(repeats - 1):
                seq.append(inverted_residual(output_channels, output_channels, 1))
            setattr(self, name, nn.Sequential(*seq))
            input_channels = output_channels

        output_channels = self._stage_out_channels[-1]
        self.conv5 = nn.Sequential(
            nn.Conv2d(input_channels, output_channels, 1, 1, 0, bias=False),
            nn.BatchNorm2d(output_channels),
            nn.ReLU(inplace=True),
        )

        self.fc = nn.Linear(output_channels, num_classes)

    def _forward_impl(self, x: Tensor) -> Tensor:
        x = self.conv1(x)
        x = self.maxpool(x)
        x = self.stage2(x)
        x = self.stage3(x)
        x = self.stage4(x)
        x = self.conv5(x)
        x = x.mean([2, 3])  # globalpool
        x = self.fc(x)
        return x

    def forward(self, x: Tensor) -> Tensor:
        return self._forward_impl(x)


def _shufflenetv2(
    weights: Optional[WeightsEnum],
    progress: bool,
    *args: Any,
    **kwargs: Any,
) -> ShuffleNetV2:
    if weights is not None:
        _ovewrite_named_param(kwargs, "num_classes", len(weights.meta["categories"]))

    model = ShuffleNetV2(*args, **kwargs)

    if weights is not None:
        model.load_state_dict(weights.get_state_dict(progress=progress, check_hash=True))

    return model


_COMMON_META = {
    "min_size": (1, 1),
    "categories": _IMAGENET_CATEGORIES,
    "recipe": "https://github.com/ericsun99/Shufflenet-v2-Pytorch",
}


class ShuffleNet_V2_X0_5_Weights(WeightsEnum):
    IMAGENET1K_V1 = Weights(
        url="https://download.pytorch.org/models/shufflenetv2_x0.5-f707e7126e.pth",
        transforms=partial(ImageClassification, crop_size=224),
        meta={
            **_COMMON_META,
            "num_params": 1366792,
            "_metrics": {
                "ImageNet-1K": {
                    "acc@1": 60.552,
                    "acc@5": 81.746,
                }
            },
            "_ops": 0.04,
            "_file_size": 5.282,
            "_docs": """These weights were trained from scratch to reproduce closely the results of the paper.""",
        },
    )
    DEFAULT = IMAGENET1K_V1
   @   sD   e Zd Zedeeddi edddddid	d
dddZeZdS )r   zBhttps://download.pytorch.org/models/shufflenetv2_x1-5666bf0f80.pthrp   rq   i" rs   gI+WQ@gNbX9V@rt   g(\?gE!@ru   rv   r|   Nr~   r#   r#   r#   r$   r      s$   


class ShuffleNet_V2_X1_5_Weights(WeightsEnum):
    IMAGENET1K_V1 = Weights(
        url="https://download.pytorch.org/models/shufflenetv2_x1_5-3c479a10.pth",
        transforms=partial(ImageClassification, crop_size=224, resize_size=232),
        meta={
            **_COMMON_META,
            "recipe": "https://github.com/pytorch/vision/pull/5906",
            "num_params": 3503624,
            "_metrics": {
                "ImageNet-1K": {
                    "acc@1": 72.996,
                    "acc@5": 91.086,
                }
            },
            "_ops": 0.296,
            "_file_size": 13.557,
            "_docs": """
                These weights were trained from scratch by using TorchVision's `new training recipe
                <https://pytorch.org/blog/how-to-train-state-of-the-art-models-using-torchvision-latest-primitives/>`_.
            """,
        },
    )
    DEFAULT = IMAGENET1K_V1


class ShuffleNet_V2_X2_0_Weights(WeightsEnum):
    IMAGENET1K_V1 = Weights(
        url="https://download.pytorch.org/models/shufflenetv2_x2_0-8be3c8ee.pth",
        transforms=partial(ImageClassification, crop_size=224, resize_size=232),
        meta={
            **_COMMON_META,
            "recipe": "https://github.com/pytorch/vision/pull/5906",
            "num_params": 7393996,
            "_metrics": {
                "ImageNet-1K": {
                    "acc@1": 76.230,
                    "acc@5": 93.006,
                }
            },
            "_ops": 0.583,
            "_file_size": 28.433,
            "_docs": """
                These weights were trained from scratch by using TorchVision's `new training recipe
                <https://pytorch.org/blog/how-to-train-state-of-the-art-models-using-torchvision-latest-primitives/>`_.
            """,
        },
    )
    DEFAULT = IMAGENET1K_V1


@register_model()
@handle_legacy_interface(weights=("pretrained", ShuffleNet_V2_X0_5_Weights.IMAGENET1K_V1))
def shufflenet_v2_x0_5(
    *, weights: Optional[ShuffleNet_V2_X0_5_Weights] = None, progress: bool = True, **kwargs: Any
) -> ShuffleNetV2:
    """
    Constructs a ShuffleNetV2 architecture with 0.5x output channels, as described in
    `ShuffleNet V2: Practical Guidelines for Efficient CNN Architecture Design
    <https://arxiv.org/abs/1807.11164>`__.

    Args:
        weights (:class:`~torchvision.models.ShuffleNet_V2_X0_5_Weights`, optional): The
            pretrained weights to use. See
            :class:`~torchvision.models.ShuffleNet_V2_X0_5_Weights` below for
            more details, and possible values. By default, no pre-trained
            weights are used.
        progress (bool, optional): If True, displays a progress bar of the
            download to stderr. Default is True.
        **kwargs: parameters passed to the ``torchvision.models.shufflenetv2.ShuffleNetV2``
            base class. Please refer to the `source code
            <https://github.com/pytorch/vision/blob/main/torchvision/models/shufflenetv2.py>`_
            for more details about this class.

    .. autoclass:: torchvision.models.ShuffleNet_V2_X0_5_Weights
        :members:
    """
    weights = ShuffleNet_V2_X0_5_Weights.verify(weights)

    return _shufflenetv2(weights, progress, [4, 8, 4], [24, 48, 96, 192, 1024], **kwargs)


@register_model()
@handle_legacy_interface(weights=("pretrained", ShuffleNet_V2_X1_0_Weights.IMAGENET1K_V1))
def shufflenet_v2_x1_0(
    *, weights: Optional[ShuffleNet_V2_X1_0_Weights] = None, progress: bool = True, **kwargs: Any
) -> ShuffleNetV2:
    """
    Constructs a ShuffleNetV2 architecture with 1.0x output channels, as described in
    `ShuffleNet V2: Practical Guidelines for Efficient CNN Architecture Design
    <https://arxiv.org/abs/1807.11164>`__.

    Args:
        weights (:class:`~torchvision.models.ShuffleNet_V2_X1_0_Weights`, optional): The
            pretrained weights to use. See
            :class:`~torchvision.models.ShuffleNet_V2_X1_0_Weights` below for
            more details, and possible values. By default, no pre-trained
            weights are used.
        progress (bool, optional): If True, displays a progress bar of the
            download to stderr. Default is True.
        **kwargs: parameters passed to the ``torchvision.models.shufflenetv2.ShuffleNetV2``
            base class. Please refer to the `source code
            <https://github.com/pytorch/vision/blob/main/torchvision/models/shufflenetv2.py>`_
            for more details about this class.

    .. autoclass:: torchvision.models.ShuffleNet_V2_X1_0_Weights
        :members:
    """
    weights = ShuffleNet_V2_X1_0_Weights.verify(weights)

    return _shufflenetv2(weights, progress, [4, 8, 4], [24, 116, 232, 464, 1024], **kwargs)


@register_model()
@handle_legacy_interface(weights=("pretrained", ShuffleNet_V2_X1_5_Weights.IMAGENET1K_V1))
def shufflenet_v2_x1_5(
    *, weights: Optional[ShuffleNet_V2_X1_5_Weights] = None, progress: bool = True, **kwargs: Any
) -> ShuffleNetV2:
    """
    Constructs a ShuffleNetV2 architecture with 1.5x output channels, as described in
    `ShuffleNet V2: Practical Guidelines for Efficient CNN Architecture Design
    <https://arxiv.org/abs/1807.11164>`__.

    Args:
        weights (:class:`~torchvision.models.ShuffleNet_V2_X1_5_Weights`, optional): The
            pretrained weights to use. See
            :class:`~torchvision.models.ShuffleNet_V2_X1_5_Weights` below for
            more details, and possible values. By default, no pre-trained
            weights are used.
        progress (bool, optional): If True, displays a progress bar of the
            download to stderr. Default is True.
        **kwargs: parameters passed to the ``torchvision.models.shufflenetv2.ShuffleNetV2``
            base class. Please refer to the `source code
            <https://github.com/pytorch/vision/blob/main/torchvision/models/shufflenetv2.py>`_
            for more details about this class.

    .. autoclass:: torchvision.models.ShuffleNet_V2_X1_5_Weights
        :members:
    """
    weights = ShuffleNet_V2_X1_5_Weights.verify(weights)

    return _shufflenetv2(weights, progress, [4, 8, 4], [24, 176, 352, 704, 1024], **kwargs)


@register_model()
@handle_legacy_interface(weights=("pretrained", ShuffleNet_V2_X2_0_Weights.IMAGENET1K_V1))
def shufflenet_v2_x2_0(
    *, weights: Optional[ShuffleNet_V2_X2_0_Weights] = None, progress: bool = True, **kwargs: Any
) -> ShuffleNetV2:
    """
    Constructs a ShuffleNetV2 architecture with 2.0x output channels, as described in
    `ShuffleNet V2: Practical Guidelines for Efficient CNN Architecture Design
    <https://arxiv.org/abs/1807.11164>`__.

    Args:
        weights (:class:`~torchvision.models.ShuffleNet_V2_X2_0_Weights`, optional): The
            pretrained weights to use. See
            :class:`~torchvision.models.ShuffleNet_V2_X2_0_Weights` below for
            more details, and possible values. By default, no pre-trained
            weights are used.
        progress (bool, optional): If True, displays a progress bar of the
            download to stderr. Default is True.
        **kwargs: parameters passed to the ``torchvision.models.shufflenetv2.ShuffleNetV2``
            base class. Please refer to the `source code
            <https://github.com/pytorch/vision/blob/main/torchvision/models/shufflenetv2.py>`_
            for more details about this class.

    .. autoclass:: torchvision.models.ShuffleNet_V2_X2_0_Weights
        :members:
    """
    weights = ShuffleNet_V2_X2_0_Weights.verify(weights)

    return _shufflenetv2(weights, progress, [4, 8, 4], [24, 244, 488, 976, 2048], **kwargs)
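

# A minimal usage sketch appended for illustration (not part of the original torchvision
# module): it builds the 1.0x variant without downloading weights and runs a dummy batch.
# Pass e.g. weights=ShuffleNet_V2_X1_0_Weights.IMAGENET1K_V1 to load the pretrained weights.
# Because this file uses relative imports, run it as `python -m torchvision.models.shufflenetv2`.
if __name__ == "__main__":
    model = shufflenet_v2_x1_0(weights=None)
    model.eval()
    with torch.no_grad():
        logits = model(torch.randn(1, 3, 224, 224))
    print(logits.shape)  # expected: torch.Size([1, 1000])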