from functools import partial
from typing import Any, Optional, Union

import torch
import torch.nn as nn
from torch import Tensor
from torchvision.models import shufflenetv2

from ...transforms._presets import ImageClassification
from .._api import register_model, Weights, WeightsEnum
from .._meta import _IMAGENET_CATEGORIES
from .._utils import _ovewrite_named_param, handle_legacy_interface
from ..shufflenetv2 import (
    ShuffleNet_V2_X0_5_Weights,
    ShuffleNet_V2_X1_0_Weights,
    ShuffleNet_V2_X1_5_Weights,
    ShuffleNet_V2_X2_0_Weights,
)
from .utils import _fuse_modules, _replace_relu, quantize_model

__all__ = [
    "QuantizableShuffleNetV2",
    "ShuffleNet_V2_X0_5_QuantizedWeights",
    "ShuffleNet_V2_X1_0_QuantizedWeights",
    "ShuffleNet_V2_X1_5_QuantizedWeights",
    "ShuffleNet_V2_X2_0_QuantizedWeights",
    "shufflenet_v2_x0_5",
    "shufflenet_v2_x1_0",
    "shufflenet_v2_x1_5",
    "shufflenet_v2_x2_0",
]


class QuantizableInvertedResidual(shufflenetv2.InvertedResidual):
    def __init__(self, *args: Any, **kwargs: Any) -> None:
        super().__init__(*args, **kwargs)
        self.cat = nn.quantized.FloatFunctional()

    def forward(self, x: Tensor) -> Tensor:
        if self.stride == 1:
            x1, x2 = x.chunk(2, dim=1)
            out = self.cat.cat([x1, self.branch2(x2)], dim=1)
        else:
            out = self.cat.cat([self.branch1(x), self.branch2(x)], dim=1)
        out = shufflenetv2.channel_shuffle(out, 2)
        return out


class QuantizableShuffleNetV2(shufflenetv2.ShuffleNetV2):
    def __init__(self, *args: Any, **kwargs: Any) -> None:
        super().__init__(*args, inverted_residual=QuantizableInvertedResidual, **kwargs)  # type: ignore[misc]
        self.quant = torch.ao.quantization.QuantStub()
        self.dequant = torch.ao.quantization.DeQuantStub()

    def forward(self, x: Tensor) -> Tensor:
        x = self.quant(x)
        x = self._forward_impl(x)
        x = self.dequant(x)
        return x

    def fuse_model(self, is_qat: Optional[bool] = None) -> None:
        r"""Fuse conv/bn/relu modules in shufflenetv2 model

        Fuse conv+bn+relu/ conv+relu/conv+bn modules to prepare for quantization.
        Model is modified in place.

        .. note::
            Note that this operation does not change numerics
            and the model after modification is in floating point
        """
        for name, m in self._modules.items():
            if name in ["conv1", "conv5"] and m is not None:
                _fuse_modules(m, [["0", "1", "2"]], is_qat, inplace=True)
        for m in self.modules():
            if type(m) is QuantizableInvertedResidual:
                if len(m.branch1._modules.items()) > 0:
                    _fuse_modules(m.branch1, [["0", "1"], ["2", "3", "4"]], is_qat, inplace=True)
                _fuse_modules(
                    m.branch2,
                    [["0", "1", "2"], ["3", "4"], ["5", "6", "7"]],
                    is_qat,
                    inplace=True,
                )


def _shufflenetv2(
    stages_repeats: list[int],
    stages_out_channels: list[int],
    *,
    weights: Optional[WeightsEnum],
    progress: bool,
    quantize: bool,
    **kwargs: Any,
) -> QuantizableShuffleNetV2:
    if weights is not None:
        _ovewrite_named_param(kwargs, "num_classes", len(weights.meta["categories"]))
        if "backend" in weights.meta:
            _ovewrite_named_param(kwargs, "backend", weights.meta["backend"])
    backend = kwargs.pop("backend", "fbgemm")

    model = QuantizableShuffleNetV2(stages_repeats, stages_out_channels, **kwargs)
    _replace_relu(model)
    if quantize:
        quantize_model(model, backend)

    if weights is not None:
        model.load_state_dict(weights.get_state_dict(progress=progress, check_hash=True))

    return model


_COMMON_META = {
    "min_size": (1, 1),
    "categories": _IMAGENET_CATEGORIES,
    "backend": "fbgemm",
    "recipe": "https://github.com/pytorch/vision/tree/main/references/classification#post-training-quantized-models",
    "_docs": """
        These weights were produced by doing Post Training Quantization (eager mode) on top of the unquantized
        weights listed below.
    """,
}


class ShuffleNet_V2_X0_5_QuantizedWeights(WeightsEnum):
    IMAGENET1K_FBGEMM_V1 = Weights(
        url="https://download.pytorch.org/models/quantized/shufflenetv2_x0.5_fbgemm-00845098.pth",
        transforms=partial(ImageClassification, crop_size=224),
        meta={
            **_COMMON_META,
            "num_params": 1366792,
            "unquantized": ShuffleNet_V2_X0_5_Weights.IMAGENET1K_V1,
            "_metrics": {"ImageNet-1K": {"acc@1": 57.972, "acc@5": 79.780}},
            "_ops": 0.04,
            "_file_size": 1.501,
        },
    )
    DEFAULT = IMAGENET1K_FBGEMM_V1


class ShuffleNet_V2_X1_0_QuantizedWeights(WeightsEnum):
    IMAGENET1K_FBGEMM_V1 = Weights(
        url="https://download.pytorch.org/models/quantized/shufflenetv2_x1_fbgemm-1e62bb32.pth",
        transforms=partial(ImageClassification, crop_size=224),
        meta={
            **_COMMON_META,
            "num_params": 2278604,
            "unquantized": ShuffleNet_V2_X1_0_Weights.IMAGENET1K_V1,
            "_metrics": {"ImageNet-1K": {"acc@1": 68.360, "acc@5": 87.582}},
            "_ops": 0.145,
            "_file_size": 2.334,
        },
    )
    DEFAULT = IMAGENET1K_FBGEMM_V1


class ShuffleNet_V2_X1_5_QuantizedWeights(WeightsEnum):
    IMAGENET1K_FBGEMM_V1 = Weights(
        url="https://download.pytorch.org/models/quantized/shufflenetv2_x1_5_fbgemm-d7401f05.pth",
        transforms=partial(ImageClassification, crop_size=224, resize_size=232),
        meta={
            **_COMMON_META,
            "recipe": "https://github.com/pytorch/vision/pull/5906",
            "num_params": 3503624,
            "unquantized": ShuffleNet_V2_X1_5_Weights.IMAGENET1K_V1,
            "_metrics": {"ImageNet-1K": {"acc@1": 72.052, "acc@5": 90.700}},
            "_ops": 0.296,
            "_file_size": 3.672,
        },
    )
    DEFAULT = IMAGENET1K_FBGEMM_V1


class ShuffleNet_V2_X2_0_QuantizedWeights(WeightsEnum):
    IMAGENET1K_FBGEMM_V1 = Weights(
        url="https://download.pytorch.org/models/quantized/shufflenetv2_x2_0_fbgemm-5cac526c.pth",
        transforms=partial(ImageClassification, crop_size=224, resize_size=232),
        meta={
            **_COMMON_META,
            "recipe": "https://github.com/pytorch/vision/pull/5906",
            "num_params": 7393996,
            "unquantized": ShuffleNet_V2_X2_0_Weights.IMAGENET1K_V1,
            "_metrics": {"ImageNet-1K": {"acc@1": 75.354, "acc@5": 92.488}},
            "_ops": 0.583,
            "_file_size": 7.467,
        },
    )
    DEFAULT = IMAGENET1K_FBGEMM_V1


@register_model(name="quantized_shufflenet_v2_x0_5")
@handle_legacy_interface(
    weights=(
        "pretrained",
        lambda kwargs: ShuffleNet_V2_X0_5_QuantizedWeights.IMAGENET1K_FBGEMM_V1
        if kwargs.get("quantize", False)
        else ShuffleNet_V2_X0_5_Weights.IMAGENET1K_V1,
    )
)
def shufflenet_v2_x0_5(
    *,
    weights: Optional[Union[ShuffleNet_V2_X0_5_QuantizedWeights, ShuffleNet_V2_X0_5_Weights]] = None,
    progress: bool = True,
    quantize: bool = False,
    **kwargs: Any,
) -> QuantizableShuffleNetV2:
    """
    Constructs a ShuffleNetV2 with 0.5x output channels, as described in
    `ShuffleNet V2: Practical Guidelines for Efficient CNN Architecture Design
    <https://arxiv.org/abs/1807.11164>`__.

    .. note::
        Note that ``quantize = True`` returns a quantized model with 8 bit
        weights. Quantized models only support inference and run on CPUs.
        GPU inference is not yet supported.

    Args:
        weights (:class:`~torchvision.models.quantization.ShuffleNet_V2_X0_5_QuantizedWeights` or :class:`~torchvision.models.ShuffleNet_V2_X0_5_Weights`, optional): The
            pretrained weights for the model. See
            :class:`~torchvision.models.quantization.ShuffleNet_V2_X0_5_QuantizedWeights` below for
            more details, and possible values. By default, no pre-trained
            weights are used.
        progress (bool, optional): If True, displays a progress bar of the download to stderr.
            Default is True.
        quantize (bool, optional): If True, return a quantized version of the model.
            Default is False.
        **kwargs: parameters passed to the ``torchvision.models.quantization.ShuffleNet_V2_X0_5_QuantizedWeights``
            base class. Please refer to the `source code
            <https://github.com/pytorch/vision/blob/main/torchvision/models/quantization/shufflenetv2.py>`_
            for more details about this class.

    .. autoclass:: torchvision.models.quantization.ShuffleNet_V2_X0_5_QuantizedWeights
        :members:

    .. autoclass:: torchvision.models.ShuffleNet_V2_X0_5_Weights
        :members:
        :noindex:
    """
    weights = (ShuffleNet_V2_X0_5_QuantizedWeights if quantize else ShuffleNet_V2_X0_5_Weights).verify(weights)
    return _shufflenetv2(
        [4, 8, 4], [24, 48, 96, 192, 1024], weights=weights, progress=progress, quantize=quantize, **kwargs
    )
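
# Usage sketch (an editorial assumption, not part of the upstream module): loading the
# pretrained quantized 0.5x model for CPU inference. The weight enum, builder and
# transforms exist above; the random tensor merely stands in for a real preprocessed image.
#
#   weights = ShuffleNet_V2_X0_5_QuantizedWeights.IMAGENET1K_FBGEMM_V1
#   model = shufflenet_v2_x0_5(weights=weights, quantize=True)
#   model.eval()  # quantized models support inference only
#   preprocess = weights.transforms()
#   with torch.inference_mode():
#       logits = model(preprocess(torch.rand(3, 256, 256)).unsqueeze(0))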

@register_model(name="quantized_shufflenet_v2_x1_0")
@handle_legacy_interface(
    weights=(
        "pretrained",
        lambda kwargs: ShuffleNet_V2_X1_0_QuantizedWeights.IMAGENET1K_FBGEMM_V1
        if kwargs.get("quantize", False)
        else ShuffleNet_V2_X1_0_Weights.IMAGENET1K_V1,
    )
)
def shufflenet_v2_x1_0(
    *,
    weights: Optional[Union[ShuffleNet_V2_X1_0_QuantizedWeights, ShuffleNet_V2_X1_0_Weights]] = None,
    progress: bool = True,
    quantize: bool = False,
    **kwargs: Any,
) -> QuantizableShuffleNetV2:
    """
    Constructs a ShuffleNetV2 with 1.0x output channels, as described in
    `ShuffleNet V2: Practical Guidelines for Efficient CNN Architecture Design
    <https://arxiv.org/abs/1807.11164>`__.

    .. note::
        Note that ``quantize = True`` returns a quantized model with 8 bit
        weights. Quantized models only support inference and run on CPUs.
        GPU inference is not yet supported.

    Args:
        weights (:class:`~torchvision.models.quantization.ShuffleNet_V2_X1_0_QuantizedWeights` or :class:`~torchvision.models.ShuffleNet_V2_X1_0_Weights`, optional): The
            pretrained weights for the model. See
            :class:`~torchvision.models.quantization.ShuffleNet_V2_X1_0_QuantizedWeights` below for
            more details, and possible values. By default, no pre-trained
            weights are used.
        progress (bool, optional): If True, displays a progress bar of the download to stderr.
            Default is True.
        quantize (bool, optional): If True, return a quantized version of the model.
            Default is False.
        **kwargs: parameters passed to the ``torchvision.models.quantization.ShuffleNet_V2_X1_0_QuantizedWeights``
            base class. Please refer to the `source code
            <https://github.com/pytorch/vision/blob/main/torchvision/models/quantization/shufflenetv2.py>`_
            for more details about this class.

    .. autoclass:: torchvision.models.quantization.ShuffleNet_V2_X1_0_QuantizedWeights
        :members:

    .. autoclass:: torchvision.models.ShuffleNet_V2_X1_0_Weights
        :members:
        :noindex:
    """
    weights = (ShuffleNet_V2_X1_0_QuantizedWeights if quantize else ShuffleNet_V2_X1_0_Weights).verify(weights)
    return _shufflenetv2(
        [4, 8, 4], [24, 116, 232, 464, 1024], weights=weights, progress=progress, quantize=quantize, **kwargs
    )


@register_model(name="quantized_shufflenet_v2_x1_5")
@handle_legacy_interface(
    weights=(
        "pretrained",
        lambda kwargs: ShuffleNet_V2_X1_5_QuantizedWeights.IMAGENET1K_FBGEMM_V1
        if kwargs.get("quantize", False)
        else ShuffleNet_V2_X1_5_Weights.IMAGENET1K_V1,
    )
)
def shufflenet_v2_x1_5(
    *,
    weights: Optional[Union[ShuffleNet_V2_X1_5_QuantizedWeights, ShuffleNet_V2_X1_5_Weights]] = None,
    progress: bool = True,
    quantize: bool = False,
    **kwargs: Any,
) -> QuantizableShuffleNetV2:
    """
    Constructs a ShuffleNetV2 with 1.5x output channels, as described in
    `ShuffleNet V2: Practical Guidelines for Efficient CNN Architecture Design
    <https://arxiv.org/abs/1807.11164>`__.

    .. note::
        Note that ``quantize = True`` returns a quantized model with 8 bit
        weights. Quantized models only support inference and run on CPUs.
        GPU inference is not yet supported.

    Args:
        weights (:class:`~torchvision.models.quantization.ShuffleNet_V2_X1_5_QuantizedWeights` or :class:`~torchvision.models.ShuffleNet_V2_X1_5_Weights`, optional): The
            pretrained weights for the model. See
            :class:`~torchvision.models.quantization.ShuffleNet_V2_X1_5_QuantizedWeights` below for
            more details, and possible values. By default, no pre-trained
            weights are used.
        progress (bool, optional): If True, displays a progress bar of the download to stderr.
            Default is True.
        quantize (bool, optional): If True, return a quantized version of the model.
            Default is False.
        **kwargs: parameters passed to the ``torchvision.models.quantization.ShuffleNet_V2_X1_5_QuantizedWeights``
            base class. Please refer to the `source code
            <https://github.com/pytorch/vision/blob/main/torchvision/models/quantization/shufflenetv2.py>`_
            for more details about this class.

    .. autoclass:: torchvision.models.quantization.ShuffleNet_V2_X1_5_QuantizedWeights
        :members:

    .. autoclass:: torchvision.models.ShuffleNet_V2_X1_5_Weights
        :members:
        :noindex:
    """
    weights = (ShuffleNet_V2_X1_5_QuantizedWeights if quantize else ShuffleNet_V2_X1_5_Weights).verify(weights)
    return _shufflenetv2(
        [4, 8, 4], [24, 176, 352, 704, 1024], weights=weights, progress=progress, quantize=quantize, **kwargs
    )
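
# Rough sketch of how eager-mode post-training quantization applies to the classes above,
# using only standard torch.ao.quantization calls; this is an assumption about the general
# flow behind ``quantize=True``, not a verbatim copy of ``quantize_model``, and the single
# random batch below is a placeholder for real calibration data.
#
#   float_model = QuantizableShuffleNetV2([4, 8, 4], [24, 176, 352, 704, 1024])
#   float_model.eval()
#   float_model.fuse_model(is_qat=False)                       # conv+bn(+relu) fusion defined above
#   float_model.qconfig = torch.ao.quantization.get_default_qconfig("fbgemm")
#   torch.ao.quantization.prepare(float_model, inplace=True)   # insert observers
#   float_model(torch.rand(1, 3, 224, 224))                    # calibrate on representative data
#   torch.ao.quantization.convert(float_model, inplace=True)   # swap in int8 modules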

@register_model(name="quantized_shufflenet_v2_x2_0")
@handle_legacy_interface(
    weights=(
        "pretrained",
        lambda kwargs: ShuffleNet_V2_X2_0_QuantizedWeights.IMAGENET1K_FBGEMM_V1
        if kwargs.get("quantize", False)
        else ShuffleNet_V2_X2_0_Weights.IMAGENET1K_V1,
    )
)
def shufflenet_v2_x2_0(
    *,
    weights: Optional[Union[ShuffleNet_V2_X2_0_QuantizedWeights, ShuffleNet_V2_X2_0_Weights]] = None,
    progress: bool = True,
    quantize: bool = False,
    **kwargs: Any,
) -> QuantizableShuffleNetV2:
    """
    Constructs a ShuffleNetV2 with 2.0x output channels, as described in
    `ShuffleNet V2: Practical Guidelines for Efficient CNN Architecture Design
    <https://arxiv.org/abs/1807.11164>`__.

    .. note::
        Note that ``quantize = True`` returns a quantized model with 8 bit
        weights. Quantized models only support inference and run on CPUs.
        GPU inference is not yet supported.

    Args:
        weights (:class:`~torchvision.models.quantization.ShuffleNet_V2_X2_0_QuantizedWeights` or :class:`~torchvision.models.ShuffleNet_V2_X2_0_Weights`, optional): The
            pretrained weights for the model. See
            :class:`~torchvision.models.quantization.ShuffleNet_V2_X2_0_QuantizedWeights` below for
            more details, and possible values. By default, no pre-trained
            weights are used.
        progress (bool, optional): If True, displays a progress bar of the download to stderr.
            Default is True.
        quantize (bool, optional): If True, return a quantized version of the model.
            Default is False.
        **kwargs: parameters passed to the ``torchvision.models.quantization.ShuffleNet_V2_X2_0_QuantizedWeights``
            base class. Please refer to the `source code
            <https://github.com/pytorch/vision/blob/main/torchvision/models/quantization/shufflenetv2.py>`_
            for more details about this class.

    .. autoclass:: torchvision.models.quantization.ShuffleNet_V2_X2_0_QuantizedWeights
        :members:

    .. autoclass:: torchvision.models.ShuffleNet_V2_X2_0_Weights
        :members:
        :noindex:
    """
    weights = (ShuffleNet_V2_X2_0_QuantizedWeights if quantize else ShuffleNet_V2_X2_0_Weights).verify(weights)
    return _shufflenetv2(
        [4, 8, 4], [24, 244, 488, 976, 2048], weights=weights, progress=progress, quantize=quantize, **kwargs
    )
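
# Minimal smoke-test sketch, assuming this file is executed directly on an x86 CPU with the
# default fbgemm backend available; it is not part of the upstream torchvision module. It
# builds the 2.0x variant without pretrained weights, quantizes it, and checks the output shape.
if __name__ == "__main__":
    model = shufflenet_v2_x2_0(quantize=True)  # randomly initialized, int8 inference graph
    model.eval()  # quantized models are inference-only and run on CPU
    with torch.inference_mode():
        out = model(torch.rand(2, 3, 224, 224))
    print(out.shape)  # expected: torch.Size([2, 1000])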