o
    Zh(                     @   s  U d dl Z d dlmZmZmZmZ d dlm  m  m	Z
 d dlmZ d dlmZmZmZ g dZdd Zdd Zd	d
 Zdd Zdd Zi ejejfeejejejfeejejfeejejejfeejejfeejejejfeejejfee
jejejfee
jejejfee
jej ejfeej ejfee
j!ejejfee
j"ejejfee
j#ej$ejfeej%ejfeej&ejfeZ'e(e)eej*ef f e+d< dddZ,dd Z-dd Z.dd Z/dede(eeej*ef f fddZ0dS )    N)AnyCallableOptionalUnion)get_combined_dictMatchAllNodePattern)fuse_conv_bnfuse_conv_bn_relufuse_linear_bnfuse_convtranspose_bnget_fuser_methodget_fuser_method_newc                 C   s   |j |j ks
J dtjtjtjtjtjtji}| rL|j	|j
ks$J d|js+J d|js2J d|t|d}|durC|||S td||f tj||S )a  Return the fused the conv and bn modules.
    Given the conv and bn modules, fuses them and returns the fused module

    Args:
        is_qat: a flag for whether we are using quantization aware training fusion
        or post training quantization fusion
        conv: Module instance of type conv2d/conv3d
        bn: Spatial BN instance that needs to be fused with the conv

    Examples::

        >>> m1 = nn.Conv2d(10, 20, 3)
        >>> b1 = nn.BatchNorm2d(20)
        >>> # xdoctest: +SKIP
        >>> m2 = fuse_conv_bn(m1, b1)
    :Conv and BN both must be in the same mode (train or eval).z?Output channel of Conv2d must match num_features of BatchNorm2dz7Only support fusing BatchNorm2d with affine set to TruezGOnly support fusing BatchNorm2d with tracking_running_stats set to TrueNCannot fuse train modules: )trainingnnConv1dnniZConvBn1dConv2dZConvBn2dConv3dZConvBn3dnum_featuresout_channelsaffinetrack_running_statsgettypeNotImplementedErrorutilsfuse_conv_bn_eval)is_qatconvbnZfused_module_class_mapZfused_module_class r#   Z/var/www/auris/lib/python3.10/site-packages/torch/ao/quantization/fuser_method_mappings.pyr	      s*   
r	   c                 C   s  |j |j   kr|j ksJ d J dd}| r[tjtjtjtjtjtji}|j	|j
ks1J d|js8J d|js?J d|t|d}|durQ||||S td|||f tjtjtjtjtjtji}|t|d}|durtjj||}|||S td|||f )aJ  Return the fused conv and bv modules.

    Given the conv and bn modules, fuses them and returns the fused module

    Args:
        is_qat: a flag for whether we are using quantization aware training fusion
        or post training quantization fusion
        conv: Module instance of type conv2d/conv3d
        bn: Spatial BN instance that needs to be fused with the conv

    Examples::

        >>> m1 = nn.Conv2d(10, 20, 3)
        >>> b1 = nn.BatchNorm2d(20)
        >>> r1 = nn.ReLU(inplace=False)
        >>> # xdoctest: +SKIP
        >>> m2 = fuse_conv_bn_relu(m1, b1, r1)
    r   Nz;Output channel of Conv must match num_features of BatchNormz5Only support fusing BatchNorm with affine set to TruezEOnly support fusing BatchNorm with tracking_running_stats set to Truer   zCannot fuse eval modules: )r   r   r   r   ZConvBnReLU1dr   ZConvBnReLU2dr   ZConvBnReLU3dr   r   r   r   r   r   r   
ConvReLU1d
ConvReLU2d
ConvReLU3dr   fusionr   )r    r!   r"   ZreluZfused_moduleZmap_to_fused_module_trainZmap_to_fused_module_evalZ
fused_convr#   r#   r$   r
   @   s@   
r
   c                 C   sd   |j |j ks
J d| r*|j|jksJ d|jsJ d|js$J dt||S tjj	
||S )a  Return the fused linear and bn modules.
    Given the linear and bn modules, fuses them and returns the fused module

    Args:
        is_qat: a flag for whether we are using quantization aware training fusion
        or post training quantization fusion
        linear: Module instance of type Linear
        bn: BatchNorm1d instance that needs to be fused with the linear layer

    Examples::

        >>> m1 = nn.Linear(20, 10)
        >>> b1 = nn.BatchNorm1d(10)
        >>> # xdoctest: +SKIP
        >>> m2 = fuse_linear_bn(m1, b1)
    z<Linear and BN both must be in the same mode (train or eval).z@Output features of Linear must match num_features of BatchNorm1dz7Only support fusing BatchNorm1d with affine set to TruezGOnly support fusing BatchNorm1d with tracking_running_stats set to True)r   r   Zout_featuresr   r   r   Z
LinearBn1dr   r   r(   Zfuse_linear_bn_eval)r    Zlinearr"   r#   r#   r$   r   w   s   r   c                 C   s4   |j |j ks
J d| rtdtjjj||ddS )a  Return the fused ConvTranspose and bn modules.
    Given ConvTranspose and bn modules, fuses them and returns the fused module

    Args:
        convt: Module instance of type ConvTransposeNd
        bn: BatchNormNd instance that needs to be fused with the linear layer.
            batch norm N should match the ConvTranspose N

    Examples::

        >>> m1 = nn.ConvTranspose2d(10, 20, 3)
        >>> b1 = nn.BatchNorm2d(20)
        >>> # xdoctest: +SKIP
        >>> m2 = fuse_convtranspose_bn(m1, b1)
    zCConvTranspose and BN both must be in the same mode (train or eval).z8Fusing ConvTranspose+BatchNorm not yet supported in QAT.T)Z	transpose)r   	Exceptionr   r   r(   r   )r    Zconvtr"   r#   r#   r$   r      s   r   c                        fdd}|S )a!  Return a sequential wrapped that for is_qat and two modules.
    Given a sequential class for two modules, return a function that takes
    is_qat, and then two modules as argument, that ignores the is_qat flag
    and always returns the sequential that combines the two input modules
    c                    s
    ||S Nr#   )r    m1m2
sequentialr#   r$   fuser_method   s   
z*_sequential_wrapper2.<locals>.fuser_methodr#   )r/   r0   r#   r.   r$   _sequential_wrapper2   s   r1    _DEFAULT_OP_LIST_TO_FUSER_METHODc                 C   s>   |du ri }t t|}|| d}|dusJ d|  d|S )zGet fuser method for the given list of module types.

    Get fuser method for the given list of module types,
    return None if fuser method does not exist
    Ndid not find fuser method for:  )r   r2   r   )op_listZadditional_fuser_method_mappingZall_mappingsr0   r#   r#   r$   r      s   r   c                    r*   )Nc                    s    | ||S r+   r#   )r    xyfr#   r$   reversed   s   z_reverse2.<locals>.reversedr#   r9   r:   r#   r8   r$   	_reverse2   s   r<   c                    r*   )Nc                    s   |\}} | |||S r+   r#   )r    r6   wr7   zr8   r#   r$   r:      s   z_reverse3.<locals>.reversedr#   r;   r#   r8   r$   	_reverse3   s   r?   c                 C   s:   t | ttfrdd | D }ttj| }|S | tg}|S )aQ  Return a list of valid patterns generated from the op_pattern.

    Returns a list of valid patterns generated from the op_pattern,
    since MatchAllNode can match all types of nodes,
    e.g. pattern (torch.nn.Conv2d, torch.add) should also be able to match keys like
    (MatchAllNode, torch.add) and (torch.nn.Conv2d, MatchAllNode)

    Example Input:
    (torch.add, (torch.nn.ReLU, torch.nn.Conv2d))

    Example Output:
    [(torch.add, (torch.nn.ReLU, torch.nn.Conv2d)),
     (torch.add, (torch.nn.ReLU, MatchAllNode)),
     (torch.add, (MatchAllNode, torch.nn.Conv2d)),
     (torch.add, (MatchAllNode, MatchAllNode)),
     (MatchAllNode, (torch.nn.ReLU, torch.nn.Conv2d)),
     (MatchAllNode, (torch.nn.ReLU, MatchAllNode)),
     (MatchAllNode, (MatchAllNode, torch.nn.Conv2d)),
     (MatchAllNode, (MatchAllNode, MatchAllNode)),
    ]
    c                 S   s   g | ]}t |qS r#   )_get_valid_patterns).0Zsub_patternr#   r#   r$   
<listcomp>  s    z'_get_valid_patterns.<locals>.<listcomp>)
isinstancetuplelist	itertoolsproductr   )
op_patternZ	sub_combsresultr#   r#   r$   r@      s   r@   rH   fuser_method_mappingc                 C   sJ   t | }d}|D ]} || d}|dur nq|dus#J d|  d|S )zGet fuser method.

    This will be made default after we deprecate the get_fuser_method
    Would like to implement this first and have a separate PR for deprecation
    Nr3   r4   )r@   r   )rH   rJ   Zop_patternsr0   r#   r#   r$   r     s   	r   r+   )1rF   typingr   r   r   r   Ztorch.ao.nn.intrinsicZaor   Z	intrinsicr   Ztorch.nnZtorch.ao.quantization.utilsr   r   r   __all__r	   r
   r   r   r1   r   ZBatchNorm1dZReLUr   ZBatchNorm2dr   ZBatchNorm3dr%   r&   r'   ZLinearZ
LinearReLUZBNReLU2dZBNReLU3dZConvTranspose1dZConvTranspose2dZConvTranspose3dr2   dictrD   Z
Sequential__annotations__r   r<   r?   r@   r   r#   r#   r#   r$   <module>   sj   

,7"	

