"""Weight Normalization from https://arxiv.org/abs/1602.07868."""
from typing import Any, TypeVar
from typing_extensions import deprecated

from torch import _weight_norm, norm_except_dim
from torch.nn.modules import Module
from torch.nn.parameter import Parameter, UninitializedParameter

__all__ = ["WeightNorm", "weight_norm", "remove_weight_norm"]


class WeightNorm:
    name: str
    dim: int

    def __init__(self, name: str, dim: int) -> None:
        if dim is None:
            dim = -1
        self.name = name
        self.dim = dim

    def compute_weight(self, module: Module) -> Any:
        # Recombine the magnitude ('<name>_g') and direction ('<name>_v')
        # parameters into the full weight tensor.
        g = getattr(module, self.name + "_g")
        v = getattr(module, self.name + "_v")
        return _weight_norm(v, g, self.dim)

    @staticmethod
    @deprecated(
        "`torch.nn.utils.weight_norm` is deprecated "
        "in favor of `torch.nn.utils.parametrizations.weight_norm`.",
        category=FutureWarning,
    )
    def apply(module, name: str, dim: int) -> "WeightNorm":
        for hook in module._forward_pre_hooks.values():
            if isinstance(hook, WeightNorm) and hook.name == name:
                raise RuntimeError(
                    f"Cannot register two weight_norm hooks on the same parameter {name}"
                )

        if dim is None:
            dim = -1

        fn = WeightNorm(name, dim)

        weight = getattr(module, name)
        if isinstance(weight, UninitializedParameter):
            raise ValueError(
                "The module passed to `WeightNorm` can't have uninitialized parameters. "
                "Make sure to run the dummy forward before applying weight normalization"
            )
        # Remove w from the parameter list.
        del module._parameters[name]

        # Add g and v as new parameters and express w as g/||v|| * v.
        module.register_parameter(
            name + "_g", Parameter(norm_except_dim(weight, 2, dim).data)
        )
        module.register_parameter(name + "_v", Parameter(weight.data))
        setattr(module, name, fn.compute_weight(module))

        # Recompute the weight before every forward().
        module.register_forward_pre_hook(fn)

        return fn

    def remove(self, module: Module) -> None:
        weight = self.compute_weight(module)
        delattr(module, self.name)
        del module._parameters[self.name + "_g"]
        del module._parameters[self.name + "_v"]
        setattr(module, self.name, Parameter(weight.data))

    def __call__(self, module: Module, inputs: Any) -> None:
        setattr(module, self.name, self.compute_weight(module))


T_module = TypeVar("T_module", bound=Module)


def weight_norm(module: T_module, name: str = "weight", dim: int = 0) -> T_module:
    r"""Apply weight normalization to a parameter in the given module.

    .. math::
         \mathbf{w} = g \dfrac{\mathbf{v}}{\|\mathbf{v}\|}

    Weight normalization is a reparameterization that decouples the magnitude
    of a weight tensor from its direction. This replaces the parameter specified
    by :attr:`name` (e.g. ``'weight'``) with two parameters: one specifying the magnitude
    (e.g. ``'weight_g'``) and one specifying the direction (e.g. ``'weight_v'``).
    Weight normalization is implemented via a hook that recomputes the weight
    tensor from the magnitude and direction before every :meth:`~Module.forward`
    call.

    By default, with ``dim=0``, the norm is computed independently per output
    channel/plane. To compute a norm over the entire weight tensor, use
    ``dim=None``.
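
    For example, with the default ``dim=0`` on a 2D weight, the recomputed
    weight equals the direction scaled by the stored per-row magnitude
    (an illustrative sketch, assuming ``nn`` is ``torch.nn``)::

        >>> m = weight_norm(nn.Linear(20, 40))
        >>> w = m.weight_g * (m.weight_v / m.weight_v.norm(dim=1, keepdim=True))
        >>> torch.allclose(m.weight, w)
        True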

    See https://arxiv.org/abs/1602.07868

    .. warning::

        This function is deprecated.  Use :func:`torch.nn.utils.parametrizations.weight_norm`
        which uses the modern parametrization API.  The new ``weight_norm`` is compatible
        with ``state_dict`` generated from old ``weight_norm``.

        Migration guide (a brief sketch follows the list):

        * The magnitude (``weight_g``) and direction (``weight_v``) are now expressed
          as ``parametrizations.weight.original0`` and ``parametrizations.weight.original1``
          respectively.  If this is bothering you, please comment on
          https://github.com/pytorch/pytorch/issues/102999

        * To remove the weight normalization reparametrization, use
          :func:`torch.nn.utils.parametrize.remove_parametrizations`.

        * The weight is no longer recomputed once at module forward; instead, it will
          be recomputed on every access.  To restore the old behavior, use
          :func:`torch.nn.utils.parametrize.cached` before invoking the module
          in question.
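
        A minimal migration sketch (assuming ``nn`` is ``torch.nn``; the
        ``original0``/``original1`` names follow the mapping described above)::

            >>> m = torch.nn.utils.parametrizations.weight_norm(nn.Linear(20, 40))
            >>> m.parametrizations.weight.original0.size()  # magnitude, formerly weight_g
            torch.Size([40, 1])
            >>> m.parametrizations.weight.original1.size()  # direction, formerly weight_v
            torch.Size([40, 20])
            >>> torch.nn.utils.parametrize.remove_parametrizations(m, "weight")
            Linear(in_features=20, out_features=40, bias=True)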

    Args:
        module (Module): containing module
        name (str, optional): name of weight parameter
        dim (int, optional): dimension over which to compute the norm

    Returns:
        The original module with the weight norm hook

    Example::

        >>> m = weight_norm(nn.Linear(20, 40), name='weight')
        >>> m
        Linear(in_features=20, out_features=40, bias=True)
        >>> m.weight_g.size()
        torch.Size([40, 1])
        >>> m.weight_v.size()
        torch.Size([40, 20])

    """
    WeightNorm.apply(module, name, dim)
    return module


def remove_weight_norm(module: T_module, name: str = "weight") -> T_module:
    r"""Remove the weight normalization reparameterization from a module.

    Args:
        module (Module): containing module
        name (str, optional): name of weight parameter

    Example:
        >>> m = weight_norm(nn.Linear(20, 40))
        >>> remove_weight_norm(m)
    zweight_norm of 'z' not found in )r   itemsr!   r
   r   r,   r#   )r   r   kr'   r   r   r   r      s   
r   N)r)   r   )r)   )__doc__typingr   r   Ztyping_extensionsr   Ztorchr   r   Ztorch.nn.modulesr   Ztorch.nn.parameterr   r	   __all__r
   r7   r2   r4   r   r   r   r   r   r   <module>   s   B@
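

if __name__ == "__main__":
    # Illustrative smoke-test sketch, not part of the library API: a hedged
    # example assuming `torch` and `torch.nn` are importable in the current
    # environment. Applying the hook emits a FutureWarning (see `apply` above).
    import torch.nn as nn

    m = weight_norm(nn.Linear(20, 40), name="weight")
    print(m.weight_g.size())  # expected: torch.Size([40, 1])
    print(m.weight_v.size())  # expected: torch.Size([40, 20])
    remove_weight_norm(m)
    print(hasattr(m, "weight_g"))  # expected: False after removal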