from typing import cast, Optional, Union

import torch
from torch import Tensor

from .optimizer import (
    _capturable_doc,
    _default_to_fused_or_foreach,
    _device_dtype_check_for_fused,
    _differentiable_doc,
    _disable_dynamo_if_unsupported,
    _foreach_doc,
    _fused_doc,
    _get_capturable_supported_devices,
    _get_scalar_dtype,
    _get_value,
    _maximize_doc,
    _params_doc,
    _stack_if_compiling,
    _to_scalar,
    _use_grad_for_differentiable,
    _view_as_real,
    DeviceDict,
    DeviceDtypeDict,
    Optimizer,
    ParamsT,
)

__all__ = ["Adam", "adam"]


class Adam(Optimizer):
    def __init__(
        self,
        params: ParamsT,
        lr: Union[float, Tensor] = 1e-3,
        betas: tuple[Union[float, Tensor], Union[float, Tensor]] = (0.9, 0.999),
        eps: float = 1e-8,
        weight_decay: float = 0,
        amsgrad: bool = False,
        *,
        foreach: Optional[bool] = None,
        maximize: bool = False,
        capturable: bool = False,
        differentiable: bool = False,
        fused: Optional[bool] = None,
        decoupled_weight_decay: bool = False,
    ):
        if isinstance(lr, Tensor):
            if foreach and not capturable:
                raise ValueError(
                    "lr as a Tensor is not supported for capturable=False and foreach=True"
                )
            if lr.numel() != 1:
                raise ValueError("Tensor lr must be 1-element")
        if not 0.0 <= lr:
            raise ValueError(f"Invalid learning rate: {lr}")
        if not 0.0 <= eps:
            raise ValueError(f"Invalid epsilon value: {eps}")
        if not 0.0 <= betas[0] < 1.0:
            raise ValueError(f"Invalid beta parameter at index 0: {betas[0]}")
        if not 0.0 <= betas[1] < 1.0:
            raise ValueError(f"Invalid beta parameter at index 1: {betas[1]}")
        if not 0.0 <= weight_decay:
            raise ValueError(f"Invalid weight_decay value: {weight_decay}")
        if not (
            (isinstance(betas[0], float) and isinstance(betas[1], float))
            or (isinstance(betas[0], Tensor) and isinstance(betas[1], Tensor))
        ):
            raise ValueError("betas must be either both floats or both Tensors")
        if isinstance(betas[0], Tensor):
            if not capturable and foreach:
                raise ValueError(
                    "betas[0] as a Tensor is not supported for capturable=False and foreach=True"
                )
            if betas[0].numel() != 1:
                raise ValueError("Tensor betas[0] must be 1-element")
        if isinstance(betas[1], Tensor):
            if not capturable and foreach:
                raise ValueError(
                    "betas[1] as a Tensor is not supported for capturable=False and foreach=True"
                )
            if betas[1].numel() != 1:
                raise ValueError("Tensor betas[1] must be 1-element")

        defaults = dict(
            lr=lr,
            betas=betas,
            eps=eps,
            weight_decay=weight_decay,
            amsgrad=amsgrad,
            maximize=maximize,
            foreach=foreach,
            capturable=capturable,
            differentiable=differentiable,
            fused=fused,
            decoupled_weight_decay=decoupled_weight_decay,
        )
        super().__init__(params, defaults)

        if fused:
            if differentiable:
                raise RuntimeError("`fused` does not support `differentiable`")
            self._step_supports_amp_scaling = True
            if foreach:
                raise RuntimeError("`fused` and `foreach` cannot be `True` together.")

    def __setstate__(self, state):
        super().__setstate__(state)
        for group in self.param_groups:
            group.setdefault("amsgrad", False)
            group.setdefault("maximize", False)
            group.setdefault("foreach", None)
            group.setdefault("capturable", False)
            group.setdefault("differentiable", False)
            group.setdefault("decoupled_weight_decay", False)
            fused = group.setdefault("fused", None)
            for p in group["params"]:
                p_state = self.state.get(p, [])
                if len(p_state) != 0 and not torch.is_tensor(p_state["step"]):
                    step_val = float(p_state["step"])
                    p_state["step"] = (
                        torch.tensor(
                            step_val,
                            dtype=_get_scalar_dtype(is_fused=fused),
                            device=p.device,
                        )
                        if group["capturable"] or group["fused"]
                        else torch.tensor(step_val, dtype=_get_scalar_dtype())
                    )

    def _init_group(
        self,
        group,
        params_with_grad,
        grads,
        exp_avgs,
        exp_avg_sqs,
        max_exp_avg_sqs,
        state_steps,
    ):
        has_complex = False
        for p in group["params"]:
            if p.grad is not None:
                has_complex |= torch.is_complex(p)
                params_with_grad.append(p)
                if p.grad.is_sparse:
                    raise RuntimeError(
                        "Adam does not support sparse gradients, please consider SparseAdam instead"
                    )
                grads.append(p.grad)

                state = self.state[p]
                # Lazy state initialization
                if len(state) == 0:
                    if group["fused"]:
                        _device_dtype_check_for_fused(p)
                    # Deliberately host `step` on CPU if both capturable and
                    # fused are off, since kernel launches are costly on CUDA
                    # and XLA.
                    state["step"] = (
                        torch.zeros(
                            (),
                            dtype=_get_scalar_dtype(is_fused=group["fused"]),
                            device=p.device,
                        )
                        if group["capturable"] or group["fused"]
                        else torch.tensor(0.0, dtype=_get_scalar_dtype())
                    )
                    # Exponential moving average of gradient values
                    state["exp_avg"] = torch.zeros_like(
                        p, memory_format=torch.preserve_format
                    )
                    # Exponential moving average of squared gradient values
                    state["exp_avg_sq"] = torch.zeros_like(
                        p, memory_format=torch.preserve_format
                    )
                    if group["amsgrad"]:
                        # Maintains max of all exp. moving avg. of sq. grad. values
                        state["max_exp_avg_sq"] = torch.zeros_like(
                            p, memory_format=torch.preserve_format
                        )

                exp_avgs.append(state["exp_avg"])
                exp_avg_sqs.append(state["exp_avg_sq"])

                if group["amsgrad"]:
                    max_exp_avg_sqs.append(state["max_exp_avg_sq"])
                if group["differentiable"] and state["step"].requires_grad:
                    raise RuntimeError(
                        "`requires_grad` is not supported for `step` in differentiable mode"
                    )

                # Foreach without capturable does not support a tensor lr
                if (
                    group["foreach"]
                    and torch.is_tensor(group["lr"])
                    and not group["capturable"]
                ):
                    raise RuntimeError(
                        "lr as a Tensor is not supported for capturable=False and foreach=True"
                    )

                state_steps.append(state["step"])
        return has_complex

    @_use_grad_for_differentiable
    def step(self, closure=None):
        """Perform a single optimization step.

        Args:
            closure (Callable, optional): A closure that reevaluates the model
                and returns the loss.
        """
        self._cuda_graph_capture_health_check()

        loss = None
        if closure is not None:
            with torch.enable_grad():
                loss = closure()

        for group in self.param_groups:
            params_with_grad: list[Tensor] = []
            grads: list[Tensor] = []
            exp_avgs: list[Tensor] = []
            exp_avg_sqs: list[Tensor] = []
            max_exp_avg_sqs: list[Tensor] = []
            state_steps: list[Tensor] = []
            beta1, beta2 = group["betas"]

            has_complex = self._init_group(
                group,
                params_with_grad,
                grads,
                exp_avgs,
                exp_avg_sqs,
                max_exp_avg_sqs,
                state_steps,
            )

            adam(
                params_with_grad,
                grads,
                exp_avgs,
                exp_avg_sqs,
                max_exp_avg_sqs,
                state_steps,
                amsgrad=group["amsgrad"],
                has_complex=has_complex,
                beta1=beta1,
                beta2=beta2,
                lr=group["lr"],
                weight_decay=group["weight_decay"],
                eps=group["eps"],
                maximize=group["maximize"],
                foreach=group["foreach"],
                capturable=group["capturable"],
                differentiable=group["differentiable"],
                fused=group["fused"],
                grad_scale=getattr(self, "grad_scale", None),
                found_inf=getattr(self, "found_inf", None),
                decoupled_weight_decay=group["decoupled_weight_decay"],
            )

        return loss


Adam.__doc__ = (
    r"""Implements Adam algorithm.

    .. math::
       \begin{aligned}
            &\rule{110mm}{0.4pt}                                                                 \\
            &\textbf{input}      : \gamma \text{ (lr)}, \beta_1, \beta_2
                \text{ (betas)},\theta_0 \text{ (params)},f(\theta) \text{ (objective)}          \\
            &\hspace{13mm}      \lambda \text{ (weight decay)},  \: \textit{amsgrad},
                \:\textit{maximize},  \: \epsilon \text{ (epsilon)}                              \\
            &\textbf{initialize} :  m_0 \leftarrow 0 \text{ ( first moment)},
                v_0\leftarrow 0 \text{ (second moment)},\: v_0^{max}\leftarrow 0          \\[-1.ex]
            &\rule{110mm}{0.4pt}                                                                 \\
            &\textbf{for} \: t=1 \: \textbf{to} \: \ldots \: \textbf{do}                         \\

            &\hspace{5mm}\textbf{if} \: \textit{maximize}:                                       \\
            &\hspace{10mm}g_t           \leftarrow   -\nabla_{\theta} f_t (\theta_{t-1})         \\
            &\hspace{5mm}\textbf{else}                                                           \\
            &\hspace{10mm}g_t           \leftarrow   \nabla_{\theta} f_t (\theta_{t-1})          \\
            &\hspace{5mm}\textbf{if} \: \lambda \neq 0                                           \\
            &\hspace{10mm} g_t \leftarrow g_t + \lambda  \theta_{t-1}                            \\
            &\hspace{5mm}m_t           \leftarrow   \beta_1 m_{t-1} + (1 - \beta_1) g_t          \\
            &\hspace{5mm}v_t           \leftarrow   \beta_2 v_{t-1} + (1-\beta_2) g^2_t          \\
            &\hspace{5mm}\widehat{m_t} \leftarrow   m_t/\big(1-\beta_1^t \big)                   \\
            &\hspace{5mm}\textbf{if} \: amsgrad                                                  \\
            &\hspace{10mm} v_t^{max} \leftarrow \mathrm{max}(v_{t-1}^{max},v_t)                  \\
            &\hspace{10mm}\widehat{v_t} \leftarrow v_t^{max}/\big(1-\beta_2^t \big)              \\
            &\hspace{5mm}\textbf{else}                                                           \\
            &\hspace{10mm}\widehat{v_t} \leftarrow   v_t/\big(1-\beta_2^t \big)                  \\
            &\hspace{5mm}\theta_t \leftarrow \theta_{t-1} - \gamma \widehat{m_t}/
                \big(\sqrt{\widehat{v_t}} + \epsilon \big)                                       \\
            &\rule{110mm}{0.4pt}                                                          \\[-1.ex]
            &\bf{return} \:  \theta_t                                                     \\[-1.ex]
            &\rule{110mm}{0.4pt}                                                          \\[-1.ex]
       \end{aligned}

    For further details regarding the algorithm we refer to `Adam: A Method for Stochastic Optimization`_.
    """
    + rf"""
    Args:
        {_params_doc}
        lr (float, Tensor, optional): learning rate (default: 1e-3). A tensor LR
            is not yet supported for all our implementations. Please use a float
            LR if you are not also specifying fused=True or capturable=True.
        betas (Tuple[float, float], optional): coefficients used for computing
            running averages of gradient and its square (default: (0.9, 0.999))
        eps (float, optional): term added to the denominator to improve
            numerical stability (default: 1e-8)
        weight_decay (float, optional): weight decay (L2 penalty) (default: 0)
        decoupled_weight_decay (bool, optional): if True, this optimizer is
            equivalent to AdamW and the algorithm will not accumulate weight
            decay in the momentum nor variance. (default: False)
        amsgrad (bool, optional): whether to use the AMSGrad variant of this
            algorithm from the paper `On the Convergence of Adam and Beyond`_
            (default: False)
        {_foreach_doc}
        {_maximize_doc}
        {_capturable_doc}
        {_differentiable_doc}
        {_fused_doc}
    """
    + r"""
    .. Note::
        A prototype implementation of Adam and AdamW for MPS supports `torch.float32` and `torch.float16`.
    .. _Adam\: A Method for Stochastic Optimization:
        https://arxiv.org/abs/1412.6980
    .. _On the Convergence of Adam and Beyond:
        https://openreview.net/forum?id=ryQu7f-RZ

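    A minimal usage sketch (``model``, ``loss_fn``, ``input`` and ``target``
    below are illustrative placeholders, not part of this module):

    Example::

        >>> optimizer = torch.optim.Adam(model.parameters(), lr=1e-3)
        >>> optimizer.zero_grad()
        >>> loss_fn(model(input), target).backward()
        >>> optimizer.step()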
    """
)


def _single_tensor_adam(
    params: list[Tensor],
    grads: list[Tensor],
    exp_avgs: list[Tensor],
    exp_avg_sqs: list[Tensor],
    max_exp_avg_sqs: list[Tensor],
    state_steps: list[Tensor],
    grad_scale: Optional[Tensor],
    found_inf: Optional[Tensor],
    *,
    amsgrad: bool,
    has_complex: bool,
    beta1: Union[float, Tensor],
    beta2: Union[float, Tensor],
    lr: Union[float, Tensor],
    weight_decay: float,
    eps: float,
    maximize: bool,
    capturable: bool,
    differentiable: bool,
    decoupled_weight_decay: bool,
):
    assert grad_scale is None and found_inf is None

    if torch.jit.is_scripting():
        # JIT does not realize the ops below have overloads for both float and
        # Tensor lrs/betas, so assert the common float case.
        assert isinstance(lr, float)
        assert isinstance(beta1, float)
        assert isinstance(beta2, float)
    else:
        lr = _to_scalar(lr)

    # Only shuffle beta1 across devices/dtypes when it is a Tensor; otherwise
    # treat it as a plain scalar.
    if isinstance(beta1, Tensor):
        beta1_dict: Optional[DeviceDtypeDict] = {(beta1.device, beta1.dtype): beta1}
    else:
        beta1_dict = None

    for i, param in enumerate(params):
        grad = grads[i] if not maximize else -grads[i]
        exp_avg = exp_avgs[i]
        exp_avg_sq = exp_avg_sqs[i]
        step_t = state_steps[i]

        # When compiling, the compiler handles the cudagraph check itself.
        if not torch.compiler.is_compiling() and capturable:
            capturable_supported_devices = _get_capturable_supported_devices()
            assert (
                param.device.type == step_t.device.type
                and param.device.type in capturable_supported_devices
            ), f"If capturable=True, params and state_steps must be on supported devices: {capturable_supported_devices}."

        # update step
        step_t += 1

        if weight_decay != 0:
            if decoupled_weight_decay:
                # Perform stepweight decay
                param.mul_(1 - lr * weight_decay)
            else:
                # Nested if is necessary to bypass jitscript rules
                if differentiable and isinstance(weight_decay, Tensor):
                    if weight_decay.requires_grad:
                        grad = grad.addcmul_(param.clone(), weight_decay)
                    else:
                        grad = grad.add(param, alpha=weight_decay)
                else:
                    grad = grad.add(param, alpha=weight_decay)

        if torch.is_complex(param):
            grad = torch.view_as_real(grad)
            exp_avg = torch.view_as_real(exp_avg)
            exp_avg_sq = torch.view_as_real(exp_avg_sq)
            if amsgrad:
                max_exp_avg_sqs[i] = torch.view_as_real(max_exp_avg_sqs[i])
            param = torch.view_as_real(param)

        device = param.device

        if beta1_dict is not None:
            dtype = param.dtype
            key = (device, dtype)
            if key not in beta1_dict:
                beta1_dict[key] = beta1.to(  # type: ignore[union-attr]
                    device=device, dtype=dtype, non_blocking=True
                )
            device_beta1: Union[float, Tensor] = beta1_dict[key]
        else:
            device_beta1 = beta1

        # Decay the first and second moment running average coefficient
        exp_avg.lerp_(grad, 1 - device_beta1)

        # Nested if is necessary to bypass jitscript rules
        if differentiable and isinstance(beta2, Tensor):
            if beta2.requires_grad:
                # Use lerp so the update stays differentiable w.r.t. beta2
                # (addcmul_'s `value` cannot be a tensor).
                exp_avg_sq.lerp_(torch.square(grad), weight=1 - beta2)
            else:
                exp_avg_sq.mul_(beta2).addcmul_(grad, grad.conj(), value=1 - beta2)
        else:
            exp_avg_sq.mul_(beta2).addcmul_(grad, grad.conj(), value=1 - beta2)

        if capturable or differentiable:
            step = step_t

            # Nested if is necessary to bypass jitscript rules
            if differentiable and isinstance(beta1, Tensor):
                if beta1.requires_grad:
                    bias_correction1 = 1 - beta1 ** step.clone()
                else:
                    bias_correction1 = 1 - beta1**step
            else:
                bias_correction1 = 1 - beta1**step

            if differentiable and isinstance(beta2, Tensor):
                if beta2.requires_grad:
                    bias_correction2 = 1 - beta2 ** step.clone()
                else:
                    bias_correction2 = 1 - beta2**step
            else:
                bias_correction2 = 1 - beta2**step

            step_size = lr / bias_correction1
            step_size_neg = step_size.neg()

            bias_correction2_sqrt = bias_correction2.sqrt()

            if amsgrad:
                # Maintains the maximum of all 2nd moment running avg. till now
                if differentiable:
                    max_exp_avg_sq = max_exp_avg_sqs[i].clone()
                else:
                    max_exp_avg_sq = max_exp_avg_sqs[i]

                max_exp_avg_sqs[i].copy_(torch.maximum(max_exp_avg_sq, exp_avg_sq))

                # Uses the max. for normalizing the running avg. of the
                # gradient. Folds the 1-elem step_size math into the
                # denominator to avoid an extra param-sized tensor.
                denom = (
                    max_exp_avg_sqs[i].sqrt() / (bias_correction2_sqrt * step_size_neg)
                ).add_(eps / step_size_neg)
            else:
                denom = (
                    exp_avg_sq.sqrt() / (bias_correction2_sqrt * step_size_neg)
                ).add_(eps / step_size_neg)

            if differentiable:
                param.addcdiv_(exp_avg.clone(), denom)
            else:
                param.addcdiv_(exp_avg, denom)
        else:
            step = _get_value(step_t)

            bias_correction1 = 1 - beta1**step
            bias_correction2 = 1 - beta2**step

            step_size = lr / bias_correction1

            bias_correction2_sqrt = bias_correction2**0.5

            if amsgrad:
                # Maintains the maximum of all 2nd moment running avg. till now
                torch.maximum(max_exp_avg_sqs[i], exp_avg_sq, out=max_exp_avg_sqs[i])

                # Use the max. for normalizing running avg. of gradient
                denom = (max_exp_avg_sqs[i].sqrt() / bias_correction2_sqrt).add_(eps)
            else:
                denom = (exp_avg_sq.sqrt() / bias_correction2_sqrt).add_(eps)

            param.addcdiv_(exp_avg, denom, value=-step_size)

        # Lastly, switch back to complex view
        if amsgrad and torch.is_complex(params[i]):
            max_exp_avg_sqs[i] = torch.view_as_complex(max_exp_avg_sqs[i])


def _multi_tensor_adam(
    params: list[Tensor],
    grads: list[Tensor],
    exp_avgs: list[Tensor],
    exp_avg_sqs: list[Tensor],
    max_exp_avg_sqs: list[Tensor],
    state_steps: list[Tensor],
    grad_scale: Optional[Tensor],
    found_inf: Optional[Tensor],
    *,
    amsgrad: bool,
    has_complex: bool,
    beta1: Union[float, Tensor],
    beta2: Union[float, Tensor],
    lr: Union[float, Tensor],
    weight_decay: float,
    eps: float,
    maximize: bool,
    capturable: bool,
    differentiable: bool,
    decoupled_weight_decay: bool,
):
    if len(params) == 0:
        return

    if isinstance(lr, Tensor):
        if not capturable:
            raise ValueError(
                "lr as a Tensor is not supported for capturable=False and foreach=True"
            )
        if lr.numel() != 1:
            raise ValueError("Tensor lr must be 1-element")

    if isinstance(beta1, Tensor):
        if not capturable:
            raise ValueError(
                "beta1 as a Tensor is not supported for capturable=False and foreach=True"
            )
        if beta1.numel() != 1:
            raise ValueError("Tensor beta1 must be 1-element")

    if isinstance(beta2, Tensor):
        if not capturable:
            raise ValueError(
                "beta2 as a Tensor is not supported for capturable=False and foreach=True"
            )
        if beta2.numel() != 1:
            raise ValueError("Tensor beta2 must be 1-element")

    # When compiling, the compiler handles the cudagraph check itself.
    if not torch.compiler.is_compiling() and capturable:
        capturable_supported_devices = _get_capturable_supported_devices(
            supports_xla=False
        )
        assert all(
            p.device.type == step.device.type
            and p.device.type in capturable_supported_devices
            for p, step in zip(params, state_steps)
        ), f"If capturable=True, params and state_steps must be on supported devices: {capturable_supported_devices}."

    assert grad_scale is None and found_inf is None

    assert not differentiable, "_foreach ops don't support autograd"

    grouped_tensors = Optimizer._group_tensors_by_device_and_dtype(
        [params, grads, exp_avgs, exp_avg_sqs, max_exp_avg_sqs, state_steps]  # type: ignore[list-item]
    )

    # Only shuffle beta1 across devices when it is a non-CPU Tensor; otherwise
    # treat it as a plain scalar.
    beta1_dict: Optional[DeviceDict] = (
        {beta1.device: beta1}
        if isinstance(beta1, Tensor) and str(beta1.device) != "cpu"
        else None
    )

    for (
        device_params_,
        device_grads_,
        device_exp_avgs_,
        device_exp_avg_sqs_,
        device_max_exp_avg_sqs_,
        device_state_steps_,
    ), _ in grouped_tensors.values():
        device_params = cast(list[Tensor], device_params_)
        device_grads = cast(list[Tensor], device_grads_)
        device_exp_avgs = cast(list[Tensor], device_exp_avgs_)
        device_exp_avg_sqs = cast(list[Tensor], device_exp_avg_sqs_)
        device_state_steps = cast(list[Tensor], device_state_steps_)

        device = device_params[0].device
        if beta1_dict is not None and device not in beta1_dict:
            beta1_dict[device] = beta1.to(device=device, non_blocking=True)  # type: ignore[union-attr]

        device_beta1 = beta1_dict[device] if beta1_dict else beta1

        if has_complex:
            if amsgrad:
                device_max_exp_avg_sqs = cast(list[Tensor], device_max_exp_avg_sqs_)
                _view_as_real(
                    device_params,
                    device_grads,
                    device_exp_avgs,
                    device_exp_avg_sqs,
                    device_max_exp_avg_sqs,
                )
            else:
                _view_as_real(
                    device_params, device_grads, device_exp_avgs, device_exp_avg_sqs
                )

        if maximize:
            device_grads = torch._foreach_neg(device_grads)  # type: ignore[assignment]

        # Update steps. If steps are on CPU, foreach falls back to a slow
        # per-tensor loop that re-wraps 1 as a Tensor on every call, so wrap it
        # once here; alpha selects the right overload.
        if not torch.compiler.is_compiling() and device_state_steps[0].is_cpu:
            torch._foreach_add_(
                device_state_steps, torch.tensor(1.0, device="cpu"), alpha=1.0
            )
        else:
            torch._foreach_add_(device_state_steps, 1)

        if weight_decay != 0:
            if decoupled_weight_decay:
                # Perform stepweight decay
                torch._foreach_mul_(device_params, 1 - lr * weight_decay)
            else:
                # Re-use the intermediate memory (device_grads) already
                # allocated for maximize
                if maximize:
                    torch._foreach_add_(device_grads, device_params, alpha=weight_decay)
                else:
                    device_grads = torch._foreach_add(  # type: ignore[assignment]
                        device_grads, device_params, alpha=weight_decay
                    )

        # Decay the first and second moment running average coefficient
        torch._foreach_lerp_(device_exp_avgs, device_grads, 1 - device_beta1)

        torch._foreach_mul_(device_exp_avg_sqs, beta2)

        # _foreach_addcmul_ does not accept a Tensor as the scalar `value`, so
        # split out the multiply when beta2 is a Tensor.
        if isinstance(beta2, torch.Tensor):
            scaled_device_grads = torch._foreach_mul(device_grads, 1 - beta2)  # type: ignore[assignment]
            value = 1.0
        else:
            scaled_device_grads = device_grads  # type: ignore[assignment]
            value = 1 - beta2

        torch._foreach_addcmul_(
            device_exp_avg_sqs, scaled_device_grads, device_grads, value
        )

        # Delete the local intermediates to save on peak memory.
        del device_grads
        del scaled_device_grads

        bias_correction1: Union[tuple[Tensor, ...], list[Tensor]]
        bias_correction2: Union[tuple[Tensor, ...], list[Tensor]]
        bias_correction2_sqrt: Union[tuple[Tensor, ...], list[Tensor]]

        if capturable:
            bias_correction1 = torch._foreach_pow(beta1, device_state_steps)  # type: ignore[arg-type]
            bias_correction2 = torch._foreach_pow(beta2, device_state_steps)  # type: ignore[arg-type]
            # foreach_sub doesn't allow a scalar as the first arg
            torch._foreach_sub_(bias_correction1, 1)
            torch._foreach_sub_(bias_correction2, 1)
            # bias_correction1 is not negated here because it would need to be
            # negated again later anyway.
            torch._foreach_neg_(bias_correction2)

            # foreach_div doesn't allow a scalar as the first arg
            torch._foreach_div_(bias_correction1, lr)
            torch._foreach_reciprocal_(bias_correction1)

            torch._foreach_sqrt_(bias_correction2)

            # Re-assign for clarity; to keep intermediates minimal we maintain
            #   step_size = - lr / (1 - beta1 ^ t)
            #   bias_correction2_sqrt = sqrt(1 - beta2 ^ t)
            step_size = bias_correction1
            bias_correction2_sqrt = bias_correction2

            if amsgrad:
                device_max_exp_avg_sqs = cast(list[Tensor], device_max_exp_avg_sqs_)
                # Maintains the maximum of all 2nd moment running avg. till now
                torch._foreach_maximum_(device_max_exp_avg_sqs, device_exp_avg_sqs)

                # Set intermediate to the max. for normalizing running avg. of gradient
                exp_avg_sq_sqrt = torch._foreach_sqrt(device_max_exp_avg_sqs)
            else:
                exp_avg_sq_sqrt = torch._foreach_sqrt(device_exp_avg_sqs)

            torch._foreach_div_(exp_avg_sq_sqrt, bias_correction2_sqrt)
            torch._foreach_add_(exp_avg_sq_sqrt, eps)
            torch._foreach_div_(exp_avg_sq_sqrt, step_size)

            # At this point, exp_avg_sq_sqrt =
            #   - (1 - beta1^t) * [sqrt(exp_avg_sq / (1 - beta2^t)) + eps] / lr
            torch._foreach_addcdiv_(device_params, device_exp_avgs, exp_avg_sq_sqrt)
        else:
            bias_correction1 = [
                1 - beta1 ** _get_value(step) for step in device_state_steps  # type: ignore[operator]
            ]
            bias_correction2 = [
                1 - beta2 ** _get_value(step) for step in device_state_steps  # type: ignore[operator]
            ]

            step_size = _stack_if_compiling([(lr / bc) * -1 for bc in bias_correction1])

            bias_correction2_sqrt = [bc**0.5 for bc in bias_correction2]  # type: ignore[arg-type]

            if amsgrad:
                device_max_exp_avg_sqs = cast(list[Tensor], device_max_exp_avg_sqs_)
                # Maintains the maximum of all 2nd moment running avg. till now
                torch._foreach_maximum_(device_max_exp_avg_sqs, device_exp_avg_sqs)

                # Use the max. for normalizing running avg. of gradient
                exp_avg_sq_sqrt = torch._foreach_sqrt(device_max_exp_avg_sqs)
            else:
                exp_avg_sq_sqrt = torch._foreach_sqrt(device_exp_avg_sqs)

            torch._foreach_div_(exp_avg_sq_sqrt, bias_correction2_sqrt)
            torch._foreach_add_(exp_avg_sq_sqrt, eps)
            torch._foreach_addcdiv_(
                device_params, device_exp_avgs, exp_avg_sq_sqrt, step_size  # type: ignore[arg-type]
            )


def _fused_adam(
    params: list[Tensor],
    grads: list[Tensor],
    exp_avgs: list[Tensor],
    exp_avg_sqs: list[Tensor],
    max_exp_avg_sqs: list[Tensor],
    state_steps: list[Tensor],
    grad_scale: Optional[Tensor],
    found_inf: Optional[Tensor],
    *,
    amsgrad: bool,
    has_complex: bool,  # Needed for consistency.
    beta1: float,
    beta2: float,
    lr: Union[float, Tensor],
    weight_decay: float,
    eps: float,
    maximize: bool,
    capturable: bool,  # Needed for consistency.
    differentiable: bool,
    decoupled_weight_decay: bool,
) -> None:
    if not params:
        return
    if differentiable:
        raise RuntimeError("Adam with fused=True does not support differentiable=True")

    grad_scale_dict: DeviceDict = (
        {grad_scale.device: grad_scale} if grad_scale is not None else {}
    )
    found_inf_dict: DeviceDict = (
        {found_inf.device: found_inf} if found_inf is not None else {}
    )

    # Only shuffle the lr across devices when it is a non-CPU Tensor; otherwise
    # treat it as a plain scalar.
    lr_dict: Optional[DeviceDict] = (
        {lr.device: lr} if isinstance(lr, Tensor) and str(lr.device) != "cpu" else None
    )

    grouped_tensors = Optimizer._group_tensors_by_device_and_dtype(
        [params, grads, exp_avgs, exp_avg_sqs, max_exp_avg_sqs, state_steps]  # type: ignore[list-item]
    )
    for (device, _), (
        (
            device_params_,
            device_grads_,
            device_exp_avgs_,
            device_exp_avg_sqs_,
            device_max_exp_avg_sqs,
            device_state_steps_,
        ),
        _,
    ) in grouped_tensors.items():
        device_params = cast(list[Tensor], device_params_)
        device_grads = cast(list[Tensor], device_grads_)
        device_exp_avgs = cast(list[Tensor], device_exp_avgs_)
        device_exp_avg_sqs = cast(list[Tensor], device_exp_avg_sqs_)
        device_state_steps = cast(list[Tensor], device_state_steps_)

        device_grad_scale, device_found_inf = None, None
        if grad_scale is not None:
            device_grad_scale = grad_scale_dict.setdefault(
                device, grad_scale.to(device, non_blocking=True)
            )
        if found_inf is not None:
            device_found_inf = found_inf_dict.setdefault(
                device, found_inf.to(device, non_blocking=True)
            )
        if lr_dict is not None and device not in lr_dict:
            lr = lr_dict.setdefault(
                device, lr.to(device=device, non_blocking=True)  # type: ignore[union-attr]
            )
        torch._foreach_add_(device_state_steps, 1)
        func = torch._fused_adamw_ if decoupled_weight_decay else torch._fused_adam_
        func(
            device_params,
            device_grads,
            device_exp_avgs,
            device_exp_avg_sqs,
            device_max_exp_avg_sqs,  # type: ignore[arg-type]
            device_state_steps,
            amsgrad=amsgrad,
            lr=lr,  # type: ignore[arg-type]
            beta1=beta1,
            beta2=beta2,
            weight_decay=weight_decay,
            eps=eps,
            maximize=maximize,
            grad_scale=device_grad_scale,
            found_inf=device_found_inf,
        )
        if device_found_inf is not None:
            torch._foreach_sub_(
                device_state_steps, [device_found_inf] * len(device_state_steps)
            )


@_disable_dynamo_if_unsupported(single_tensor_fn=_single_tensor_adam)
def adam(
    params: list[Tensor],
    grads: list[Tensor],
    exp_avgs: list[Tensor],
    exp_avg_sqs: list[Tensor],
    max_exp_avg_sqs: list[Tensor],
    state_steps: list[Tensor],
    # kwonly args with defaults are not supported by functions compiled with
    # torchscript, so these stay positional-or-keyword for now.
    foreach: Optional[bool] = None,
    capturable: bool = False,
    differentiable: bool = False,
    fused: Optional[bool] = None,
    grad_scale: Optional[Tensor] = None,
    found_inf: Optional[Tensor] = None,
    has_complex: bool = False,
    decoupled_weight_decay: bool = False,
    *,
    amsgrad: bool,
    beta1: Union[float, Tensor],
    beta2: Union[float, Tensor],
    lr: Union[float, Tensor],
    weight_decay: float,
    eps: float,
    maximize: bool,
):
    r"""Functional API that performs Adam algorithm computation.

    See :class:`~torch.optim.Adam` for details.
    """
    # Respect when the user inputs False/True for foreach or fused; only change
    # the default when neither has been user-specified.
    if fused is None and foreach is None:
        _, foreach = _default_to_fused_or_foreach(
            params, differentiable, use_fused=False
        )
        # Do not flip on foreach for the unsupported case where lr is a Tensor
        # and capturable=False.
        if foreach and isinstance(lr, Tensor) and not capturable:
            foreach = False
    if fused is None:
        fused = False
    if foreach is None:
        foreach = False

    # This check is slow during compilation, so it is skipped there.
    if not torch.compiler.is_compiling() and not all(
        isinstance(t, torch.Tensor) for t in state_steps
    ):
        raise RuntimeError(
            "API has changed, `state_steps` argument must contain a list of singleton tensors"
        )

    if foreach and torch.jit.is_scripting():
        raise RuntimeError("torch.jit.script not supported with foreach optimizers")
    if fused and torch.jit.is_scripting():
        raise RuntimeError("torch.jit.script not supported with fused optimizers")

    if fused and not torch.jit.is_scripting():
        func = _fused_adam
    elif foreach and not torch.jit.is_scripting():
        func = _multi_tensor_adam
    else:
        func = _single_tensor_adam

    func(
        params,
        grads,
        exp_avgs,
        exp_avg_sqs,
        max_exp_avg_sqs,
        state_steps,
        amsgrad=amsgrad,
        has_complex=has_complex,
        beta1=beta1,
        beta2=beta2,
        lr=lr,
        weight_decay=weight_decay,
        eps=eps,
        maximize=maximize,
        capturable=capturable,
        differentiable=differentiable,
        grad_scale=grad_scale,
        found_inf=found_inf,
        decoupled_weight_decay=decoupled_weight_decay,
    )
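# A minimal sketch of driving the functional `adam` API above directly, for
# illustration only (most users should use the `Adam` class, which creates and
# manages this state automatically). The tensors below are made-up
# placeholders, not part of this module:
#
#     param = torch.randn(2, 2, requires_grad=True)
#     grad = torch.randn(2, 2)
#     adam(
#         [param], [grad],
#         exp_avgs=[torch.zeros(2, 2)],
#         exp_avg_sqs=[torch.zeros(2, 2)],
#         max_exp_avg_sqs=[],
#         state_steps=[torch.tensor(0.0)],
#         amsgrad=False, beta1=0.9, beta2=0.999,
#         lr=1e-3, weight_decay=0.0, eps=1e-8, maximize=False,
#     )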