# mypy: allow-untyped-defs
from typing import Any, cast, Optional, Union

import torch
from torch import Tensor

from .optimizer import (
    _capturable_doc,
    _default_to_fused_or_foreach,
    _differentiable_doc,
    _disable_dynamo_if_unsupported,
    _foreach_doc,
    _get_capturable_supported_devices,
    _get_scalar_dtype,
    _maximize_doc,
    _params_doc,
    _use_grad_for_differentiable,
    _view_as_real,
    Optimizer,
    ParamsT,
)

__all__ = ["Adadelta", "adadelta"]


class Adadelta(Optimizer):
    def __init__(
        self,
        params: ParamsT,
        lr: Union[float, Tensor] = 1.0,
        rho: float = 0.9,
        eps: float = 1e-6,
        weight_decay: float = 0,
        foreach: Optional[bool] = None,
        *,
        capturable: bool = False,
        maximize: bool = False,
        differentiable: bool = False,
    ):
        if isinstance(lr, Tensor) and lr.numel() != 1:
            raise ValueError("Tensor lr must be 1-element")
        if not 0.0 <= lr:
            raise ValueError(f"Invalid learning rate: {lr}")
        if not 0.0 <= rho <= 1.0:
            raise ValueError(f"Invalid rho value: {rho}")
        if not 0.0 <= eps:
            raise ValueError(f"Invalid epsilon value: {eps}")
        if not 0.0 <= weight_decay:
            raise ValueError(f"Invalid weight_decay value: {weight_decay}")

        defaults = dict(
            lr=lr, rho=rho, eps=eps, weight_decay=weight_decay, maximize=maximize,
            capturable=capturable, differentiable=differentiable, foreach=foreach,
        )
        super().__init__(params, defaults)

    def __setstate__(self, state):
        super().__setstate__(state)
        for group in self.param_groups:
            group.setdefault("foreach", None)
            group.setdefault("maximize", False)
            group.setdefault("differentiable", False)
            group.setdefault("capturable", False)
            for p in group["params"]:
                p_state = self.state.get(p, [])
                if len(p_state) != 0 and not torch.is_tensor(p_state["step"]):
                    step_val = float(p_state["step"])
                    p_state["step"] = (
                        torch.tensor(step_val, dtype=_get_scalar_dtype(), device=p.device)
                        if group["capturable"]
                        else torch.tensor(step_val, dtype=_get_scalar_dtype())
                    )

    def _init_group(
        self,
        group: dict[str, Any],
        params_with_grad: list[Tensor],
        grads: list[Tensor],
        square_avgs: list[Tensor],
        acc_deltas: list[Tensor],
        state_steps: list[Tensor],
    ):
        has_complex = False
        for p in group["params"]:
            if p.grad is None:
                continue
            has_complex |= torch.is_complex(p)
            params_with_grad.append(p)
            if p.grad.is_sparse:
                raise RuntimeError("Adadelta does not support sparse gradients")
            grads.append(p.grad)

            state = self.state[p]

            # Lazy state initialization
            if len(state) == 0:
                state["step"] = (
                    torch.zeros((), dtype=_get_scalar_dtype(), device=p.device)
                    if group["capturable"]
                    else torch.zeros((), dtype=_get_scalar_dtype())
                )
                state["square_avg"] = torch.zeros_like(p, memory_format=torch.preserve_format)
                state["acc_delta"] = torch.zeros_like(p, memory_format=torch.preserve_format)

            square_avgs.append(state["square_avg"])
            acc_deltas.append(state["acc_delta"])
            state_steps.append(state["step"])

        return has_complex

    @_use_grad_for_differentiable
    def step(self, closure=None):
        """Perform a single optimization step.

        Args:
            closure (Callable, optional): A closure that reevaluates the model
                and returns the loss.
        """
        self._cuda_graph_capture_health_check()

        loss = None
        if closure is not None:
            with torch.enable_grad():
                loss = closure()

        for group in self.param_groups:
            params_with_grad: list[Tensor] = []
            grads: list[Tensor] = []
            square_avgs: list[Tensor] = []
            acc_deltas: list[Tensor] = []
            state_steps: list[Tensor] = []

            has_complex = self._init_group(
                group, params_with_grad, grads, square_avgs, acc_deltas, state_steps
            )

            adadelta(
                params_with_grad, grads, square_avgs, acc_deltas, state_steps,
                lr=group["lr"], rho=group["rho"], eps=group["eps"],
                weight_decay=group["weight_decay"], foreach=group["foreach"],
                maximize=group["maximize"], differentiable=group["differentiable"],
                capturable=group["capturable"], has_complex=has_complex,
            )

        return loss


Adadelta.__doc__ = (
    r"""Implements Adadelta algorithm.

    .. math::
       \begin{aligned}
            &\rule{110mm}{0.4pt}                                                                 \\
            &\textbf{input}      : \gamma \text{ (lr)}, \: \theta_0 \text{ (params)},
                \: f(\theta) \text{ (objective)}, \: \rho \text{ (decay)},
                \: \lambda \text{ (weight decay)}                                                \\
            &\textbf{initialize} :  v_0  \leftarrow 0 \: \text{ (square avg)},
                \: u_0 \leftarrow 0 \: \text{ (accumulate variables)}                     \\[-1.ex]
            &\rule{110mm}{0.4pt}                                                                 \\
            &\textbf{for} \: t=1 \: \textbf{to} \: \ldots \: \textbf{do}                         \\
            &\hspace{5mm}g_t           \leftarrow   \nabla_{\theta} f_t (\theta_{t-1})           \\
            &\hspace{5mm}if \: \lambda \neq 0                                                    \\
            &\hspace{10mm} g_t \leftarrow g_t + \lambda  \theta_{t-1}                            \\
            &\hspace{5mm} v_t      \leftarrow v_{t-1} \rho + g^2_t (1 - \rho)                    \\
            &\hspace{5mm}\Delta x_t    \leftarrow   \frac{\sqrt{u_{t-1} +
                \epsilon }}{ \sqrt{v_t + \epsilon}  }g_t \hspace{21mm}                           \\
            &\hspace{5mm} u_t  \leftarrow   u_{t-1}  \rho +
                 \Delta x^2_t  (1 - \rho)                                                        \\
            &\hspace{5mm}\theta_t      \leftarrow   \theta_{t-1} - \gamma  \Delta x_t            \\
            &\rule{110mm}{0.4pt}                                                          \\[-1.ex]
            &\bf{return} \:  \theta_t                                                     \\[-1.ex]
            &\rule{110mm}{0.4pt}                                                          \\[-1.ex]
       \end{aligned}

    For further details regarding the algorithm we refer to `ADADELTA: An Adaptive Learning Rate Method`_.
    """
    + rf"""
    Args:
        {_params_doc}
        lr (float, Tensor, optional): coefficient that scales delta before it is applied
            to the parameters (default: 1.0)
        rho (float, optional): coefficient used for computing a running average
            of squared gradients (default: 0.9). A higher value of `rho` will
            result in a slower average, which can be helpful for preventing
            oscillations in the learning process.
        eps (float, optional): term added to the denominator to improve
            numerical stability (default: 1e-6).
        weight_decay (float, optional): weight decay (L2 penalty) (default: 0)
        {_foreach_doc}
        {_capturable_doc}
        {_maximize_doc}
        {_differentiable_doc}

    .. _ADADELTA\: An Adaptive Learning Rate Method:
        https://arxiv.org/abs/1212.5701

    """
)


def _single_tensor_adadelta(
    params: list[Tensor], grads: list[Tensor], square_avgs: list[Tensor],
    acc_deltas: list[Tensor], state_steps: list[Tensor], *, lr: float, rho: float,
    eps: float, weight_decay: float, maximize: bool, differentiable: bool,
    capturable: bool, has_complex: bool,
):
    # If compiling, the compiler will handle cudagraph checks
    if not torch.compiler.is_compiling() and capturable:
        capturable_supported_devices = _get_capturable_supported_devices(supports_xla=False)
        assert all(
            p.device.type == step.device.type
            and p.device.type in capturable_supported_devices
            for p, step in zip(params, state_steps)
        ), f"If capturable=True, params and state_steps must be on supported devices: {capturable_supported_devices}."

    for param, grad, square_avg, acc_delta, step in zip(
        params, grads, square_avgs, acc_deltas, state_steps
    ):
        step += 1
        grad = grad if not maximize else -grad

        if weight_decay != 0:
            grad = grad.add(param, alpha=weight_decay)

        if torch.is_complex(param):
            square_avg = torch.view_as_real(square_avg)
            acc_delta = torch.view_as_real(acc_delta)
            grad = torch.view_as_real(grad)

        square_avg.mul_(rho).addcmul_(grad, grad, value=1 - rho)
        std = square_avg.add(eps).sqrt_()
        delta = acc_delta.add(eps).sqrt_()
        if differentiable:
            delta = delta.clone()
        delta.div_(std).mul_(grad)
        acc_delta.mul_(rho).addcmul_(delta, delta, value=1 - rho)

        if torch.is_complex(param):
            delta = torch.view_as_complex(delta)
        param.add_(delta, alpha=-lr)


def _multi_tensor_adadelta(
    params: list[Tensor], grads: list[Tensor], square_avgs: list[Tensor],
    acc_deltas: list[Tensor], state_steps: list[Tensor], *, lr: float, rho: float,
    eps: float, weight_decay: float, maximize: bool, differentiable: bool,
    capturable: bool, has_complex: bool,
):
    assert not differentiable, "_foreach ops don't support autograd"

    # If compiling, the compiler will handle cudagraph checks
    if not torch.compiler.is_compiling() and capturable:
        capturable_supported_devices = _get_capturable_supported_devices(supports_xla=False)
        assert all(
            p.device.type == step.device.type
            and p.device.type in capturable_supported_devices
            for p, step in zip(params, state_steps)
        ), f"If capturable=True, params and state_steps must be on supported devices: {capturable_supported_devices}."

    if len(params) == 0:
        return

    grouped_tensors = Optimizer._group_tensors_by_device_and_dtype(
        [params, grads, square_avgs, acc_deltas, state_steps]
    )
    for (
        device_params_, device_grads_, device_square_avgs_,
        device_acc_deltas_, device_state_steps_,
    ), _ in grouped_tensors.values():
        device_params = cast(list[Tensor], device_params_)
        device_grads = cast(list[Tensor], device_grads_)
        device_square_avgs = cast(list[Tensor], device_square_avgs_)
        device_acc_deltas = cast(list[Tensor], device_acc_deltas_)
        device_state_steps = cast(list[Tensor], device_state_steps_)

        if has_complex:
            _view_as_real(device_params, device_grads, device_square_avgs, device_acc_deltas)

        # Update steps; on CPU, wrap the scalar once instead of per tensor inside foreach
        if not torch.compiler.is_compiling() and device_state_steps[0].is_cpu:
            torch._foreach_add_(device_state_steps, torch.tensor(1.0, device="cpu"), alpha=1.0)
        else:
            torch._foreach_add_(device_state_steps, 1)

        if maximize:
            device_grads = torch._foreach_neg(device_grads)

        if weight_decay != 0:
            # Re-use the intermediate memory (device_grads) already allocated for maximize
            if maximize:
                torch._foreach_add_(device_grads, device_params, alpha=weight_decay)
            else:
                device_grads = torch._foreach_add(device_grads, device_params, alpha=weight_decay)

        torch._foreach_mul_(device_square_avgs, rho)
        torch._foreach_addcmul_(device_square_avgs, device_grads, device_grads, value=1 - rho)

        std = torch._foreach_add(device_square_avgs, eps)
        torch._foreach_sqrt_(std)

        deltas = torch._foreach_add(device_acc_deltas, eps)
        torch._foreach_sqrt_(deltas)
        torch._foreach_div_(deltas, std)
        torch._foreach_mul_(deltas, device_grads)

        torch._foreach_mul_(device_acc_deltas, rho)
        torch._foreach_addcmul_(device_acc_deltas, deltas, deltas, value=1 - rho)

        # If lr is a tensor, the non-capturable branch would call item() on it,
        # which is invalid while capturing a CUDA graph
        if capturable and isinstance(lr, torch.Tensor):
            torch._foreach_mul_(deltas, -lr)
            torch._foreach_add_(device_params, deltas)
        else:
            torch._foreach_add_(device_params, deltas, alpha=-lr)


@_disable_dynamo_if_unsupported(single_tensor_fn=_single_tensor_adadelta)
def adadelta(
    params: list[Tensor],
    grads: list[Tensor],
    square_avgs: list[Tensor],
    acc_deltas: list[Tensor],
    state_steps: list[Tensor],
    # Keyword-only args with defaults are not supported by torchscript-compiled
    # functions, so these stay positional-or-keyword: the functional API is
    # compiled by torch/distributed/optim
    capturable: bool = False,
    foreach: Optional[bool] = None,
    differentiable: bool = False,
    has_complex: bool = False,
    *,
    lr: float,
    rho: float,
    eps: float,
    weight_decay: float,
    maximize: bool,
):
    r"""Functional API that performs Adadelta algorithm computation.

    See :class:`~torch.optim.Adadelta` for details.
    """
    if not torch.compiler.is_compiling() and not all(
        isinstance(t, torch.Tensor) for t in state_steps
    ):
        raise RuntimeError(
            "API has changed, `state_steps` argument must contain a list of singleton tensors"
        )

    # We still respect when the user inputs False for foreach.
    if foreach is None:
        _, foreach = _default_to_fused_or_foreach(params, differentiable, use_fused=False)

    if foreach and torch.jit.is_scripting():
        raise RuntimeError("torch.jit.script not supported with foreach optimizers")

    if foreach and not torch.jit.is_scripting():
        func = _multi_tensor_adadelta
    else:
        func = _single_tensor_adadelta

    func(
        params, grads, square_avgs, acc_deltas, state_steps,
        lr=lr, rho=rho, eps=eps, weight_decay=weight_decay, maximize=maximize,
        differentiable=differentiable, capturable=capturable, has_complex=has_complex,
    )
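

# Illustrative usage sketch (an addition to this listing, not part of the upstream
# module): fitting a tiny linear model with the Adadelta class defined above. The
# model, data, and hyperparameters are arbitrary placeholders; in practice this
# optimizer is imported as torch.optim.Adadelta rather than run from this file.
if __name__ == "__main__":
    torch.manual_seed(0)
    model = torch.nn.Linear(4, 1)
    optimizer = Adadelta(model.parameters(), lr=1.0, rho=0.9, eps=1e-6)
    inputs, targets = torch.randn(64, 4), torch.randn(64, 1)
    for _ in range(10):
        optimizer.zero_grad()
        loss = torch.nn.functional.mse_loss(model(inputs), targets)
        loss.backward()
        optimizer.step()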