"""Implementation for the Resilient backpropagation."""

from typing import cast, Optional, Union

import torch
from torch import Tensor

from .optimizer import (
    _capturable_doc,
    _default_to_fused_or_foreach,
    _differentiable_doc,
    _disable_dynamo_if_unsupported,
    _foreach_doc,
    _get_capturable_supported_devices,
    _get_scalar_dtype,
    _maximize_doc,
    _params_doc,
    _to_scalar,
    _use_grad_for_differentiable,
    _view_as_real,
    Optimizer,
    ParamsT,
)

__all__ = ["Rprop", "rprop"]


class Rprop(Optimizer):
    def __init__(
        self,
        params: ParamsT,
        lr: Union[float, Tensor] = 1e-2,
        etas: tuple[float, float] = (0.5, 1.2),
        step_sizes: tuple[float, float] = (1e-6, 50),
        *,
        capturable: bool = False,
        foreach: Optional[bool] = None,
        maximize: bool = False,
        differentiable: bool = False,
    ):
        if isinstance(lr, Tensor) and lr.numel() != 1:
            raise ValueError("Tensor lr must be 1-element")
        if not 0.0 <= lr:
            raise ValueError(f"Invalid learning rate: {lr}")
        if not 0.0 < etas[0] < 1.0 < etas[1]:
            raise ValueError(f"Invalid eta values: {etas[0]}, {etas[1]}")

        defaults = dict(
            lr=lr,
            etas=etas,
            step_sizes=step_sizes,
            foreach=foreach,
            maximize=maximize,
            differentiable=differentiable,
            capturable=capturable,
        )
        super().__init__(params, defaults)

    def __setstate__(self, state):
        super().__setstate__(state)
        for group in self.param_groups:
            group.setdefault("foreach", None)
            group.setdefault("maximize", False)
            group.setdefault("differentiable", False)
            group.setdefault("capturable", False)
            for p in group["params"]:
                p_state = self.state.get(p, [])
                if len(p_state) != 0 and not torch.is_tensor(p_state["step"]):
                    step_val = float(p_state["step"])
                    p_state["step"] = (
                        torch.tensor(
                            step_val, dtype=_get_scalar_dtype(), device=p.device
                        )
                        if group["capturable"]
                        else torch.tensor(step_val, dtype=_get_scalar_dtype())
                    )

    def _init_group(self, group, params, grads, prevs, step_sizes, state_steps):
        has_complex = False
        for p in group["params"]:
            if p.grad is None:
                continue
            has_complex |= torch.is_complex(p)
            params.append(p)
            grad = p.grad
            if grad.is_sparse:
                raise RuntimeError("Rprop does not support sparse gradients")

            grads.append(grad)
            state = self.state[p]

            # State initialization
            if len(state) == 0:
                state["step"] = (
                    torch.zeros((), dtype=_get_scalar_dtype(), device=p.device)
                    if group["capturable"]
                    else torch.zeros((), dtype=_get_scalar_dtype())
                )
                state["prev"] = torch.zeros_like(p, memory_format=torch.preserve_format)
                if p.dtype.is_complex:
                    # Complex numbers are treated as two independent real numbers,
                    # so the initial step size must not be zero in the imaginary part.
                    state["step_size"] = torch.full_like(
                        grad, complex(group["lr"], group["lr"])
                    )
                else:
                    state["step_size"] = torch.full_like(grad, group["lr"])

            prevs.append(state["prev"])
            step_sizes.append(state["step_size"])
            state_steps.append(state["step"])

        return has_complex

    @_use_grad_for_differentiable
    def step(self, closure=None):
        """Perform a single optimization step.

        Args:
            closure (Callable, optional): A closure that reevaluates the model
                and returns the loss.
        """
        self._cuda_graph_capture_health_check()

        loss = None
        if closure is not None:
            with torch.enable_grad():
                loss = closure()

        for group in self.param_groups:
            params: list[Tensor] = []
            grads: list[Tensor] = []
            prevs: list[Tensor] = []
            step_sizes: list[Tensor] = []
            state_steps: list[Tensor] = []

            etaminus, etaplus = group["etas"]
            step_size_min, step_size_max = group["step_sizes"]
            foreach = group["foreach"]
            maximize = group["maximize"]

            has_complex = self._init_group(
                group, params, grads, prevs, step_sizes, state_steps
            )

            rprop(
                params,
                grads,
                prevs,
                step_sizes,
                state_steps,
                step_size_min=step_size_min,
                step_size_max=step_size_max,
                etaminus=etaminus,
                etaplus=etaplus,
                foreach=foreach,
                maximize=maximize,
                differentiable=group["differentiable"],
                capturable=group["capturable"],
                has_complex=has_complex,
            )

        return loss


Rprop.__doc__ = (
    r"""Implements the resilient backpropagation algorithm.

    .. math::
       \begin{aligned}
            &\rule{110mm}{0.4pt}                                                                 \\
            &\textbf{input}      : \theta_0 \in \mathbf{R}^d \text{ (params)},f(\theta)
                \text{ (objective)},                                                             \\
            &\hspace{13mm}      \eta_{+/-} \text{ (etaplus, etaminus)}, \Gamma_{max/min}
                \text{ (step sizes)}                                                             \\
            &\textbf{initialize} :   g^0_{prev} \leftarrow 0,
                \: \eta_0 \leftarrow \text{lr (learning rate)}                                   \\
            &\rule{110mm}{0.4pt}                                                                 \\
            &\textbf{for} \: t=1 \: \textbf{to} \: \ldots \: \textbf{do}                         \\
            &\hspace{5mm}g_t           \leftarrow   \nabla_{\theta} f_t (\theta_{t-1})           \\
            &\hspace{5mm} \textbf{for} \text{  } i = 0, 1, \ldots, d-1 \: \mathbf{do}            \\
            &\hspace{10mm}  \textbf{if} \:   g^i_{prev} g^i_t  > 0                               \\
            &\hspace{15mm}  \eta^i_t \leftarrow \mathrm{min}(\eta^i_{t-1} \eta_{+},
                \Gamma_{max})                                                                    \\
            &\hspace{10mm}  \textbf{else if}  \:  g^i_{prev} g^i_t < 0                           \\
            &\hspace{15mm}  \eta^i_t \leftarrow \mathrm{max}(\eta^i_{t-1} \eta_{-},
                \Gamma_{min})                                                                    \\
            &\hspace{15mm}  g^i_t \leftarrow 0                                                   \\
            &\hspace{10mm}  \textbf{else}  \:                                                    \\
            &\hspace{15mm}  \eta^i_t \leftarrow \eta^i_{t-1}                                     \\
            &\hspace{5mm}\theta_t \leftarrow \theta_{t-1}- \eta_t \mathrm{sign}(g_t)             \\
            &\hspace{5mm}g_{prev} \leftarrow  g_t                                                \\
            &\rule{110mm}{0.4pt}                                                          \\[-1.ex]
            &\bf{return} \:  \theta_t                                                     \\[-1.ex]
            &\rule{110mm}{0.4pt}                                                          \\[-1.ex]
       \end{aligned}

    For further details regarding the algorithm we refer to the paper
    `A Direct Adaptive Method for Faster Backpropagation Learning: The RPROP Algorithm
    <http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.21.1417>`_.
    """
    + rf"""
    Args:
        {_params_doc}
        lr (float, optional): learning rate (default: 1e-2)
        etas (Tuple[float, float], optional): pair of (etaminus, etaplus), that
            are multiplicative increase and decrease factors
            (default: (0.5, 1.2))
        step_sizes (Tuple[float, float], optional): a pair of minimal and
            maximal allowed step sizes (default: (1e-6, 50))
        {_foreach_doc}
        {_capturable_doc}
        {_maximize_doc}
        {_differentiable_doc}

    """
)

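# Usage sketch (illustrative, not part of the original module; the model, data,
# and iteration count below are assumptions). Rprop keeps one step size per
# parameter element and adapts it from the sign agreement of successive
# gradients, so it is typically run on full-batch gradients rather than noisy
# minibatch estimates:
#
#     model = torch.nn.Linear(4, 1)
#     optimizer = torch.optim.Rprop(
#         model.parameters(), lr=1e-2, etas=(0.5, 1.2), step_sizes=(1e-6, 50)
#     )
#     inputs, targets = torch.randn(64, 4), torch.randn(64, 1)
#     for _ in range(100):
#         optimizer.zero_grad()
#         loss = (model(inputs) - targets).pow(2).mean()
#         loss.backward()
#         optimizer.step()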

def _single_tensor_rprop(
    params: list[Tensor], grads: list[Tensor], prevs: list[Tensor],
    step_sizes: list[Tensor], state_steps: list[Tensor], *,
    step_size_min: float, step_size_max: float, etaminus: float, etaplus: float,
    maximize: bool, capturable: bool, differentiable: bool, has_complex: bool,
):
    for i, param in enumerate(params):
        grad = grads[i]
        grad = grad if not maximize else -grad
        prev = prevs[i]
        step_size = step_sizes[i]
        step = state_steps[i]

        # If compiling, the compiler will handle cudagraph checks.
        if not torch.compiler.is_compiling() and capturable:
            capturable_supported_devices = _get_capturable_supported_devices()
            assert (
                param.device.type == step.device.type
                and param.device.type in capturable_supported_devices
            ), (
                "If capturable=True, params and state_steps must be on supported "
                f"devices: {capturable_supported_devices}."
            )

        step += 1

        if torch.is_complex(param):
            grad = torch.view_as_real(grad)
            prev = torch.view_as_real(prev)
            param = torch.view_as_real(param)
            step_size = torch.view_as_real(step_size)

        if differentiable:
            sign = grad.mul(prev.clone()).sign()
        else:
            sign = grad.mul(prev).sign()

        if capturable:
            sign.copy_(torch.where(sign.gt(0), etaplus, sign))
            sign.copy_(torch.where(sign.lt(0), etaminus, sign))
            sign.copy_(torch.where(sign.eq(0), 1, sign))
        else:
            sign[sign.gt(0)] = etaplus
            sign[sign.lt(0)] = etaminus
            sign[sign.eq(0)] = 1

        # update step sizes with step size updates
        step_size.mul_(sign).clamp_(step_size_min, step_size_max)

        # for dir < 0, dfdx = 0; for dir >= 0, dfdx = dfdx
        grad = grad.clone(memory_format=torch.preserve_format)
        if capturable:
            grad.copy_(torch.where(sign.eq(etaminus), 0, grad))
        else:
            grad[sign.eq(etaminus)] = 0

        # update parameters
        param.addcmul_(grad.sign(), step_size, value=-1)
        prev.copy_(grad)


def _multi_tensor_rprop(
    params: list[Tensor], grads: list[Tensor], prevs: list[Tensor],
    step_sizes: list[Tensor], state_steps: list[Tensor], *,
    step_size_min: float, step_size_max: float, etaminus: float, etaplus: float,
    maximize: bool, capturable: bool, differentiable: bool, has_complex: bool,
):
    if len(params) == 0:
        return

    assert not differentiable, "_foreach ops don't support autograd"

    # If compiling, the compiler will handle cudagraph checks.
    if not torch.compiler.is_compiling() and capturable:
        capturable_supported_devices = _get_capturable_supported_devices()
        assert all(
            p.device.type == step.device.type
            and p.device.type in capturable_supported_devices
            for p, step in zip(params, state_steps)
        ), (
            "If capturable=True, params and state_steps must be on supported "
            f"devices: {capturable_supported_devices}."
        )

    grouped_tensors = Optimizer._group_tensors_by_device_and_dtype(
        [params, grads, prevs, step_sizes, state_steps]  # type: ignore[list-item]
    )
    for (
        grouped_params_,
        grouped_grads_,
        grouped_prevs_,
        grouped_step_sizes_,
        grouped_state_steps_,
    ), _ in grouped_tensors.values():
        grouped_params = cast(list[Tensor], grouped_params_)
        grouped_grads = cast(list[Tensor], grouped_grads_)
        grouped_prevs = cast(list[Tensor], grouped_prevs_)
        grouped_step_sizes = cast(list[Tensor], grouped_step_sizes_)
        grouped_state_steps = cast(list[Tensor], grouped_state_steps_)

        # Update steps. If steps are on CPU, foreach falls back to a slow
        # for-loop calling t.add(1) over and over, wrapping 1 into a Tensor
        # each time; wrapping it once here is faster. The alpha kwarg is
        # required to hit the right overload.
        if not torch.compiler.is_compiling() and grouped_state_steps[0].is_cpu:
            torch._foreach_add_(
                grouped_state_steps, torch.tensor(1.0, device="cpu"), alpha=1.0
            )
        else:
            torch._foreach_add_(grouped_state_steps, 1)

        # Handle complex params
        if has_complex:
            _view_as_real(
                grouped_params, grouped_grads, grouped_prevs, grouped_step_sizes
            )

        signs = torch._foreach_mul(grouped_grads, grouped_prevs)
        if maximize:
            torch._foreach_neg_(signs)

        # At the end of the step, grouped_prevs will hold the current grads, so
        # reuse its memory instead of allocating a new buffer.
        torch._foreach_copy_(grouped_prevs, grouped_grads)
        if maximize:
            torch._foreach_neg_(grouped_prevs)
        grouped_grads = grouped_prevs

        torch._foreach_sign_(signs)
        if capturable:
            for sign in signs:
                sign.copy_(torch.where(sign.gt(0), etaplus, sign))
                sign.copy_(torch.where(sign.lt(0), etaminus, sign))
                sign.copy_(torch.where(sign.eq(0), 1, sign))
        else:
            for sign in signs:
                sign[sign.gt(0)] = etaplus
                sign[sign.lt(0)] = etaminus
                sign[sign.eq(0)] = 1

        # update step sizes with step size updates
        torch._foreach_mul_(grouped_step_sizes, signs)
        for step_size in grouped_step_sizes:
            step_size.clamp_(step_size_min, step_size_max)

        # for dir < 0, dfdx = 0; for dir >= 0, dfdx = dfdx
        grouped_grads = list(grouped_grads)
        for i in range(len(grouped_grads)):
            grouped_grads[i].copy_(
                torch.where(signs[i].eq(etaminus), 0, grouped_grads[i])
            )

        # explicitly delete signs, which is unused past this point, to save memory
        del signs

        # update parameters; grouped_prevs already holds the current grads since
        # grouped_grads aliases its storage
        grad_signs = [grad.sign() for grad in grouped_grads]
        torch._foreach_addcmul_(
            grouped_params, grad_signs, grouped_step_sizes, value=-1
        )


@_disable_dynamo_if_unsupported(single_tensor_fn=_single_tensor_rprop)
def rprop(
    params: list[Tensor],
    grads: list[Tensor],
    prevs: list[Tensor],
    step_sizes: list[Tensor],
    state_steps: list[Tensor],
    # kwonly args with defaults are not supported by functions compiled with
    # torchscript, so these stay positional for now; the functional API is
    # compiled by torch/distributed/optim
    foreach: Optional[bool] = None,
    capturable: bool = False,
    maximize: bool = False,
    differentiable: bool = False,
    has_complex: bool = False,
    *,
    step_size_min: float,
    step_size_max: float,
    etaminus: float,
    etaplus: float,
):
    r"""Functional API that performs rprop algorithm computation.

    See :class:`~torch.optim.Rprop` for details.
    """
    # This check is slow during compilation, so it is skipped under dynamo.
    if not torch.compiler.is_compiling() and not all(
        isinstance(t, torch.Tensor) for t in state_steps
    ):
        raise RuntimeError(
            "API has changed, `state_steps` argument must contain a list of "
            "singleton tensors"
        )

    if foreach is None:
        _, foreach = _default_to_fused_or_foreach(
            params, differentiable, use_fused=False
        )

    if foreach and torch.jit.is_scripting():
        raise RuntimeError("torch.jit.script not supported with foreach optimizers")

    if foreach and not torch.jit.is_scripting():
        func = _multi_tensor_rprop
    else:
        func = _single_tensor_rprop

    func(
        params,
        grads,
        prevs,
        step_sizes,
        state_steps,
        step_size_min=step_size_min,
        step_size_max=step_size_max,
        etaminus=etaminus,
        etaplus=etaplus,
        capturable=capturable,
        maximize=maximize,
        differentiable=differentiable,
        has_complex=has_complex,
    )