from typing import Optional

import torch
import torch.optim._functional as F
from torch import Tensor
from torch.distributed.optim._deprecation_warning import (
    _scripted_functional_optimizer_deprecation_warning,
)

__all__: list[str] = []


# Define a TorchScript compatible Functional Rprop Optimizer
# where we use these optimizer in a functional way.
# Instead of using the `param.grad` when updating parameters,
# we explicitly allow the distributed optimizer pass gradients to
# the `step` function. In this way, we could separate the gradients
# and parameters and allow multithreaded trainer to update the
# parameters without data traces on accumulating to the same .grad.
# NOTE: This should be only used by distributed optimizer internals
# and not meant to expose to the user.
@torch.jit.script
class _FunctionalRprop:
    def __init__(
        self,
        params: list[Tensor],
        lr: float = 1e-2,
        etas: tuple[float, float] = (0.5, 1.2),
        step_sizes: tuple[float, float] = (1e-6, 50),
        foreach: bool = False,
        maximize: bool = False,
        _allow_empty_param_list: bool = False,
    ):
        _scripted_functional_optimizer_deprecation_warning(stacklevel=2)
        self.defaults = {
            "lr": lr,
        }
        self.etas = etas
        self.step_sizes = step_sizes
        self.foreach = foreach
        self.maximize = maximize

        if len(params) == 0 and not _allow_empty_param_list:
            raise ValueError("optimizer got an empty parameter list")

        # NOTE: we only have one param_group and don't allow user to add
        # additional param group as it's not a common use case.
        self.param_group = {"params": params}

        self.state = torch.jit.annotate(
            dict[torch.Tensor, dict[str, torch.Tensor]], {}
        )

    def step(self, gradients: list[Optional[Tensor]]):
        params = self.param_group["params"]
        params_with_grad = []
        grads = []
        prevs = []
        step_sizes = []
        state_steps = []
        lr = self.defaults["lr"]
        etaminus, etaplus = self.etas
        step_size_min, step_size_max = self.step_sizes

        if len(params) != len(gradients):
            raise ValueError(
                "the gradients passed in does not equal to the size of the parameters!"
                + f"Params length: {len(params)}. "
                + f"Gradients length: {len(gradients)}"
            )

        has_complex = False
        for param, gradient in zip(params, gradients):
            if gradient is not None:
                has_complex |= torch.is_complex(param)
                params_with_grad.append(param)
                grads.append(gradient)
                # Lazy state initialization
                if param not in self.state:
                    self.state[param] = {}
                    state = self.state[param]
                    state["step"] = torch.tensor(0.0)
                    state["prev"] = torch.zeros_like(
                        param, memory_format=torch.preserve_format
                    )
                    state["step_size"] = torch.full_like(gradient, lr)

                state = self.state[param]
                prevs.append(state["prev"])
                step_sizes.append(state["step_size"])
                state_steps.append(state["step"])

        with torch.no_grad():
            F.rprop(
                params_with_grad,
                grads,
                prevs,
                step_sizes,
                state_steps,
                step_size_min=step_size_min,
                step_size_max=step_size_max,
                etaminus=etaminus,
                etaplus=etaplus,
                foreach=self.foreach,
                maximize=self.maximize,
                has_complex=has_complex,
            )
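

# --- Usage sketch (illustrative; not part of the original module) ---
# A minimal sketch of how a distributed optimizer would drive this class:
# gradients are computed externally and handed to `step` explicitly instead of
# being read from `param.grad`, so trainer threads never race on accumulating
# into the same .grad. The parameter shape and loss below are hypothetical
# placeholders, not something the module itself defines.
if __name__ == "__main__":
    param = torch.randn(4, requires_grad=True)
    opt = _FunctionalRprop([param], lr=1e-2)
    loss = (param * param).sum()
    # torch.autograd.grad returns the gradients without populating param.grad.
    grads: list[Optional[Tensor]] = list(torch.autograd.grad(loss, [param]))
    opt.step(grads)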