"""
:mod:`torch.distributed.optim` exposes DistributedOptimizer, which takes a list
of remote parameters (:class:`~torch.distributed.rpc.RRef`) and runs the
optimizer locally on the workers where the parameters live.  The distributed
optimizer can use any of the local optimizer :ref:`optimizer-algorithms` to
apply the gradients on each worker.
"""
import warnings

import torch
from torch import optim

from .apply_optimizer_in_backward import (
    _apply_optimizer_in_backward,
    _get_in_backward_optimizers,
)
from .functional_adadelta import _FunctionalAdadelta
from .functional_adagrad import _FunctionalAdagrad
from .functional_adam import _FunctionalAdam
from .functional_adamax import _FunctionalAdamax
from .functional_adamw import _FunctionalAdamW
from .functional_rmsprop import _FunctionalRMSprop
from .functional_rprop import _FunctionalRprop
from .functional_sgd import _FunctionalSGD
from .named_optimizer import _NamedOptimizer
from .utils import as_functional_optim

# DistributedOptimizer depends on torch.distributed.rpc, so only expose it when
# the RPC extension was built into this copy of torch.
if hasattr(torch._C, "_rpc_init"):
    from .optimizer import DistributedOptimizer

from .post_localSGD_optimizer import PostLocalSGDOptimizer
from .zero_redundancy_optimizer import ZeroRedundancyOptimizer

__all__ = [
    "as_functional_optim",
    "DistributedOptimizer",
    "PostLocalSGDOptimizer",
    "ZeroRedundancyOptimizer",
]