
import copy
import functools
import inspect
import itertools
import logging
import os
import sys
import warnings
import weakref
from collections import defaultdict, deque
from contextlib import contextmanager
from dataclasses import dataclass, fields, is_dataclass
from enum import auto, Enum
from typing import Any, Callable, Optional, TYPE_CHECKING

import torch
import torch.distributed as dist
from torch._utils import _get_device_index
from torch.autograd import Function, Variable
from torch.distributed.algorithms.join import Join, Joinable, JoinHook
from torch.nn.modules import Module
from torch.nn.parallel.scatter_gather import gather, scatter_kwargs
from torch.utils._pytree import tree_flatten, tree_unflatten

RPC_AVAILABLE = False
if dist.is_available():
    from torch.distributed.distributed_c10d import (
        _get_default_group,
        _rank_not_in_group,
        ReduceOp,
    )
    from torch.distributed.utils import (
        _alloc_storage,
        _cast_forward_inputs,
        _free_storage,
        _sync_module_states,
        _to_kwargs,
        _verify_param_shape_across_processes,
    )
if torch.distributed.rpc.is_available():
    RPC_AVAILABLE = True
    from torch.distributed.rpc import RRef

if TYPE_CHECKING:
    from torch.utils.hooks import RemovableHandle

__all__ = ["DistributedDataParallel"]

logger = logging.getLogger(__name__)


@dataclass
class _MixedPrecision:
    """
This configures DDP-native mixed precision training.

Attributes:
    param_dtype (torch.dtype): This specifies the dtype for model
        parameters, inputs (when ``cast_forward_inputs`` is set to
        ``True``), and therefore the dtype for computation.
        However, outside the forward and backward passes, parameters are in
        full precision. Model checkpointing always happens in full
        precision.
    reduce_dtype (torch.dtype): This specifies the dtype for gradient
        reduction, which is permitted to differ from ``param_dtype``.
    buffer_dtype (torch.dtype): This specifies the dtype for buffers.

.. note:: This API is experimental and subject to change.

.. note:: Only floating point tensors are cast to their specified dtypes.

.. note:: ``state_dict`` checkpoints parameters and buffers in full
    precision.

.. note:: Each low precision dtype must be specified explicitly. For
    example, ``_MixedPrecision(reduce_dtype=torch.float16)`` only specifies
    the reduction dtype to be low precision, and DDP will not cast
    parameters or buffers.

.. note:: If a ``reduce_dtype`` is not specified, then gradient reduction
    happens in ``param_dtype`` if specified or the original parameter dtype
    otherwise. For example, ``_MixedPrecision(param_dtype=torch.float16)``
    would result in communication occurring in fp16.
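
.. note:: The snippet below is an illustrative sketch rather than a full
    recipe: it assumes an initialized process group and a ``model`` already
    placed on the target device, and it passes the config through DDP's
    experimental ``mixed_precision`` argument.

    >>> # xdoctest: +SKIP("undefined variables")
    >>> mp_config = _MixedPrecision(
    >>>     param_dtype=torch.float16,
    >>>     reduce_dtype=torch.float16,
    >>>     buffer_dtype=torch.float16,
    >>> )
    >>> ddp_model = DistributedDataParallel(model, mixed_precision=mp_config)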
Nparam_dtypereduce_dtypebuffer_dtype )__name__
__module____qualname____firstlineno____doc__r)   r   torchdtype__annotations__r*   r+   __static_attributes__r,       U/var/www/auris/envauris/lib/python3.13/site-packages/torch/nn/parallel/distributed.pyr'   r'   7   sC    @ *.K%++&-*.L(5;;'.*.L(5;;'.r6   r'   c                     UR                  5        HE  n[        US5      (       a  UR                  (       a  M'  UR                  U R                  S9Ul        MG     g)z,Casts buffers to the given ``buffer_dtype``._ddp_ignored)r3   N)buffershasattrr9   tor+   data)mixed_precision_configroot_modulebufs      r7   _cast_buffersrA   b   sF    ""$3''C,<,<66 6 C C6D	 %r6   c                 `   UR                  5        H  n[        US5      (       a  UR                  (       a  M'  [        US5      (       a  M:  [        R                  " UUR
                  U R                  UR                  S9Ul        [        UR                  5        UR                  Ul        M     g)z;Create and free storage for the mixed precision parameters.r9   	_mp_param)devicer3   requires_gradN)
parametersr;   r9   r2   
zeros_likerD   r)   rE   rC   r   r=   	_fp_param)r>   r?   params      r7   _setup_mixed_precision_paramsrJ   k   s    '')5.))e.@.@uk**#..||,88#11	EO %//* $jjEO *r6   c                     [         =(       a    [        U [        5      nU(       a  [        U R	                  5       5      u  p#O[        U 5      u  p#X#U4$ N)RPC_AVAILABLE
isinstancer#   r   local_value)outputoutput_is_rrefoutput_tensor_listtreespecs       r7   _tree_flatten_with_rrefrT      sJ    "?z&$'?N'3F4F4F4H'I$H'3F';$ 77r6   c                 @    [        X5      n U(       a  [        U 5      n U $ rL   )r   r#   )rP   rS   rQ   s      r7   _tree_unflatten_with_rrefrV      s    F-FfMr6   c           	        ^  [         (       aC  [        T [        5      (       a.  T R                  5       (       a  [	        T R                  5       5      $ [        T [        R                  5      (       a  T /$ [        T [        [        45      (       a-  [        R                  R                  [        [        T 5      5      $ [        T [        5      (       a;  [        R                  R                  [        [        T R                  5       5      5      $ [!        T 5      (       a@  [        R                  R                  [        [        U 4S j[#        T 5       5       5      5      $ / $ )z?Recursively find all tensors contained in the specified object.c              3   P   >#    U  H  n[        TUR                  5      v   M     g 7frL   )getattrname).0fobjs     r7   	<genexpr> _find_tensors.<locals>.<genexpr>   s     JkQVV 4 4ks   #&)rM   rN   r#   is_owner_find_tensorsrO   r2   Tensorlisttuple	itertoolschainfrom_iterablemapdictvaluesr   r   )r]   s   `r7   ra   ra      s    }C.. <<>> !233#u||$$u#e}%%,,S-DEE#t,,S

-MNNC,,JfSkJK
 	
 Ir6   c                      / SQn SnU  H8  nU[         R                  ;   a  [         R                  U   OSnUSU SU S3-  nM:     [        U5        g )N)/RANK
        "LOCAL_RANK", "WORLD_SIZE", "MASTER_PORT", "MASTER_ADDR",
        "CUDA_VISIBLE_DEVICES", "GLOO_SOCKET_IFNAME", "GLOO_DEVICE_TRANSPORT",
        "NCCL_SOCKET_IFNAME", "TORCH_NCCL_BLOCKING_WAIT", "NCCL_DEBUG",
        "NCCL_DEBUG_SUBSYS", "NCCL_IB_DISABLE", "NCCL_P2P_DISABLE",
        "NCCL_P2P_LEVEL", "NCCL_SHM_DISABLE", "NCCL_SOCKET_NTHREADS",
        "NCCL_NSOCKS_PERTHREAD", "NCCL_BUFFSIZE", "NCCL_NTHREADS",
        "NCCL_RINGS", "NCCL_MAX_NCHANNELS", "NCCL_MIN_NCHANNELS",
        "NCCL_CHECKS_DISABLE", "NCCL_CHECK_POINTERS", "NCCL_LAUNCH_MODE",
        "NCCL_IB_HCA", "NCCL_IB_TIMEOUT", "NCCL_IB_RETRY_CNT",
        "NCCL_IB_GID_INDEX", "NCCL_IB_SL", "NCCL_IB_TC",
        "NCCL_IB_AR_THRESHOLD", "NCCL_IB_CUDA_SUPPORT", "NCCL_NET_GDR_LEVEL",
        "NCCL_NET_GDR_READ", "NCCL_SINGLE_RING_THRESHOLD", "NCCL_LL_THRESHOLD",
        "NCCL_TREE_THRESHOLD", "NCCL_ALGO", "NCCL_PROTO",
        "NCCL_IGNORE_CPU_AFFINITY", "NCCL_DEBUG_FILE", "NCCL_COLLNET_ENABLE",
        "NCCL_TOPO_FILE", "NCCL_TOPO_DUMP_FILE",
        "TORCH_NCCL_ASYNC_ERROR_HANDLING",
    ]
    formatted_output = ""
    for var in relevant_env_vars:
        value = os.environ[var] if var in os.environ else "N/A"
        formatted_output += f"env:{var}={value}\n"
    print(formatted_output)


class _BufferCommHookLocation(Enum):
    PRE_FORWARD = auto()
    POST_FORWARD = auto()


@dataclass
class _BufferCommHook:
    buffer_comm_hook: Callable
    buffer_comm_hook_state: Any
    buffer_comm_hook_location: _BufferCommHookLocation


# _DDPSink is inserted at the output of forward so that work (such as
# enqueueing the delayed allreduce for static graphs) can run when the
# outer-most backward starts.
class _DDPSink(Function):
    @staticmethod
    def forward(ctx, ddp_weakref, *inputs):
        # set_materialize_grads(False) ensures that None gradients stay as
        # None and are not filled with zeros.
        ctx.set_materialize_grads(False)
        ctx.ddp_weakref = ddp_weakref
        ret = inputs
        if ddp_weakref()._ddp_sink_clone:
            ret = tuple(
                inp.clone() if isinstance(inp, torch.Tensor) else inp
                for inp in inputs
            )
        return ret

    @staticmethod
    def backward(ctx, *grad_outputs):
        # Enqueue the delayed allreduce for static graph training on the
        # first iteration.
        ddp_weakref = ctx.ddp_weakref()
        reducer = ddp_weakref.reducer
        static_graph = ddp_weakref.static_graph
        delay_ar_enqueued = (
            static_graph and ddp_weakref._static_graph_delay_allreduce_enqueued
        )
        if static_graph and not delay_ar_enqueued:
            Variable._execution_engine.queue_callback(reducer._delay_all_reduce)
            ddp_weakref._static_graph_delay_allreduce_enqueued = True

        return (None, *grad_outputs)


class _DDPJoinHook(JoinHook):
    def __init__(self, ddp, divide_by_initial_world_size):
        """Set config variables for internal usage."""
        assert isinstance(ddp, DistributedDataParallel), (
            "DDP join hook requires passing in a DistributedDataParallel "
            "instance as the state"
        )
        assert ddp.logger is not None
        ddp.logger._set_uneven_input_join()
        self.ddp = ddp
        self.ddp._divide_by_initial_world_size = divide_by_initial_world_size
        super().__init__()

    def main_hook(self):
        """Shadow the DDP collective communication operations in the forward and backward passes."""
        ddp = self.ddp
        # Buckets are rebuilt only once during a training period.
        ddp.reducer._rebuild_buckets()

        # Schedule a broadcast if we are syncing module buffers in the
        # forward pass.
        ddp._check_and_sync_module_buffers()

        # Check if we need to sync in the backward pass.
        should_sync_backwards = ddp._check_global_requires_backward_grad_sync(
            is_joined_rank=True
        )
        # Forward parameter sync is disabled in the next iteration if we are
        # skipping gradient sync this iteration.
        ddp.require_forward_param_sync = should_sync_backwards
        if not should_sync_backwards:
            return

        # Schedule one allreduce per gradient bucket to match the backward
        # pass allreduce.
        ddp._match_all_reduce_for_bwd_pass()

        # Check if we need to allreduce locally unused parameters.
        if ddp.find_unused_parameters:
            ddp._match_unused_params_allreduce()

        # Rebuilt parameters are pushed only once during a training period.
        ddp.reducer._push_all_rebuilt_params()

    def post_hook(self, is_last_joiner: bool):
        """Sync the final model to ensure that the model is the same across all processes."""
        self.ddp._sync_final_model(is_last_joiner)


class DistributedDataParallel(Module, Joinable):
    r"""Implement distributed data parallelism based on ``torch.distributed`` at module level.

This container provides data parallelism by synchronizing gradients
across each model replica. The devices to synchronize across are
specified by the input ``process_group``, which is the entire world
by default. Note that ``DistributedDataParallel`` does not chunk or
otherwise shard the input across participating GPUs; the user is
responsible for defining how to do so, for example through the use
of a :class:`DistributedSampler`.

See also: :ref:`distributed-basics` and :ref:`cuda-nn-ddp-instead`.
The same constraints on input as in :class:`torch.nn.DataParallel` apply.

Creation of this class requires that ``torch.distributed`` be already
initialized, by calling :func:`torch.distributed.init_process_group`.

``DistributedDataParallel`` is proven to be significantly faster than
:class:`torch.nn.DataParallel` for single-node multi-GPU data
parallel training.

To use ``DistributedDataParallel`` on a host with N GPUs, you should spawn
up ``N`` processes, ensuring that each process exclusively works on a single
GPU from 0 to N-1. This can be done by either setting
``CUDA_VISIBLE_DEVICES`` for every process or by calling:

    >>> # xdoctest: +SKIP("undefined variables")
    >>> torch.cuda.set_device(i)

where i is from 0 to N-1. In each process, you should refer to the following
to construct this module:

    >>> # xdoctest: +SKIP("undefined variables")
    >>> torch.distributed.init_process_group(
    >>>     backend='nccl', world_size=N, init_method='...'
    >>> )
    >>> model = DistributedDataParallel(model, device_ids=[i], output_device=i)

In order to spawn up multiple processes per node, you can use either
``torch.distributed.launch`` or ``torch.multiprocessing.spawn``.
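
For example, a minimal sketch using ``torch.multiprocessing.spawn`` (here
``main_worker`` is a hypothetical function that initializes the process group
and wraps the model in ``DistributedDataParallel``):

    >>> # xdoctest: +SKIP("undefined variables")
    >>> torch.multiprocessing.spawn(main_worker, args=(N,), nprocs=N)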

.. note::
    Please refer to `PyTorch Distributed Overview <https://pytorch.org/tutorials/beginner/dist_overview.html>`__
    for a brief introduction to all features related to distributed training.

.. note::
    ``DistributedDataParallel`` can be used in conjunction with
    :class:`torch.distributed.optim.ZeroRedundancyOptimizer` to reduce
    per-rank optimizer states memory footprint. Please refer to
    `ZeroRedundancyOptimizer recipe <https://pytorch.org/tutorials/recipes/zero_redundancy_optimizer.html>`__
    for more details.
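
    For example, a minimal sketch (assuming ``ddp_model`` has already been
    wrapped and the process group is initialized):

    >>> # xdoctest: +SKIP("undefined variables")
    >>> from torch.distributed.optim import ZeroRedundancyOptimizer
    >>> optimizer = ZeroRedundancyOptimizer(
    >>>     ddp_model.parameters(),
    >>>     optimizer_class=torch.optim.SGD,
    >>>     lr=0.01,
    >>> )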

.. note:: ``nccl`` backend is currently the fastest and highly recommended
    backend when using GPUs. This applies to both single-node and
    multi-node distributed training.

.. note:: This module also supports mixed-precision distributed training.
    This means that your model can have parameters of different types, such
    as a mix of ``fp16`` and ``fp32``; gradient reduction on these
    mixed-type parameters will just work fine.

.. note:: If you use ``torch.save`` on one process to checkpoint the module,
    and ``torch.load`` on some other processes to recover it, make sure that
    ``map_location`` is configured properly for every process. Without
    ``map_location``, ``torch.load`` would recover the module to devices
    where the module was saved from.

.. note:: When a model is trained on ``M`` nodes with ``batch=N``, the
    gradient will be ``M`` times smaller when compared to the same model
    trained on a single node with ``batch=M*N`` if the loss is summed (NOT
    averaged as usual) across instances in a batch (because the gradients
    between different nodes are averaged). You should take this into
    consideration when you want to obtain a mathematically equivalent
    training process compared to the local training counterpart. But in most
    cases, you can just treat a DistributedDataParallel wrapped model, a
    DataParallel wrapped model and an ordinary model on a single GPU as the
    same (e.g., using the same learning rate for an equivalent batch size).
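
    For example, an illustrative sketch that restores parity with a single-node
    summed loss by rescaling the local loss (``ddp_model``, ``criterion``,
    ``data`` and ``target`` are assumed to exist):

    >>> # xdoctest: +SKIP("undefined variables")
    >>> loss = criterion(ddp_model(data), target).sum()
    >>> (loss * torch.distributed.get_world_size()).backward()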

.. note::
    Parameters are never broadcast between processes. The module performs
    an all-reduce step on gradients and assumes that they will be modified
    by the optimizer in all processes in the same way. Buffers
    (e.g. BatchNorm stats) are broadcast from the module of the process with rank
    0 to all other replicas in the system in every iteration.

.. note::
    If you are using DistributedDataParallel in conjunction with the
    :ref:`distributed-rpc-framework`, you should always use
    :meth:`torch.distributed.autograd.backward` to compute gradients and
    :class:`torch.distributed.optim.DistributedOptimizer` for optimizing
    parameters.

    Example::

        >>> # xdoctest: +SKIP("undefined variables")
        >>> import torch.distributed.autograd as dist_autograd
        >>> from torch.nn.parallel import DistributedDataParallel as DDP
        >>> import torch
        >>> from torch import optim
        >>> from torch.distributed.optim import DistributedOptimizer
        >>> import torch.distributed.rpc as rpc
        >>> from torch.distributed.rpc import RRef
        >>>
        >>> t1 = torch.rand((3, 3), requires_grad=True)
        >>> t2 = torch.rand((3, 3), requires_grad=True)
        >>> rref = rpc.remote("worker1", torch.add, args=(t1, t2))
        >>> ddp_model = DDP(my_model)
        >>>
        >>> # Setup optimizer
        >>> optimizer_params = [rref]
        >>> for param in ddp_model.parameters():
        >>>     optimizer_params.append(RRef(param))
        >>>
        >>> dist_optim = DistributedOptimizer(
        >>>     optim.SGD,
        >>>     optimizer_params,
        >>>     lr=0.05,
        >>> )
        >>>
        >>> with dist_autograd.context() as context_id:
        >>>     pred = ddp_model(rref.to_here())
        >>>     loss = loss_func(pred, target)
        >>>     dist_autograd.backward(context_id, [loss])
        >>>     dist_optim.step(context_id)

.. note::
    DistributedDataParallel currently offers limited support for gradient
    checkpointing with :meth:`torch.utils.checkpoint`.
    If the checkpoint is done with use_reentrant=False (recommended), DDP
    will work as expected without any limitations.
    If, however, the checkpoint is done with use_reentrant=True (the default),
    DDP will work as expected when there are no unused parameters in the model
    and each layer is checkpointed at most once (make sure you are not passing
    `find_unused_parameters=True` to DDP). We currently do not support the
    case where a layer is checkpointed multiple times, or when there are unused
    parameters in the checkpointed model.

.. note::
    To let a non-DDP model load a state dict from a DDP model,
    :meth:`~torch.nn.modules.utils.consume_prefix_in_state_dict_if_present`
    needs to be applied to strip the prefix "module." in the DDP state dict before loading.
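
    For example, a minimal sketch (assuming a checkpoint saved from a
    DDP-wrapped model and a plain ``model`` to load it into):

    >>> # xdoctest: +SKIP("undefined variables")
    >>> from torch.nn.modules.utils import consume_prefix_in_state_dict_if_present
    >>> state_dict = torch.load("ddp_checkpoint.pt", map_location="cpu")
    >>> consume_prefix_in_state_dict_if_present(state_dict, "module.")
    >>> model.load_state_dict(state_dict)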

.. warning::
    Constructor, forward method, and differentiation of the output (or a
    function of the output of this module) are distributed synchronization
    points. Take that into account in case different processes might be
    executing different code.

.. warning::
    This module assumes all parameters are registered in the model by the
    time it is created. No parameters should be added or removed later.
    The same applies to buffers.

.. warning::
    This module assumes that the parameters are registered in the model of each
    distributed process in the same order. The module itself will
    conduct gradient ``allreduce`` following the reverse order of the
    registered parameters of the model. In other words, it is users'
    responsibility to ensure that each distributed process has the exact
    same model and thus the exact same parameter registration order.

.. warning::
    This module allows parameters with non-rowmajor-contiguous strides.
    For example, your model may contain some parameters whose
    :class:`torch.memory_format` is ``torch.contiguous_format``
    and others whose format is ``torch.channels_last``.  However,
    corresponding parameters in different processes must have the
    same strides.

.. warning::
    This module doesn't work with :func:`torch.autograd.grad` (i.e. it will
    only work if gradients are to be accumulated in ``.grad`` attributes of
    parameters).

.. warning::
    If you plan on using this module with a ``nccl`` backend or a ``gloo``
    backend (that uses Infiniband), together with a DataLoader that uses
    multiple workers, please change the multiprocessing start method to
    ``forkserver`` (Python 3 only) or ``spawn``. Unfortunately
    Gloo (that uses Infiniband) and NCCL2 are not fork safe, and you will
    likely experience deadlocks if you don't change this setting.
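
    For example, a minimal sketch of switching the start method once in the
    main process before the DataLoader workers are created (``dataset`` is
    assumed to exist):

    >>> # xdoctest: +SKIP("undefined variables")
    >>> import torch.multiprocessing as mp
    >>> mp.set_start_method("forkserver", force=True)
    >>> loader = torch.utils.data.DataLoader(dataset, num_workers=4)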

.. warning::
    You should never try to change your model's parameters after wrapping
    up your model with ``DistributedDataParallel``. When you wrap
    your model with ``DistributedDataParallel``, the constructor
    of ``DistributedDataParallel`` will register the additional gradient
    reduction functions on all the parameters of the model itself at the
    time of construction. If you change the model's parameters afterwards,
    gradient reduction functions no longer match the correct set of
    parameters.

.. warning::
    Using ``DistributedDataParallel`` in conjunction with the
    :ref:`distributed-rpc-framework` is experimental and subject to change.

Args:
    module (Module): module to be parallelized
    device_ids (list of int or torch.device): CUDA devices.
               1) For single-device modules, ``device_ids`` can
               contain exactly one device id, which represents the only
               CUDA device where the input module corresponding to this process resides.
               Alternatively, ``device_ids`` can also be ``None``.
               2) For multi-device modules and CPU modules,
               ``device_ids`` must be ``None``.

               When ``device_ids`` is ``None`` for both cases,
               both the input data for the forward pass and the actual module
               must be placed on the correct device.
               (default: ``None``)
    output_device (int or torch.device): Device location of output for
                  single-device CUDA modules. For multi-device modules and
                  CPU modules, it must be ``None``, and the module itself
                  dictates the output location. (default: ``device_ids[0]``
                  for single-device modules)
    broadcast_buffers (bool): Flag that enables syncing (broadcasting)
                      buffers of the module at beginning of the ``forward``
                      function. (default: ``True``)
    init_sync (bool): Whether to sync during initialization to verify param
                      shapes and broadcast parameters and buffers.
                      WARNING: if this is set to False the user is required
                      to ensure themselves that the weights are the same on
                      all ranks.
                      (default: ``True``)
    process_group: The process group to be used for distributed data
                   all-reduction. If ``None``, the default process group, which
                   is created by :func:`torch.distributed.init_process_group`,
                   will be used. (default: ``None``)
    bucket_cap_mb: ``DistributedDataParallel`` will bucket parameters into
                   multiple buckets so that gradient reduction of each
                   bucket can potentially overlap with backward computation.
                   :attr:`bucket_cap_mb` controls the bucket size in
                   MebiBytes (MiB). If ``None``, a default size of 25 MiB
                   will be used. (default: ``None``)
    find_unused_parameters (bool): Traverse the autograd graph from all
                           tensors contained in the return value of the
                           wrapped module's ``forward`` function. Parameters
                           that don't receive gradients as part of this
                           graph are preemptively marked as being ready to
                           be reduced. In addition, parameters that may have
                           been used in the wrapped module's ``forward``
                           function but were not part of loss computation and
                           thus would also not receive gradients are
                           preemptively marked as ready to be reduced.
                           (default: ``False``)
    check_reduction: This argument is deprecated.
    gradient_as_bucket_view (bool): When set to ``True``, gradients will be views
                  pointing to different offsets of ``allreduce`` communication
                  buckets. This can reduce peak memory usage, where the
                  saved memory size will be equal to the total gradients
                  size. Moreover, it avoids the overhead of copying between
                  gradients and ``allreduce`` communication buckets. When
                  gradients are views, ``detach_()`` cannot be called on the
                  gradients. If hitting such errors, please fix it by
                  referring to the :meth:`~torch.optim.Optimizer.zero_grad`
                  function in ``torch/optim/optimizer.py`` as a solution.
                  Note that gradients will be views after first iteration, so
                  the peak memory saving should be checked after first iteration.
    static_graph (bool): When set to ``True``, DDP knows the trained graph is
                 static. Static graph means 1) The set of used and unused
                 parameters will not change during the whole training loop; in
                 this case, it does not matter whether users set
                 ``find_unused_parameters = True`` or not. 2) How the graph is trained
                 will not change during the whole training loop (meaning there is
                 no control flow depending on iterations).
                 When static_graph is set to be ``True``, DDP will support cases that
                 could not be supported in the past:
                 1) Reentrant backwards.
                 2) Activation checkpointing multiple times.
                 3) Activation checkpointing when model has unused parameters.
                 4) There are model parameters that are outside of forward function.
                 5) Potentially improve performance when there are unused parameters,
                 as DDP will not search graph in each iteration to detect unused
                 parameters when static_graph is set to be ``True``.
                 To check whether you can set static_graph to be ``True``, one way is to
                 check the DDP logging data at the end of your previous model training;
                 if ``ddp_logging_data.get("can_set_static_graph") == True``, you can
                 most likely set ``static_graph = True`` as well.

                 Example::
                     >>> # xdoctest: +SKIP("undefined variables")
                     >>> model_DDP = torch.nn.parallel.DistributedDataParallel(model)
                     >>> # Training loop
                     >>> ...
                     >>> ddp_logging_data = model_DDP._get_ddp_logging_data()
                     >>> static_graph = ddp_logging_data.get("can_set_static_graph")
    delay_all_reduce_named_params (list of tuple of str and torch.nn.Parameter): a list
                of named parameters whose all reduce will be delayed when the gradient of
                the parameter specified in ``param_to_hook_all_reduce`` is ready. Other
                arguments of DDP do not apply to named params specified in this argument
                as these named params will be ignored by DDP reducer.
    param_to_hook_all_reduce (torch.nn.Parameter): a parameter to hook delayed all reduce
                of parameters specified in ``delay_all_reduce_named_params``.


Attributes:
    module (Module): the module to be parallelized.

Example::

    >>> # xdoctest: +SKIP("undefined variables")
    >>> torch.distributed.init_process_group(backend='nccl', world_size=4, init_method='...')
    >>> net = torch.nn.parallel.DistributedDataParallel(model)
N_active_ddp_moduleFmixed_precisionc                   > [         T!U ]  5         [        R                  " U 5        S U l        [	        US L5      [	        US L5      :w  a  U R                  [        S5        U(       a  Ub  [        S5      eUc  Uc  [        5       U l	        OoUc  Xpl	        OeUR                  S:w  a  [        SU S35      eUU l        UR                  SS9U l	        SSKJn  UR                  U5      nUU:w  a  SS	KJn  U" U5        / U l        ['        US
5      (       a  [)        UR*                  5      U l        O[)        5       U l        UbB  U H<  u  nnU R,                  R/                  U5        U R$                  R1                  U5        M>     UR3                  5        VVs/ s H  u  nnUU R,                  ;  d  M  UPM     snnU l        [7        S U R4                   5       5      (       dF  [9        U R$                  5      (       a  [        R;                  S5        OU R                  [        S5        Ub%  [9        U5      S:  a  U R                  [        S5        [9        U R4                   Vs1 s H  nUR<                  iM     sn5      S:  U l        U R4                   Vs1 s H(  nUR<                  c  M  UR<                  R@                  iM*     nn[9        U5      S:w  a  U R                  [        SU S35        [C        [E        U5      5      U l#        Ub0  [9        U5      S:X  d!  U RF                  S:X  d  U R>                  (       ab  U(       d  U(       aE  U R                  [        SU SU SU R4                   Vs1 s H  nUR<                  iM     sn S35        S U l$        S U l%        O;U Vs/ s H  n[M        US5      PM     snU l$        Uc  US   n[M        US5      U l%        SU l'        X@l(        Xl)        [C        [E        U R4                  5      5      R<                  U l        XPl*        Xl+        SU l,        SU l-        Xl.        Xl/        U R^                  b   [        Ra                  SU R^                  5        U
(       a  [b        Rd                  " S[f        SS9  U R4                   HN  n[i        U[j        Rl                  Rn                  Rp                  5      (       d  M8  U R                  [        S5        MP     [s        S5      U l:        Uc
  SnSU l;        OSU l;        [s        US-  S-  5      U l<        [z        R|                  R                  SS5      S:H  U l@        S U lA        / U lB        SU lC        [9        U R$                  5      S:w  a#  U R                  UUUS 9  U R                  (       a  g U R                  5       u  nnU(       aV  [        U R                  U5        [        U RR                  U R                  U Rt                  SU R,                  U RT                  S!9  U R                  U5      nU R                  UUUU5        / U lJ        U R^                  GbF  [        U R^                  U RR                  5        [        U R^                  U RR                  5        [j        R                  " 5       U lN        [        [        5      U lQ        U RR                  R                  U R                  SSS"9  U RR                  R                  5        H  nUR                  U R                  SSS"9  M      SS#KVJWnJXn  U" [        R                  " U 5      [j        R                  " 5       S$9nU R                  UU5        U R                  R                  U R^                  R                  5        SU l_        U(       a  U R                  5         SU la        / U lb        [j        R                  R                  R                  5       n U S%:H  U lf        U R                  (       a  S[j        R                  R                  li        U[j        R                  R                  lj        [j        R                  R                  R                  R/                  S&5        [j        R                  R                  R                  R                  5         U R                  5         SU lp        g s  snnf s  snf s  snf s  snf s  snf )'Nz[delay_all_reduce_named_params and param_to_hook_all_reduce need to be set at the same time.z<Cannot specify both process_group and device_mesh arguments.   z*Only 1D device mesh is supported, but got .r   )mesh_dim)_mesh_resources)_pre_dp_module_transform!_ddp_params_and_buffers_to_ignorec              3   8   #    U  H  oR                   v   M     g 7frL   )rE   r[   ps     r7   r^   3DistributedDataParallel.__init__.<locals>.<genexpr>  s     D,Cq??,Cs   z&Delay the AllReduce of all parameters.zhDistributedDataParallel is not needed when a module doesn't have any parameter that requires a gradient.z8device_ids can only be None or contain a single element.zrDistributedDataParallel's input module must be on the same type of devices, but input module parameters locate in cpuzDistributedDataParallel device_ids and output_device arguments only work with single-device/multiple-device GPU modules or CPU modules, but got device_ids z, output_device z, and module parameters TFz"Received mixed precision config %szhThe `check_reduction` argument in `DistributedDataParallel` module is deprecated. Please avoid using it.   )
stacklevelzModules with uninitialized parameters can't be used with `DistributedDataParallel`. Run a dummy forward pass to correctly initialize the modulesi     i   PYTORCH_DDP_USE_SIDE_STREAM1)bucket_cap_mbparam_to_hook_all_reduce
device_idsmoduleprocess_groupbroadcast_bucket_sizesrcparams_and_buffers_to_ignorebroadcast_buffers)prependwith_kwargs)_AllreduceUpcastHookState"_reducer_allreduce_and_upcast_hook)r   upcast_streampython_reducerztorch.nn.parallel.distributed)qr   r   r   r   r   _log_and_throw
ValueErrorRuntimeErrorr   r  ndimdevice_mesh	get_grouptorch.distributed.device_meshr   get_root_mesh%torch.distributed.tensor.parallel.ddpr   _delay_all_reduce_paramsr;   setr   parameters_to_ignoreaddappendnamed_parameters_module_parametersanyleninforD   is_multi_device_moduletypenextiterdevice_typer  output_devicer   r   dimr
  r  r   require_backward_grad_syncr   gradient_as_bucket_viewr   warningwarningswarnFutureWarningrN   r2   nn	parameterUninitializedParameterintr  bucket_bytes_cap_defaultbucket_bytes_capr   r   get!use_side_stream_for_tensor_copies_delay_grad_buffer_delay_grad_views_delay_all_reduce_all_params_register_delay_all_reduce_hook_build_params_for_reducerr"   r    "_build_debug_param_to_name_mapping_ddp_init_helper_comm_hooksrJ   rA   Stream
_mp_streamr   r   _submodule_to_eventregister_forward_pre_hook_root_copy_hookmodules_module_wait_for_copy_hookAtorch.distributed.algorithms.ddp_comm_hooks.mixed_precision_hooksr  r  weakrefrefregister_comm_hookr    _set_mixed_precision_param_dtyper)   _has_rebuilt_buckets_set_static_graph_lazy_init_ran_accum_grad_hooks_dynamoutilsget_optimize_ddp_mode_use_python_reducer	_inductorconfig_fuse_ddp_communication_fuse_ddp_bucket_sizetrace_rulesLEGACY_MOD_INLINELISTget_legacy_mod_inlinelistcache_clear_register_accum_grad_hookr   )"r   r
  r  r.  r/  r  	init_syncr  r  r   check_reductionr1  r   delay_all_reduce_named_paramsr  r   r  r   	root_meshr   rZ   rI   nr   distinct_device_typesxrF   expect_sparse_gradientparam_to_name_mappingr  r  upcast_hook_stateoptimize_ddpr   s"                                    r7   r    DistributedDataParallel.__init__z  s   & 	$-1-T9:d$D0?
 
 3 [4N  "{':!3!5D !.1$"@QO   +D!,!6!6!6!BDE'55kBI K'
 )0(*%6>??(+F,T,T(UD%(+D%(4<e))--d3--44U;  = //1#
11111 1#

 DD,C,CDDD40011DE## K !c*o&9J 4#:#:;#:a#:;<q@ 	# $(#:#:!
#:aahhMAHHMM#: 	 !
 $%*SShRiijl  %: ;< :!#5(**]##**45Em_ U-AEAXAX.YAXAqxxAX.Y,[[\^ #DO!%DCMN:a0D9:NDO$ *1!2=$!GD!4 7 789@@!2&<#*.'*.''>$.+NN?AUAUV MM?	 ,,E%!3!3!J!JKK## S - &)):%;"  M,0D),1D) #MD$84$? @ JJNN8#>#E 	.
 ;?57,1)t,,-200+)A% 1 
 00 .2-K-K-M*
* 01C1CZP{{"00&*&@&@-1-F-F"&"8"8 !% G G
 S 	"!		
 ;=+)$*>*>L$..<#llnDO'25'9D$ KK11$$e 2 
 ++--/0033! $ 1  0
 !:#KK-#lln! ##!2 LL99$$00 %*!""$#
 9;}}**@@B#/3C#C ##=AEOO"":;HEOO""8 MM%%;;??/ MM%%??KKM**,  $y#
, <!
0 /Z Os*   b4.b4b:b?'b?+cc	c           
        ^ ^ SS K Js  Jm  S[        4UU 4S jjn[	        T R
                  5       HU  u  p#UR                  (       d  M  T R                  R                  UR                  [        R                  " UUS95      5        MW     g )Nr   param_indexc                z  > TR                   (       d  g U R                  c  g TR                  (       a*  TR                   H  u  p#U" X0R                  U 45        M     g U R                  TR                  R	                  5       -  nTR                  USTR                  5      nU R                  R                  U5        g )Nsum)r0  gradrE  r  size
all_reducecopy_)rI   rp  hookstategradientfcolr   s        r7   compiled_accum_grad_hookSDistributedDataParallel._register_accum_grad_hook.<locals>.compiled_accum_grad_hook  s    
 22zz!#'#3#3KDU 34 $4 !::(:(:(?(?(AA??8UD<N<NO

  *r6   )rp  ))torch.distributed._functional_collectivesdistributed_functional_collectivesr9  	enumerater%  rE   rU  r#  "register_post_accumulate_grad_hook	functoolspartial)r   r{  indexrI   rz  s   `   @r7   rb  1DistributedDataParallel._register_accum_grad_hook  sx    @@	+ 	+ 	+& &d&=&=>LE&&""))88%%0$) ?r6   c                     [         R                  " U R                  5      nU R                  R	                  U5        [         R
                  " U R                  U R                  SS9nU$ )NTgroupasync_op)distget_world_sizer  r>  div_ru  )r   rs  
world_size_s       r7   _delayed_all_reduce_hook0DistributedDataParallel._delayed_all_reduce_hook  sT    ((););<
$$Z0OO##4+=+=
 r6   c                 N   Uc  [         R                  " S5      OUS   n[         R                  " [        S U R                   5       5      US9U l        U R                   Vs/ s H  oUR                  5       PM     nn[        R                  " U R                  XaS5        UR                  U R                  5        SnU R                   Hh  nU R
                  XwUR                  5       -    R                  UR                  5      n	U R                  R!                  U	5        XxR                  5       -   nMj     U R"                  R%                  5        HI  u  pUR'                  SS9 H1  u  pUR(                  (       d  M  U
 SU 3nXR*                  ;  d  M0      g    MK     SU l        g s  snf )	Nr   r   c              3   @   #    U  H  oR                  5       v   M     g 7frL   )numelr   s     r7   r^   JDistributedDataParallel._register_delay_all_reduce_hook.<locals>.<genexpr>  s     A#@a		#@s   rD   Frecurser   T)r2   rD   zerosrr  r  r>  detachr  _broadcast_coalescedr  register_hookr  r  viewshaper?  r#  r
  named_modulesr$  rE   r!  r@  )r   r  r  r  rD   r   detached_paramsoffsetrI   	grad_viewmodule_namer
  
param_name	full_names                 r7   rA  7DistributedDataParallel._register_delay_all_reduce_hook  ss    )3(:e$
1"'++A4#@#@AA#
 04/L/LM/L!88:/LM!!$"4"4oVWX 	!..t/L/LM 22E//5;;=:PRWWI "")))4kkm+F 3 $(;;#<#<#>K%+%<%<U%<%K!
&&&#.-q =I (A(AA  &L $? -1)3 Ns   !F"c                    [        S U R                   5       5      (       a  [        R                  R	                  S5        [
        R                  R                  R                  nU R                   H,  nUR                  U/ 5       H  nUR                  5         M     M.     [        R                  " U 5      nSSKJn  U R                  UU" U R                   S95        U R"                  R%                  5         g g )Nc              3   :   #    U  H  n[        US 5      v   M     g7f)_in_backward_optimizersN)r;   r   s     r7   r^   HDistributedDataParallel._setup_in_backward_optimizers.<locals>.<genexpr>  s     V>Uwq344>Us   zddp.optimizer_in_backwardr   )_apply_optim_in_backward_hook)gradient_is_bucket_view)r&  r%  r2   _C_log_api_usage_oncer  optimapply_optimizer_in_backwardparam_to_optim_hook_handle_mapr<  removerN  rO  Ctorch.distributed.algorithms.ddp_comm_hooks.optimizer_overlap_hooksr  rP  r1  r   _set_optimizer_in_backward)r   param_to_handle_mapr   handler   r  s         r7   _setup_in_backward_optimizers5DistributedDataParallel._setup_in_backward_optimizers  s     Vd>U>UVVVHH(()DE
 

66UU   ,,155a<FMMO = - "++d+K ##-,0,H,H LL3359 Wr6   c                 :    U R                   R                  U5        g)a  
Fire the reducer's autograd hook to allreduce params in a Reducer bucket.

Note that this is only used during mixed precision training as the
Reducer's hooks installed during construction time would not be called
as we're working in the low precision parameter setting.
N)r   _autograd_hook)r   idxunuseds      r7   _fire_reducer_autograd_hook3DistributedDataParallel._fire_reducer_autograd_hook  s     	##C(r6   argskwargsreturnc           	      `   [        [        5      U l        U R                     U R                  R                  5        GHD  nUR                  SS9 H  n[        US5      (       a  UR                  (       a  M'  [        UR                  UR                  5       5        [        R                  " 5          UR                  R                  UR                  5        UR                   b>  UR                   R#                  U R$                  R&                  5      UR                   l        SSS5        UR                  Ul        M     [        R(                  " 5       nUR+                  5         U R                  U   R-                  U5        GMG     SSS5        g! , (       d  f       Nt= f! , (       d  f       g= f)a  
For DDP mixed precision, put low precision copies on separate stream and create events to wait for them.

When training with DDP mixed precision, this root pre-forward hook kicks
off low precision copies on a separate stream and creates respective
events to wait for them.
Fr  r9   N)r   r   rH  rG  r
  rK  rF   r;   r9   r   rC   rt  r2   no_gradrv  r=   rs  r<   r   r)   Eventrecordr#  )r   r  r  	submodulerI   
copy_events         r7   rJ  'DistributedDataParallel._root_copy_hook'  s!    $/u#5 __![[002	&11%1@Eun55%:L:L "5??EJJLA--ejj9 !::1.3jjmm $ 4 4 @ @/EJJO ) "'EJ+ A, #[[]
!!#((3:::F3 3 _ ) _s&   BF/A1F	 A%F
FF
F-c                 *    U R                   U   R                  5       nUR                  [        R
                  R                  5       S9  UR                  SS9 H  nUR                  (       a"  [        US5      (       a  UR                  (       a  M8  UR                  U5      nUR                  R                  S   S   nUR                  [        R                   " U R"                  UR$                  5      5      nXx4Ul        M     g! [         a     gf = f)zlBefore carrying out computation, wait on the appropriate event to ensure low precision copies have finished.N)streamFr  r9   r   )rH  popleft
IndexErrorwaitr2   acceleratorcurrent_streamrF   rE   r;   r9   	expand_asgrad_fnnext_functionsr  r  r  r  _idx_ddp_mp_hook_state)	r   r
  r  r  eventr   tmpgrad_accrw  s	            r7   rL  2DistributedDataParallel._module_wait_for_copy_hookO  s    	,,V4<<>E
 	

%++::<
=""5"1A??wq.'A'Aann ++a.C{{11!4Q7H))!!$"B"BAFFKD %-#3A  2  		s   D 
DDc                 ~    U R                   b)  U R                   R                  [        U5       SU 35        U" U5      e)Nz: )r   set_error_and_logstr)r   err_typeerr_msgs      r7   r  &DistributedDataParallel._log_and_throwl  s8    ;;"KK))S]O2gY*GHwr6   c                    USL d  U R                   SL a  [        R                  /nO;U R                  (       a  [        R
                  U R                  /nOU R                  /n[        R                  " UUU5      u  nnU R                  b  [        U5       H  u  pXl
        M     [        R                  " U[        [        U5      5      [        [        U5      5      U R                  UU R                  U R                   U R                  UU R                  (       a  [        R
                  OU R                  5
      U l        [        R"                  " U R                   5      U l        U R                   R'                  U R$                  5        Sn
U R(                  R+                  5        H0  n[-        U[.        R0                  R2                  5      (       d  M.  Sn
  O   U R$                  R5                  U R(                  R6                  R8                  U R:                  c  / OU R:                  U R<                  c  SOU R<                  U R>                  U
U5        U RA                  U R(                  5        g)aa  
DDP init helper function to manage parameters, grad hooks, logging, and SyncBatchNorm.

Initialization helper function that does the following:
(1) bucketing the parameters for reductions
(2) resetting the bucketing states
(3) registering the grad hooks
(4) Logging construction-time DDP logging data
(5) passing a handle of DDP to SyncBatchNorm Layer
TFN)!r   sysmaxsizer:  r  _DEFAULT_FIRST_BUCKET_BYTESr;  "_compute_bucket_assignment_by_sizer   r  r  Reducerrc   reversedr  r1  r   Loggerr   
set_loggerr
  rK  rN   r2   r6  SyncBatchNormset_construction_data_and_logr   r-   r  r.  r  _passing_sync_batchnorm_handle)r   rF   rj  rk  r   bucket_size_limitsbucket_indicesper_bucket_size_limitsir   has_sync_bnr  s               r7   rD  (DistributedDataParallel._ddp_init_helperq  s   H 44#>#>%#G"%++,,44))&"
 '+&;&;%<" 33"
	
" +!*- . ||.)*012" !!''((!
 00 00**)
0 kk$,,/ 	,,,.I)UXX%;%;<<" / 	11KK!!**//)Bt$$,B$2D2D""	
 	++DKK8r6   c                 x    U R                  5         [        R                  " U R                  5      nUS	 US	 US	 U$ )Nr  r   r   )_check_default_groupcopy__dict__)r   attrss     r7   __getstate__$DistributedDataParallel.__getstate__  s<    !!#		$--(/")(Or6   c                   > [        5       U l        [        TU ]  U5        U R                  R                  SS5        U R                  R                  SS5        U R                  5       u  p#U R                  U5      nU R                  UUUU R                  5        U R                  (       aD  U R                  R                  5         U R                  c   eU R                  R                  5         g g )Nr   Tr0  )r   r  r   __setstate__r  
setdefaultrB  rC  rD  r   r   rS  r   )r   rx  rF   rj  rk  r   s        r7   r  $DistributedDataParallel.__setstate__  s    /1U#  !=tD  !=tD-1-K-K-M*
 $ G G
 S"!		
 LL**,;;***KK))+ r6   c                 j   U R                   R                  5        VVVVVs/ s H^  u  pUR                  SS9 VVs/ s H1  u  pEUR                  (       d  M  U SU 3U R                  ;  d  M/  UPM3     snn  H  nX#4PM     M`     nnnnnn[        5       nU VV	s/ s H'  u  pX;  d  M  UR                  U	5      (       a  M$  X4PM)     nnn	U V
Vs/ s H  u  pUPM	     nn
nS nU VV
s/ s H  u  p*U" U5      PM     nnn
U R                  5         X4$ s  snnf s  snnnnnf s  sn	nf s  snn
f s  sn
nf )NFr  r   c                     [        U [        R                  R                  [        R                  R                  45      (       a  U R
                  $ gNF)rN   r2   r6  	EmbeddingEmbeddingBagsparse)r
  s    r7   produces_sparse_gradientSDistributedDataParallel._build_params_for_reducer.<locals>.produces_sparse_gradient  s5    &588#5#5uxx7L7L"MNN}}$r6   )r
  r  r$  rE   r!  r   r"  _assign_modules_buffers)r   r  r
  r7  r  rI   modules_and_parametersmemomr   r  rF   r  rj  s                 r7   rB  1DistributedDataParallel._build_params_for_reducer  sb    (,{{'@'@'B"
'B# *0)@)@)@)O	 *P%J&&  #m1ZL19R9RR 
 *P		 	  'B 	 "
  u
 /	"
 /} %)XXa[ QF.	 	 "
 5KK4JLAi4J
K	 ?U"
>T$V,>T 	 "
 	$$&11O	"
""
 L"
s@   DDD-D3DD#*D#D#D)*D/Dc                    U R                   R                  5        VVs/ s H  u  pXR                  ;  d  M  X!4PM     nnnU VVs/ s H  u  p!UPM	     snnU l        U VVs0 s H  u  p!X_M	     snnU l        gs  snnf s  snnf s  snnf )a`  
Assign self.module.named_buffers to self.modules_buffers.

Assigns module buffers to self.modules_buffers which are then used to
broadcast across ranks when broadcast_buffers=True. Note that this
must be called every time buffers need to be synced because buffers can
be reassigned by user module,
see https://github.com/pytorch/pytorch/issues/63916.
N)r
  named_buffersr!  modules_buffersnamed_module_buffers)r   buffer_namebufferr  s       r7   r  /DistributedDataParallel._assign_modules_buffers,  s     (,{{'@'@'B 
'B#";";; "V!'B 	  
 1E 
0D,F0D 

 >R%
=Q$9VK=Q%
! 

 
%
s   A:A:B "Bc           	          [        [        U5      5       Vs0 s H  o!U   U_M
     nn[        U5      n0 nU R                  R	                  5        Ho  u  pgUR                  SS9 HW  u  pU SU 3n
XR                  ;  d  M  U	R                  (       d  M0  X;  a  U R                  [        SU
 S35        X9   nXU'   MY     Mq     [        U5      [        U5      :w  a/  U R                  [        S[        U5       S[        U5       S35        U$ s  snf )	NFr  r   zParam with name zt found in module parameters, but not DDP parameters. This indicates a bug in DDP, please report an issue to PyTorch.zUExpected param to name mapping to cover all parameters, but got conflicting lengths: z vs zA. This indicates a bug in DDP, please report an issue to PyTorch.)
ranger'  r   r
  r  r$  r!  rE   r  r  )r   rF   r  param_to_param_index	param_setparam_index_to_param_fqnr  r
  r  rI   fqnrp  s               r7   rC  :DistributedDataParallel._build_debug_param_to_name_mappingD  s-   :?J:PQ:PQ1q 0:PQ
O	#% #';;#<#<#>K%+%<%<U%<%K!
$Qzl3 777E<O<O<O-++&.se 4_ _
 #7"=K<?[9 &L $?  y>S!9::114Y0@345 6;; ('=  Rs   Dc              #   x   #    S nU(       a  UR                  5       OU/ H  nU" U5       Sh  vN   M     g N	7f)z(Return a generator of module parameters.c              3      #    [        U S5      (       a  U R                  R                  5       OU R                  SS9nU S h  vN   g  N7f)N_former_parametersFr  )r;   r  rj   rF   )r  pss     r7   model_parametersADistributedDataParallel._get_parameters.<locals>.model_parametersh  sE      1233 $$++-\\%\0 
 MMs   A A
AA
N)rK  )r   r  r  r  mods        r7   _get_parameters'DistributedDataParallel._get_parameterse  s6     	 #*199;s2C',,, 3,s   ,:8
:c                     Sn U R                   [        5       :w  a  SnU(       a  U R                  [        S5        g g ! [         a    Sn N-f = f)NFTzDDP Pickling/Unpickling are only supported when using DDP with the default process group. That is, when you have called init_process_group and have not passed process_group argument to DDP constructor)r  r   r  r  )r   pickle_not_supporteds     r7   r  ,DistributedDataParallel._check_default_groups  s\    $	(!!%7%99'+$  <    	(#' 	(s   = AAc              #   ^   #    U R                   nSU l          Sv   Xl         g! Xl         f = f7f)a  
Context manager to disable gradient synchronizations across DDP processes.

Within this context, gradients will be accumulated on module
variables, which will later be synchronized in the first
forward-backward pass exiting the context.

Example::

    >>> # xdoctest: +SKIP("undefined variables")
    >>> ddp = torch.nn.parallel.DistributedDataParallel(model, pg)
    >>> with ddp.no_sync():
    >>>     for input in inputs:
    >>>         ddp(input).backward()  # no synchronization, accumulate grads
    >>> ddp(another_input).backward()  # synchronize grads

.. warning::
    The forward pass should be included inside the context manager, or
    else gradients will still be synchronized.
FN)r0  )r   old_require_backward_grad_syncs     r7   no_syncDistributedDataParallel.no_sync  s1     , *.)H)H&*/'	M.L+.L+s   -" -*-c                     U R                   $ )zL`TorchDynamo` requires DDP's status and module for cooperative optimization.)r   )clss    r7   _get_active_ddp_module.DistributedDataParallel._get_active_ddp_module  s     %%%r6   )	recursivec              #   b   #    U [         l         S v   S [         l        g ! S [         l        f = f7frL   )r%   r   r   s    r7   _inside_ddp_forward+DistributedDataParallel._inside_ddp_forward  s*      6:2	>9=#6#6s   / /,/c                     U R                   (       a  U R                  " U0 UD6$ U R                  5          U R                  " U0 UD6sS S S 5        $ ! , (       d  f       g = frL   )rY  r
  r.  )r   r   r  s      r7   _run_ddp_forward(DistributedDataParallel._run_ddp_forward  sH    ##;;1&11))+{{F5f5 ,++s   A
Ac                 d   U R                   b  [        S U R                   5       5      n[        U R                  5       HK  u  p#UR                  b  M  U R
                  U   Ul        U(       a  M1  UR                  R                  5         MM     U(       a  U R                   R                  5         g g g )Nc              3   <   #    U  H  oR                   S L v   M     g 7frL   )rs  )r[   rI   s     r7   r^   =DistributedDataParallel._clear_grad_buffer.<locals>.<genexpr>  s      &0Mu

d"0Ms   )r>  allr  r  rs  r?  zero_)r   all_param_grad_noner  rI   s       r7   _clear_grad_buffer*DistributedDataParallel._clear_grad_buffer  s    
 "". #& &040M0M& # !*$*G*G H::%!%!7!7!>EJ..

((*	 !I #''--/ # /r6   c                 2    U R                  5         SU l        g r   )r  rT  r-  s    r7   
_lazy_init"DistributedDataParallel._lazy_init  s     	**,"r6   c           	         U R                   (       a  X4$ U R                  (       d3  [        R                  R	                  5       (       d  U R                  5         U R                  (       a  X4$ [        R                  " 5       (       aT  U R                  (       aC  U R                  c   eU R                  R                  5         U R                  R                  5         [        R                  " U 5      nU(       a%  U R                  R                  X0R                   5        [        R                  " 5       (       a;  U R                  R#                  5       (       a  [        R%                  S5        SU l        U R)                  5       (       a  U R+                  5         U R,                  R.                  (       a  U R1                  SS9  U R2                  (       a  [5        UU[        R6                  " U R8                  U R2                  S   5      U R:                  5      u  pEUS   US   p&U R<                  b%  [?        U R<                  R@                  /UQ70 UD6u  pbXb4$ U R<                  b%  [?        U R<                  R@                  /UQ70 UD6u  pX4$ )Nz4Reducer buckets have been rebuilt in this iteration.TFr   r   )!rY  rT  r2   compileris_compilingr<  r@  is_grad_enabledr0  r   set_runtime_stats_and_logr   prepare_for_forwardr   notify_join_context_set_forward_pass_work_handler   r   r(  rR  _check_sync_bufs_pre_fwd_sync_buffers_join_configenabler   r  r!   rD   r-  r=  r   r   r)   )r   r   r  workmoved_inputsmoved_kwargsr  s          r7   _pre_forward$DistributedDataParallel._pre_forward  s   ##>!""5>>+F+F+H+HOO,,>!  ""t'F'F;;***KK113LL,,. ''-LL6688   ""t||'D'D'F'FKKNO(,D% ((** ##::%:P??)3T--tq/AB66	*&L (?LO&##/3((44    
 < ##/!5((44"" "
 >!r6   c                 d   U R                   (       a  U$ U R                  (       a  U R                  5         U$ U R                  5       (       a  U R	                  5         [
        R                  " 5       (       a  U R                  (       as  SU l        U R                  (       a?  U R                  (       d.  U R                  R                  [        [        U5      5      5        O#U R                  R                  / 5        OSU l        U R                  (       a  U R                  (       a"  U R                  (       a  U R                  (       d  [!        U5      u  nnn[#        [%        U5      5       Vs/ s H  nS PM     nn['        U5       H5  u  pq[
        R(                  " U5      (       d  M"  UR*                  b  M1  XU'   M7     [,        R.                  " [0        R2                  " U 5      /UQ76 n[#        [%        U5      5       H  nXg   b  M
  X   Xg'   M     [5        XcU5      nU R                  5         U$ s  snf )NTF)rY  r@  r9  _check_sync_bufs_post_fwdrG  r2   rA  r0  r   r   r   r   prepare_for_backwardrc   ra   r   rT   r  r'  r  	is_tensorr  r   applyrN  rO  rV   )	r   rP   rR   rS   rQ   r  output_placeholdersr  passthrough_tensor_lists	            r7   _post_forward%DistributedDataParallel._post_forward  s   ##M,,##%M ))++   ""t'F'F.2D+ **43D3D11$}V7L2MN11"5.3D+ ''0A0Ad&Q&Q (/	" $C(:$;<A<<   A
 ''9:	??6**v~~/E-3* ; '/nnD!'#'# 3234&)1-D-G'* 5
 /#~F
 	!;As   *H-c                 R   [         R                  R                  R                  S5         U R                  " U0 UD6u  pU R
                  (       a  U R                  R                  " U0 UD6OU R                  " U0 UD6nU R                  U5      sS S S 5        $ ! , (       d  f       g = f)NzDistributedDataParallel.forward)
r2   autogradprofilerrecord_functionrM  r@  r
  r   r1  rV  )r   r   r  rP   s       r7   r   DistributedDataParallel.forward_  s    ^^$$445VW!..A&ANF 44 ##V6v6**F=f= 
 %%f- XWWs   A$B
B&c                 *    [        XX0R                  S9$ N)r/  )r   r/  )r   r   r  r  s       r7   scatterDistributedDataParallel.scatteri  s    fjhhGGr6   c                 p    [        UU[        R                  " U R                  U5      U R                  5      $ rL   )r!   r2   rD   r-  r=  )r   r   r  	device_ids       r7   	to_kwargs!DistributedDataParallel.to_kwargsl  s2    LL))9522	
 	
r6   c                 *    [        XU R                  S9$ r^  )r   r/  )r   outputsr.  s      r7   r   DistributedDataParallel.gatheru  s    g$((;;r6   c                 &   > [         TU ]  U5        U $ rL   )r   train)r   moder   s     r7   ri  DistributedDataParallel.trainx  s    dr6   c                 H   U(       d1  U R                   (       a   [        R                  " SU R                  S9nO[        R                  " SU R                  S9n[
        R                  " X R                  SS9nU(       a%  UR                  5         UR                  5       S:g  nU$ g )Nr   r  Tr  r   )
r0  r2   onesrD   r  r  ru  r  r  item)r   r   requires_sync_tensorrJ  r   s        r7   r   ADistributedDataParallel._check_global_requires_backward_grad_sync~  s{    $"A"A#(::a#D #(;;q#E  (:(:T
 IIK$8$=$=$?1$D!((r6   c                     U R                  5       (       a.  U R                  U R                  S5      nU R                  U5        g g r  )rF  _find_common_rank_distributed_rank_sync_module_buffersr   authoritative_ranks     r7   r   6DistributedDataParallel._check_and_sync_module_buffers  s?    ((**!%!7!78N8NPU!V%%&89 +r6   c           	          U R                  U R                  U5      U l        [        U R                  U R
                  U R                  U R                  U R                  U R                  S9  g )Nr	  )	rr  rs  _authoritative_rankr    r
  r  r  r!  r  r   s     r7   r   )DistributedDataParallel._sync_final_model  s`     $(#9#9""N$
  	;;,,"&"<"<(()-)B)B"44	
r6   c                     / nU R                   R                  5       nU H/  nU R                   R                  U5      nUR                  U5        M1     U H  nUR	                  5         M     g rL   )r   _get_zeros_like_grad_buckets_run_comm_hookr#  r  )r   	comm_workgrad_bucketsgrad_bucketrJ  s        r7   r   6DistributedDataParallel._match_all_reduce_for_bwd_pass  s^    	 ||@@B'K
 <<..{;DT" ( DIIK r6   c                 n    U R                   R                  5       nU R                  R                  U5        g rL   )r   _get_local_used_mapr  	allreduce)r   locally_used_param_maps     r7   r   6DistributedDataParallel._match_unused_params_allreduce  s*    !%!A!A!C$$%;<r6   r   rI  throw_on_early_terminationc                     [        U /UUUS9$ )aA  
Context manager for training with uneven inputs across processes in DDP.

This context manager will keep track of already-joined DDP processes,
and "shadow" the forward and backward passes by inserting collective
communication operations to match with the ones created by non-joined
DDP processes. This will ensure each collective call has a corresponding
call by already-joined DDP processes, preventing hangs or errors that
would otherwise happen when training with uneven inputs across
processes. Alternatively, if the flag ``throw_on_early_termination`` is
specified to be ``True``, all trainers will throw an error once one rank
runs out of inputs, allowing these errors to be caught and handled
according to application logic.

Once all DDP processes have joined, the context manager will broadcast
the model corresponding to the last joined process to all processes to
ensure the model is the same across all processes
(which is guaranteed by DDP).

To use this to enable training with uneven inputs across processes,
simply wrap this context manager around your training loop. No further
modifications to the model or data loading are required.

.. warning::
    If the model or training loop this context manager is wrapped around
    has additional distributed collective operations, such as
    ``SyncBatchNorm`` in the model's forward pass, then the flag
    ``throw_on_early_termination`` must be enabled. This is because this
    context manager is not aware of non-DDP collective communication.
    This flag will cause all ranks to throw when any one rank
    exhausts inputs, allowing these errors to be caught and recovered
    from across all ranks.

Args:
    divide_by_initial_world_size (bool): If ``True``, will divide
        gradients by the initial ``world_size`` DDP training was launched
        with. If ``False``, will compute the effective world size
        (number of ranks that have not depleted their inputs yet) and
        divide gradients by that during allreduce. Set
        ``divide_by_initial_world_size=True`` to ensure every input
        sample, including the uneven ones, has equal weight in terms of
        how much it contributes to the global gradient. This is
        achieved by always dividing the gradient by the initial
        ``world_size`` even when we encounter uneven inputs. If you set
        this to ``False``, we divide the gradient by the remaining
        number of nodes. This ensures parity with training on a smaller
        ``world_size`` although it also means the uneven inputs would
        contribute more towards the global gradient. Typically, you
        would want to set this to ``True`` for cases where the last few
        inputs of your training job are uneven. In extreme cases, where
        there is a large discrepancy in the number of inputs, setting
        this to ``False`` might provide better results.
    enable (bool): Whether to enable uneven input detection or not. Pass
        in ``enable=False`` to disable in cases where you know that
        inputs are even across participating processes. Default is
        ``True``.
    throw_on_early_termination (bool): Whether to throw an error
        or continue training when at least one rank has exhausted
        inputs. If ``True``, will throw upon the first rank reaching end
        of data. If ``False``, will continue training with a smaller
        effective world size until all ranks are joined. Note that if
        this flag is specified, then the flag
        ``divide_by_initial_world_size`` would be ignored. Default
        is ``False``.


Example::

    >>> # xdoctest: +SKIP("Distributed")
    >>> import torch
    >>> import torch.distributed as dist
    >>> import os
    >>> import torch.multiprocessing as mp
    >>> import torch.nn as nn
    >>> # On each spawned worker
    >>> def worker(rank):
    >>>     dist.init_process_group("nccl", rank=rank, world_size=2)
    >>>     torch.cuda.set_device(rank)
    >>>     model = nn.Linear(1, 1, bias=False).to(rank)
    >>>     model = torch.nn.parallel.DistributedDataParallel(
    >>>         model, device_ids=[rank], output_device=rank
    >>>     )
    >>>     # Rank 1 gets one more input than rank 0.
    >>>     inputs = [torch.tensor([1]).float() for _ in range(10 + rank)]
    >>>     with model.join():
    >>>         for _ in range(5):
    >>>             for inp in inputs:
    >>>                 loss = model(inp).sum()
    >>>                 loss.backward()
    >>>     # Without the join() API, the below synchronization will hang
    >>>     # blocking for rank 1's allreduce to complete.
    >>>     torch.cuda.synchronize(device=rank)
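
When ``throw_on_early_termination=True`` is passed instead, every rank
raises an error (typically surfacing as a ``RuntimeError``) once the
first rank runs out of data, which can be caught so that all ranks
leave the loop together. A rough sketch, reusing the ``worker`` setup
from the example above:

    >>> # xdoctest: +SKIP("Distributed")
    >>> try:
    >>>     with model.join(throw_on_early_termination=True):
    >>>         for inp in inputs:
    >>>             model(inp).sum().backward()
    >>> except RuntimeError:
    >>>     # at least one rank exhausted its inputs; end the epoch on all ranks
    >>>     pass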
        """
        return Join(
            [self],
            enable=enable,
            throw_on_early_termination=throw_on_early_termination,
            divide_by_initial_world_size=divide_by_initial_world_size,
        )

    def join_hook(self, **kwargs):
        r"""
DDP join hook enables training on uneven inputs by mirroring communications in forward and backward passes.

Arguments:
    kwargs (dict): a :class:`dict` containing any keyword arguments
        to modify the behavior of the join hook at run time; all
        :class:`Joinable` instances sharing the same join context
        manager are forwarded the same value for ``kwargs``.

The hook supports the following keyword arguments:
    divide_by_initial_world_size (bool, optional):
        If ``True``, then gradients are divided by the initial world
        size that DDP was launched with.
        If ``False``, then gradients are divided by the effective world
        size (i.e. the number of non-joined processes), meaning that
        the uneven inputs contribute more toward the global gradient.
        Typically, this should be set to ``True`` if the degree of
        unevenness is small but can be set to ``False`` in extreme
        cases for possibly better results.
        Default is ``True``.
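
In practice these keyword arguments are usually forwarded through the
:class:`Join` context manager rather than by calling ``join_hook``
directly; a rough sketch (``model`` is a DDP instance and ``inputs``
is assumed to be defined):

    >>> # xdoctest: +SKIP("Distributed")
    >>> from torch.distributed.algorithms.join import Join
    >>> with Join([model], divide_by_initial_world_size=False):
    >>>     for inp in inputs:
    >>>         model(inp).sum().backward()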
r   Tr  )r<  r   )r   r  r   s      r7   	join_hook!DistributedDataParallel.join_hook2  s'    2 (.zz2PRV'W$
 	
r6   c                     U R                   $ rL   r  r-  s    r7   join_device#DistributedDataParallel.join_deviceP  s    {{r6   c                     U R                   $ rL   )r  r-  s    r7   join_process_group*DistributedDataParallel.join_process_groupT  s    !!!r6   rw  c                 H    [        U5      (       d   e[        UUUS9U l        g)a  
Allow custom registration of hooks that define how buffers are synchronized across ranks.

The hook takes in an optional state and is passed in a Dict[str, Tensor]
corresponding to buffer names and the buffers, and can run arbitrary reductions
on buffers as opposed to DDP's default broadcast from rank 0. This is useful for
example if a counter needs to be summed or averaged across ranks every iteration.

Args:
    state (Any): Optional state that is passed to the hook.
    hook (Callable): Callable with the following signature:
                 ``hook(state: object, bucket: dist.GradBucket) -> torch.futures.Future[torch.Tensor]``
    comm_hook_location (_BufferCommHookLocation): Enum value indicating
                    where to run the hook.
                    _BufferCommHookLocation.PRE_FORWARD means that the
                    hook will run _before_ the forward pass, and
                    _BufferCommHookLocation.POST_FORWARD means that the
                    hook will run _after_ the forward pass.

    NOTE: To maximize performance, users can return a
        List[torch.futures.Future] from their hook, and DDP will
        install and await these hooks appropriately at the end of
        the backward pass. This will ensure all buffers are
        synchronized by the end of the backward pass. If this
        setting is used, it is recommended to pass
        comm_hook_location=_BufferCommHookLocation.POST_FORWARD,
        which will trigger the hook after the forward pass.
        If _BufferCommHookLocation.PRE_FORWARD is used, users must
        ensure appropriate synchronization when manipulating GPU
        buffers in the forward pass.
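
For example, a hook that averages every buffer across ranks instead of
broadcasting from rank 0 might look roughly like the sketch below,
assuming the hook receives the ``Dict[str, Tensor]`` of named buffers
described above and that ``process_group`` is defined:

    >>> # xdoctest: +SKIP("undefined variables")
    >>> def avg_buffers(state, named_buffers):
    >>>     world_size = torch.distributed.get_world_size(process_group)
    >>>     futs = []
    >>>     for buf in named_buffers.values():
    >>>         buf.div_(world_size)
    >>>         work = torch.distributed.all_reduce(buf, group=process_group, async_op=True)
    >>>         futs.append(work.get_future())
    >>>     return futs
    >>> ddp._register_buffer_comm_hook(process_group, avg_buffers)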
        """
        assert callable(hook)
        self.buffer_hook = _BufferCommHook(
            buffer_comm_hook=hook,
            buffer_comm_hook_state=state,
            buffer_comm_hook_location=comm_hook_location,
        )

    def register_comm_hook(self, state: object, hook: Callable):
        r"""
Register communication hook for user-defined DDP aggregation of gradients across multiple workers.

This hook would be very useful for researchers to try out new ideas. For
example, this hook can be used to implement several algorithms like GossipGrad
and gradient compression which involve different communication strategies for
parameter syncs while running Distributed DataParallel training.

Args:
    state (object): Passed to the hook to maintain any state information during the training process.
                    Examples include error feedback in gradient compression,
                    peers to communicate with next in GossipGrad, etc.

                    It is locally stored by each worker
                    and shared by all the gradient tensors on the worker.
    hook (Callable): Callable with the following signature:
                     ``hook(state: object, bucket: dist.GradBucket) -> torch.futures.Future[torch.Tensor]``:

                     This function is called once the bucket is ready. The
                     hook can perform whatever processing is needed and return
                     a Future indicating completion of any async work (ex: allreduce).
                     If the hook doesn't perform any communication, it still
                     must return a completed Future. The Future should hold the
                     new value of grad bucket's tensors. Once a bucket is ready,
                     c10d reducer would call this hook and use the tensors returned
                     by the Future and copy grads to individual parameters.
                     Note that the future's return type must be a single tensor.

                     We also provide an API called ``get_future`` to retrieve a
                     Future associated with the completion of ``c10d.ProcessGroup.Work``.
                     ``get_future`` is currently supported for NCCL and also supported for most
                     operations on GLOO and MPI, except for peer to peer operations (send/recv).

.. warning ::
    Grad bucket's tensors will not be predivided by world_size. User is responsible
    to divide by the world_size in case of operations like allreduce.

.. warning ::
    DDP communication hook can only be registered once and should be registered
    before calling backward.

.. warning ::
    The Future object that hook returns should contain a single tensor
    that has the same shape with the tensors inside grad bucket.

.. warning ::
    ``get_future`` API supports NCCL, and partially GLOO and MPI backends (no support
    for peer-to-peer operations like send/recv) and will return a ``torch.futures.Future``.

Example::
    Below is an example of a noop hook that returns the same tensor.

    >>> # xdoctest: +SKIP('undefined name')
    >>> def noop(state: object, bucket: dist.GradBucket) -> torch.futures.Future[torch.Tensor]:
    >>>     fut = torch.futures.Future()
    >>>     fut.set_result(bucket.buffer())
    >>>     return fut
    >>> ddp.register_comm_hook(state=None, hook=noop)

Example::
    Below is an example of a Parallel SGD algorithm where gradients are encoded before
    allreduce, and then decoded after allreduce.

    >>> # xdoctest: +SKIP('undefined name')
    >>> def encode_and_decode(state: object, bucket: dist.GradBucket) -> torch.futures.Future[torch.Tensor]:
    >>>     encoded_tensor = encode(bucket.buffer())  # encode gradients
    >>>     fut = torch.distributed.all_reduce(encoded_tensor).get_future()
    >>>     # Define the then callback to decode.
    >>>     def decode(fut):
    >>>         decoded_tensor = decode(fut.value()[0])  # decode gradients
    >>>         return decoded_tensor
    >>>     return fut.then(decode)
    >>> ddp.register_comm_hook(state=None, hook=encode_and_decode)
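
Ready-made hooks also ship in
``torch.distributed.algorithms.ddp_comm_hooks.default_hooks``; for
instance, fp16 gradient compression can likely be enabled as:

    >>> # xdoctest: +SKIP('undefined name')
    >>> from torch.distributed.algorithms.ddp_comm_hooks import default_hooks
    >>> ddp.register_comm_hook(state=process_group, hook=default_hooks.fp16_compress_hook)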
        """
        self._check_comm_hook(hook)
        assert self.logger is not None
        self.logger._set_comm_hook_name(hook.__qualname__)
        self._comm_hooks.append((hook, state))
        dist._register_comm_hook(self.reducer, state, hook)
                  U5        g)aL  
Register a built-in communication hook that specifies how DDP aggregates gradients across multiple workers.

The built-in hooks aim to provide efficient C++ implementations for certain hooks,
which might not be as efficient if implemented in Python using a Python communication hook.

Args:
    comm_hook_type (dist.BuiltinCommHookType): type of communication hook, such as ALLREDUCE, FP16_COMPRESS, etc.

.. warning ::
    DDP communication hook can only be registered once and should be registered
    before calling backward.

Example::
    Below is an example of a FP16 compression where gradients are
    compressed into 16-bit floating-point numbers before allreduce, and
    then decompressed after allreduce.

    >>> # xdoctest: +SKIP('undefined name')
    >>> ddp._register_builtin_comm_hook(dist.BuiltinCommHookType.FP16_COMPRESS)

N)r   r  r  r  _register_builtin_comm_hookr   )r   comm_hook_types     r7   r  3DistributedDataParallel._register_builtin_comm_hook  s?    . {{&&&''N(;<((~Fr6   )optim_paramsr  c                    SSK Jn  U" X/UQ70 UD6n UR                  U 5        g! [         a  n[	        U SU S35      UeSnAff = f)a+
  
Register an optimizer in DDP to optimize a parameter immediately after its gradient reduction.

Registers an optimizer with DDP such that the optimization for a
parameter will run immediately when that parameter's gradient is
finished with reduction, instead of waiting for all parameters'
gradients to finish reduction. This can result in a training speedup
depending on your workload since the optimizer can run while gradient
reduction for other parameters are still ongoing. In addition, this has
the potential to reduce peak memory consumption during training, as it
only needs to load the per-parameter optimizer states of a single
parameter at a time, instead of loading all per-parameter optimizer
states at once.

Args:
    optim (Type): a ``torch.optim.Optimizer`` class to be registered
    as a fused optimizer.
    *args (Sequence[Any]): Arguments to forward to `optim`.
    optim_params (Optional[Iterable[torch.Tensor]]): Set of parameters
    to optimize, similar to `params` argument of traditional `torch.optim`
    Optimizers. If this is omitted, all DDP model parameters will be
    optimized.
    **kwargs: (Dict[str, Any]): Keyword arguments to forward to `optim`.

.. warning ::
    _register_fused_optim should only be called once on a DDP instance,
    and registering multiple fused optimizers for the same DDP model
    is not currently supported. Please ping
    https://github.com/pytorch/pytorch/issues/71595 if this is necessary
    for your use case.

.. warning ::
    _register_fused_optim and register_comm_hook currently do not
    compose together, meaning that custom DDP communication hooks are
    not supported with overlapped optimizers. Please ping
    https://github.com/pytorch/pytorch/issues/71595 if this is necessary
    for your use case.

.. warning ::
    Gradient accumulation and DDP `no_sync` are currently not supported
    with overlapped optimizer. Please ping
    https://github.com/pytorch/pytorch/issues/71595 if this is necessary
    for your use case.

Example::

    >>> # xdoctest: +SKIP("No rendezvous handler")
    >>> torch.distributed.init_process_group(backend='nccl', world_size=4, init_method='...')
    >>> net = torch.nn.parallel.DistributedDataParallel(model, pg)
    >>> lr = 1e-2
    >>> betas = (0.9, 0.99)
    >>> eps = 1e-6
    >>> net._register_fused_optim(torch.optim.Adam, lr, betas=betas, eps=eps)
    >>> # Example with subset of parameters
    >>> params_to_opt = [list(net.parameters())[0]]
    >>> net._register_fused_optim(
    ...   torch.optim.Adam, lr, optim_params=params_to_opt,  betas=betas, eps=eps
    ... )
r   )_as_overlapped_optimz] does not support overlapped DDP. Please file an issue to PyTorch or the respective owner of r   N)/torch.distributed.algorithms._optimizer_overlapr  register_ddpNotImplementedErrorr  )r   r  r  r  r  r  overlapped_optimes           r7   _register_fused_optim-DistributedDataParallel._register_fused_optim  se    | 	Y/UdUfU	))$/" 	'vw|v}}~	s   & 
AAAc                 H    [         R                  " U R                  XU5        g rL   )r  r  r  )r   tensorsbuffer_sizerv  s       r7    _distributed_broadcast_coalesced8DistributedDataParallel._distributed_broadcast_coalesced8  s      	!!6H	
r6   c                     U R                  5       =(       a:    [        U S5      =(       a'    U R                  R                  [        R
                  :H  $ Nr  )will_sync_module_buffersr;   r  r   r   r   r-  s    r7   rP  1DistributedDataParallel._check_sync_bufs_post_fwd?  sD    ))+ 4m,4  ::&334	
r6   c                     U R                  5       =(       a?    [        U S5      (       + =(       d'    U R                  R                  [        R
                  :H  $ r  )r  r;   r  r   r   r   r-  s    r7   rF  0DistributedDataParallel._check_sync_bufs_pre_fwdG  sG    ,,. 
m,, 399&223	
r6   c                 ~    U R                   =(       a+    U R                  =(       a    [        U R                  5      S:  $ )Nr   )r   r  r'  r  r-  s    r7   r  0DistributedDataParallel.will_sync_module_buffersN  s6    ++ .&&.D(()A-	
r6   c                 $   [         R                  " U(       a  UOS/U R                  S9n[        R                  " U[
        R                  U R                  S9  UR                  5       S:X  a  U R                  [        S5        UR                  5       $ )Nr  r  )opr  zuBUG! Expected rank_cond to be true for at least one process. This indicates a bug in PyTorch, please report an issue.)r2   tensorrD   r  ru  r   MAXr  rn  r  r  )r   
input_rank	rank_condrank_to_uses       r7   rr  )DistributedDataParallel._find_common_rankU  sx     ll$Z"-;;
 	D<N<NO#L
 !!r6   c                    [         R                  " 5          U R                  R                  (       a  U R	                  U R
                  S5      nOSnU R                  5         U R                  U5        S S S 5        g ! , (       d  f       g = f)NTr   )r2   r  rH  rI  rr  rs  r  rt  ru  s     r7   rG  %DistributedDataParallel._sync_bufferse  sg    ]]_   ''%)%;%;**D&"
 &'" ((*%%&89! __s   AA;;
B	c                     [        U S5      (       d  U R                  US9  g U R                  R                  nU R                  R                  nU" X0R
                  5      nUb  U R                  R                  U5        g g )Nr  )rv  )r;   _default_broadcast_coalescedr  r   r   r  r   _install_post_backward_futures)r   rv  rw  rx  futss        r7   rt  ,DistributedDataParallel._sync_module_buffersx  sp    t]++--AS-T##44D$$;;E889D;;DA  r6   c                 d    Uc  U R                   nUc  U R                  nU R                  XU5        g)z
Broadcasts buffers from rank 0 to the rest of the workers.

If bufs, bucket_size are None, default values self.modules_buffers
and self.broadcast_bucket_size are used instead.
N)r  r  r  )r   bufsbucket_sizerv  s       r7   r  4DistributedDataParallel._default_broadcast_coalesced  s6     <''D44K--dASTr6   c                     UR                  5        H`  n[        U[        R                  R                   R                  5      (       d  M8  U R
                  S:X  d  MJ  U R                  [        S5        Mb     g )Nr   z/SyncBatchNorm layers only work with GPU modules)rK  rN   r2   r6  r  r-  r  r  )r   r
  layers      r7   r  6DistributedDataParallel._passing_sync_batchnorm_handle  sR    ^^%E%!1!1!?!?@@##u,''"I &r6   c                    [        U5      (       d  U R                  [        S5        [        R                  " U5      nUR
                  S   R                  [        R                  :w  aA  UR
                  S   R                  [        R                  :w  a  U R                  [        S5        UR                  [        R                  :w  aO  UR                  [        R                  R                  [        R                     :w  a  U R                  [        S5        UR                   S;   Ga&  [        R"                  R$                  S L=(       d    [        R"                  R&                  S Ln[        R(                  " 5       =(       aG    [        R*                  " 5       =(       a+    [        R$                  R,                  R#                  5       S:  n[        R(                  " 5       =(       a:    [        R.                  " 5       =(       a    [        R0                  R)                  5       nU(       a  U(       d  U(       d  U R                  [        S5        g g g g )Nz$Communication hook must be callable.bucketz@Communication hook: bucket annotation should be dist.GradBucket.zSCommunication hook: return annotation should be torch.futures.Future[torch.Tensor].)bf16_compress_hookbf16_compress_wrapper_hook)r  
   zSBF16 all reduce communication hook required CUDA 11+ and NCCL 2.10+ or XPU and XCCL)r  r  	TypeErrorinspect	signaturerF   
annotation_emptyr  
GradBucketr  return_annotationr2   futuresFuturerb   r-   versioncudahipis_availableis_nccl_availablencclis_xccl_availablexpu)r   rw  sigcuda_supportednccl_supportedxpu_xccl_supporteds         r7   r  (DistributedDataParallel._check_comm_hook  s   ~~	+QR%NN8$//7>>Ax(33tFR !!W^^3%%)=)=ell)KKe
 ==PP""$./""$.  !!# 9**,9JJOO++-8  !!# -**,-II**,  $;M##i <N Qr6   c                 B    [         R                  " U R                  5      $ rL   )r  get_rankr  r-  s    r7   rs  )DistributedDataParallel._distributed_rank  s    }}T//00r6   c              #      #    U(       d  U R                  5       OU R                  5        H  n[        US5      (       a  M  Uv   M     g7f)z=Return a generator of parameters managed by a given DDP unit.r9   N)rF   r$  r;   )r
  named_paramsrI   s      r7   _get_data_parallel_params1DistributedDataParallel._get_data_parallel_params  sB      (4F9P9P9RR  5.11 Ss   :A	 	A	c                     Xl         U R                  5        H  u  p#X!;   d  M  SUl        M     U R                  5        H  u  p$X!;   d  M  SUl        M     g)a  
Set parameters and buffers to be ignored by DDP.

Expected format for parameters is the fully qualified name: {module_name}.{param_name}, and
similarly, {module_name}.{buffer_name} for buffers. For example:
params_to_ignore = []
# NB: model here is vanilla PyTorch module, not yet wrapped with DDP.
for module_name, module in model.named_modules():
    for param_name, param in module.named_parameters(recurse=False):
        if should_ignore(param):
            # Create expected format
            fqn = f"{module_name}.{param_name}"
            params_to_ignore.append(fqn)
torch.nn.parallel.DistributedDataParallel._set_params_and_buffers_to_ignore_for_model(
    model,
    params_to_ignore
)
TN)r   r$  r9   r
  )r
  r  rZ   rI   r  s        r7   +_set_params_and_buffers_to_ignore_for_modelCDistributedDataParallel._set_params_and_buffers_to_ignore_for_model  sS    2 4P0!224KD3%)" 5 #002LD3&*# 3r6   c                     U R                   c   eU R                   R                  5       n0 UR                  EUR                  E$ )a  
Return a dictionary of logging data for debugging and analysis.

This interface can be called after DistributedDataParallel() is
constructed. It returns a dictionary of logging data. It can help
with debugging and analysis. The logging data includes DistributedDataParallel
constructor input parameters, some internal states of DistributedDataParallel
and performance metrics. Simply print the dictionary and see what
these metrics are.
This is a prototype interface and subject to change in the future.
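
A rough usage sketch (the exact keys in the returned dictionary are an
implementation detail and may change between releases):

    >>> # xdoctest: +SKIP("undefined variables")
    >>> ddp = torch.nn.parallel.DistributedDataParallel(model, device_ids=[rank])
    >>> ddp(inp).sum().backward()
    >>> for key, value in sorted(ddp._get_ddp_logging_data().items()):
    >>>     print(key, value)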
)r   _get_ddp_logging_datastrs_mapints_map)r   ddp_logging_datas     r7   r  -DistributedDataParallel._get_ddp_logging_data  sE     {{&&&;;<<>I"++I/?/H/HIIr6   c                 r    US:  a  U R                  [        S5        U R                  R                  U5        g)a  
Set sample_rate of collecting runtime stats.

This interface allows users to set sample_rate of collecting
runtime stats. The runtime stats will be recorded for the
first 10 iterations, after 10 iterations runtime stats will be
recorded once every "sample_rate" training iterations. By
default, runtime stats are recorded for the first 10 iterations,
after 10 iterations runtime stats are recorded once every
"kDDPRuntimeLoggingSampleRate=100" training iterations.
This is a prototype interface and subject to change in the future.
r   zADDP runtime logging sample rate should be equal or greater than 1N)r  r  r   $_set_ddp_runtime_logging_sample_rate)r   sample_rates     r7   r  <DistributedDataParallel._set_ddp_runtime_logging_sample_rate	  s3     ?S 	99+Fr6   c                 F   U R                   (       a  [        R                  " S5        gSU l         SU l        U R                  R                  5         U R                  c   eU R                  R                  5         U R                  (       a  [        R                  " S5        gg)z
Set static graph for DDP.

It is recommended to set static graph in the DDP constructor, which will
call this private API internally.
z<You've set static_graph to be True, no need to set it again.NTFa'  You passed find_unused_parameters=true to DistributedDataParallel, `_set_static_graph` will detect unused parameters automatically, so you do not need to set find_unused_parameters=true, just be sure these unused parameters will not change during training loop while calling `_set_static_graph`.)r   r3  r4  r   r   rS  r   r   r-  s    r7   rS  )DistributedDataParallel._set_static_graph	  s     MMN  6;3&&({{&&&%%'&&MM' 'r6   c                 8    U R                   R                  5         g)zHRemove autograd hooks registered by the reducer on the model parameters.N)r   _remove_autograd_hooksr-  s    r7   r  .DistributedDataParallel._remove_autograd_hooks4	  s    ++-r6   c                 8    U R                   R                  5         g)a  
Check if the reducer has processed all buckets and finalized the backward appropriately.

It is useful to call this method after calling .backward() in your training loop
in order to avoid subsequent hard to debug errors down the road due to the
reducer not finalizing backward.
N)r   _check_reducer_finalizedr-  s    r7   r  0DistributedDataParallel._check_reducer_finalized8	  s     	--/r6   c                 :    U R                   R                  U5        g rL   )r   _set_sparse_metadata)r   global_unique_idss     r7   r	  ,DistributedDataParallel._set_sparse_metadataB	  s    ))*;<r6   c                     SU l         U R                  R                  5         [        U5      (       d"  Xl        U R                  R                  U5        gg)a  
Dynamically updates the process group for DDP so that we can shrink/expand DDP
world size without having to reinitialize DDP.

NOTE: If you are using custom communications hooks via, register_comm_hook,
you need to update the process groups for those hooks separately.
FN)rR  r   _reset_stater   r  _update_process_group)r   new_process_groups     r7   r  -DistributedDataParallel._update_process_groupE	  sF     %*!!!#!"344!2LL../@A 5r6   valc                     Xl         g)a~  
Sets whether DDPSink should clone the output tensors.
The default is True, since modifying the loss in place would otherwise
run into the "view is modified in-place" autograd error.

Although, cloning the tensors can add significant memory and
performance hit if the number and size of tensors are large. As
a result, this can be set to False if you are not modifying the
loss in place.
N)r   )r   r  s     r7   _set_ddp_sink_clone+DistributedDataParallel._set_ddp_sink_cloneX	  s
      #r6   ))rU  ry  rE  r   r@  r  r>  r?  rR  rT  r%  rG  r   rH  rY  r  r  r;  r:  r  rD   r  r  r-  r/  r   r1  r)  r   r   r
  r  r  r.  r!  r  r   r0  r   r   r=  )NNr   TTNNFFFFNNNN)T)TTF)r   )NNr   )F)Xr-   r.   r/   r0   r1   r   r   r4   r'   r   rb  r  rA  r  r  r   rJ  rL  r  rD  r  r  rB  r  rC  r  r  r   r%  classmethodr)  r2   _disable_dynamor.  r1  r9  r<  rM  rV  r   r_  rc  r   ri  r   r   r   r   r   r   r  r  propertyr  r  r   r   r   r  objectrP  r  r*  r  r  rP  rF  r  rr  rG  rt  r  r  r  rs  r   r  r  r  r  rS  r  r  r	  r  r  r5   r   r   s   @r7   r%   r%   F  s   nb	 ?C!:;B
 $ %&*!%59#d$  "/2!d$ d$L	 D'1R'6R)&GS &GC &GD &GP4 4 	4
 
4: 
n9`,*,2\
0(B-$ M M8 & & 
U+> , >60*#C"JCJ.H
<0:
$*= .2+0	h
&*h
 h
 %)	h
T
<   " " 3??	*
 *
XO< O<h O<bG6 FJ F4 FR 89



" :&B ?@U *X 1 1   + +@J G(6.0=B&#t # #r6   )Qr  r  r  re   loggingr   r  r3  rN  collectionsr   r   
contextlibr   dataclassesr   r   r   enumr	   r
   typingr   r   r   r   r2   torch.distributedr~  r  torch._utilsr   torch.autogradr   r   !torch.distributed.algorithms.joinr   r   r   torch.nn.modulesr    torch.nn.parallel.scatter_gatherr   r   torch.utils._pytreer   r   rM   r  "torch.distributed.distributed_c10dr   r   r   torch.distributed.utilsr   r   r   r    r!   r"   rpctorch.distributed.rpcr#   torch.utils.hooksr$   __all__	getLoggerr-   r   r'   rA   rJ   rT   rV   ra   r   r   r   r   r   r%   r,   r6   r7   <module>r-     s@        	 
   * % 7 7  9 9    * - F F # C <  
  88M*1 %
%			8	$ #/ #/ #/TE)(8,7td 
 7 7 7%x %B138 13h] #fh ] #r6   