
    [ThJ                        S r SSKJr  SSKJr  SSKJrJr  SSKJ	r	  SSK
r
SSKJr  / SQr " S	 S
\5      r " S S\5      r\ " S S5      5       r\ " S S5      5       r " S S\5      r\ " S S5      5       r\ " S S\5      5       r\ " S S\5      5       r\ " S S\5      5       r\ " S S5      5       r\ " S S\5      5       r\ " S S \5      5       r\ " S! S"\5      5       r\ " S# S$5      5       rg)%za
This file includes public APIs for FSDP such as the classes used for the
constructor arguments.
"""

from collections.abc import Sequence
from dataclasses import dataclass
from enum import auto, Enum
from typing import Optional

import torch
from torch.nn.modules.batchnorm import _BatchNorm


__all__ = [
    "ShardingStrategy",
    "BackwardPrefetch",
    "MixedPrecision",
    "CPUOffload",
    "StateDictType",
    "StateDictConfig",
    "FullStateDictConfig",
    "LocalStateDictConfig",
    "ShardedStateDictConfig",
    "OptimStateDictConfig",
    "FullOptimStateDictConfig",
    "LocalOptimStateDictConfig",
    "ShardedOptimStateDictConfig",
    "StateDictSettings",
]


class ShardingStrategy(Enum):
    """
This specifies the sharding strategy to be used for distributed training by
:class:`FullyShardedDataParallel`.

- ``FULL_SHARD``: Parameters, gradients, and optimizer states are sharded.
  For the parameters, this strategy unshards (via all-gather) before the
  forward, reshards after the forward, unshards before the backward
  computation, and reshards after the backward computation. For gradients,
  it synchronizes and shards them (via reduce-scatter) after the backward
  computation. The sharded optimizer states are updated locally per rank.
- ``SHARD_GRAD_OP``: Gradients and optimizer states are sharded during
  computation, and additionally, parameters are sharded outside
  computation. For the parameters, this strategy unshards before the
  forward, does not reshard them after the forward, and only reshards them
  after the backward computation. The sharded optimizer states are updated
  locally per rank. Inside ``no_sync()``, the parameters are not resharded
  after the backward computation.
- ``NO_SHARD``: Parameters, gradients, and optimizer states are not sharded
  but instead replicated across ranks similar to PyTorch's
  :class:`DistributedDataParallel` API. For gradients, this strategy
  synchronizes them (via all-reduce) after the backward computation. The
  unsharded optimizer states are updated locally per rank.
- ``HYBRID_SHARD``: Apply ``FULL_SHARD`` within a node, and replicate parameters across
  nodes. This results in reduced communication volume as expensive all-gathers and
  reduce-scatters are only done within a node, which can be more performant for
  medium-sized models.
- ``_HYBRID_SHARD_ZERO2``: Apply ``SHARD_GRAD_OP`` within a node, and replicate parameters across
  nodes. This is like ``HYBRID_SHARD``, except this may provide even higher throughput
  since the unsharded parameters are not freed after the forward pass, saving the
  all-gathers in the pre-backward.
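
A minimal usage sketch (assuming the default process group is already
initialized and ``model`` is an ``nn.Module``; the strategy is passed to the
:class:`FullyShardedDataParallel` constructor via ``sharding_strategy``)::

    >>> # xdoctest: +SKIP("undefined variables")
    >>> # ``model`` and the process group are assumed to exist already.
    >>> from torch.distributed.fsdp import FullyShardedDataParallel as FSDP
    >>> fsdp_model = FSDP(model, sharding_strategy=ShardingStrategy.SHARD_GRAD_OP)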
 N)__name__
__module____qualname____firstlineno____doc__r   
FULL_SHARDSHARD_GRAD_OPNO_SHARDHYBRID_SHARD_HYBRID_SHARD_ZERO2__static_attributes__r       R/var/www/auris/envauris/lib/python3.13/site-packages/torch/distributed/fsdp/api.pyr	   r	   !   s.    @ JFMvH6L&r%   r	   c                   4    \ rS rSrSr\" 5       r\" 5       rSrg)r
   I   a  
This configures explicit backward prefetching, which improves throughput by
enabling communication and computation overlap in the backward pass at the
cost of slightly increased memory usage.

- ``BACKWARD_PRE``: This enables the most overlap but increases memory
  usage the most. This prefetches the next set of parameters *before* the
  current set of parameters' gradient computation. This overlaps the *next
  all-gather* and the *current gradient computation*, and at the peak, it
  holds the current set of parameters, next set of parameters, and current
  set of gradients in memory.
- ``BACKWARD_POST``: This enables less overlap but requires less memory
  usage. This prefetches the next set of parameters *after* the current
  set of parameters' gradient computation. This overlaps the *current
  reduce-scatter* and the *next gradient computation*, and it frees the
  current set of parameters before allocating memory for the next set of
  parameters, only holding the next set of parameters and current set of
  gradients in memory at the peak.
- FSDP's ``backward_prefetch`` argument accepts ``None``, which disables
  the backward prefetching altogether. This has no overlap and does not
  increase memory usage. In general, we do not recommend this setting since
  it may degrade throughput significantly.

For more technical context: For a single process group using NCCL backend,
any collectives, even if issued from different streams, contend for the
same per-device NCCL stream, which implies that the relative order in which
the collectives are issued matters for overlapping. The two backward
prefetching values correspond to different issue orders.
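
A minimal usage sketch (assuming ``model`` is an ``nn.Module`` and the process
group is already initialized; the value is passed to the
:class:`FullyShardedDataParallel` constructor via ``backward_prefetch``)::

    >>> # xdoctest: +SKIP("undefined variables")
    >>> # ``model`` and the process group are assumed to exist already.
    >>> from torch.distributed.fsdp import FullyShardedDataParallel as FSDP
    >>> fsdp_model = FSDP(model, backward_prefetch=BackwardPrefetch.BACKWARD_PRE)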
    """

    BACKWARD_PRE = auto()
    BACKWARD_POST = auto()


@dataclass
class MixedPrecision:
    """
This configures FSDP-native mixed precision training.

Attributes:
    param_dtype (Optional[torch.dtype]): This specifies the dtype for model
        parameters during forward and backward and thus the dtype for
        forward and backward computation. Outside forward and backward, the
        *sharded* parameters are kept in full precision (e.g. for the
        optimizer step), and for model checkpointing, the parameters are
        always saved in full precision. (Default: ``None``)
    reduce_dtype (Optional[torch.dtype]): This specifies the dtype for
        gradient reduction (i.e. reduce-scatter or all-reduce). If this is
        ``None`` but ``param_dtype`` is not ``None``, then this takes on
        the ``param_dtype`` value, still running gradient reduction in low
        precision. This is permitted to differ from ``param_dtype``, e.g.
        to force gradient reduction to run in full precision. (Default:
        ``None``)
    buffer_dtype (Optional[torch.dtype]): This specifies the dtype for
        buffers. FSDP does not shard buffers. Rather, FSDP casts them to
        ``buffer_dtype`` in the first forward pass and keeps them in that
        dtype thereafter. For model checkpointing, the buffers are saved
        in full precision except for ``LOCAL_STATE_DICT``. (Default:
        ``None``)
    keep_low_precision_grads (bool): If ``False``, then FSDP upcasts
        gradients to full precision after the backward pass in preparation
        for the optimizer step. If ``True``, then FSDP keeps the gradients
        in the dtype used for gradient reduction, which can save memory if
        using a custom optimizer that supports running in low precision.
        (Default: ``False``)
    cast_forward_inputs (bool): If ``True``, then this FSDP module casts
        its forward args and kwargs to ``param_dtype``. This is to ensure
        that parameter and input dtypes match for forward computation, as
        required by many ops. This may need to be set to ``True`` when only
        applying mixed precision to some but not all FSDP modules, in which
        case a mixed-precision FSDP submodule needs to recast its inputs.
        (Default: ``False``)
    cast_root_forward_inputs (bool): If ``True``, then the root FSDP module
        casts its forward args and kwargs to ``param_dtype``, overriding
        the value of ``cast_forward_inputs``. For non-root FSDP modules,
        this does not do anything. (Default: ``True``)
    _module_classes_to_ignore (Sequence[Type[nn.Module]]): This specifies
        module classes to ignore for mixed precision when using an
        ``auto_wrap_policy``: Modules of these classes will have FSDP
        applied to them separately with mixed precision disabled (meaning
        that the final FSDP construction would deviate from the specified
        policy). If ``auto_wrap_policy`` is not specified, then this does
        not do anything. This API is experimental and subject to change.
        (Default: ``(_BatchNorm,)``)

.. note:: This API is experimental and subject to change.

.. note:: Only floating point tensors are cast to their specified dtypes.

.. note:: In ``summon_full_params``, parameters are forced to full
    precision, but buffers are not.

.. note:: Layer norm and batch norm accumulate in ``float32`` even when
    their inputs are in a low precision like ``float16`` or ``bfloat16``.
    Disabling FSDP's mixed precision for those norm modules only means that
    the affine parameters are kept in ``float32``. However, this incurs
    separate all-gathers and reduce-scatters for those norm modules, which
    may be inefficient, so if the workload permits, the user should prefer
    to still apply mixed precision to those modules.

.. note:: By default, if the user passes a model with any ``_BatchNorm``
    modules and specifies an ``auto_wrap_policy``, then the batch norm
    modules will have FSDP applied to them separately with mixed precision
    disabled. See the ``_module_classes_to_ignore`` argument.

.. note:: ``MixedPrecision`` has ``cast_root_forward_inputs=True`` and
    ``cast_forward_inputs=False`` by default. For the root FSDP instance,
    its ``cast_root_forward_inputs`` takes precedence over its
    ``cast_forward_inputs``. For non-root FSDP instances, their
    ``cast_root_forward_inputs`` values are ignored. The default setting is
    sufficient for the typical case where each FSDP instance has the same
    ``MixedPrecision`` configuration and only needs to cast inputs to the
    ``param_dtype`` at the beginning of the model's forward pass.

.. note:: For nested FSDP instances with different ``MixedPrecision``
    configurations, we recommend setting individual ``cast_forward_inputs``
    values to configure casting inputs or not before each instance's
    forward. In such a case, since the casts happen before each FSDP
    instance's forward, a parent FSDP instance should have its non-FSDP
    submodules run before its FSDP submodules to avoid the activation dtype
    being changed due to a different ``MixedPrecision`` configuration.

    Example::

        >>> # xdoctest: +SKIP("undefined variables")
        >>> model = nn.Sequential(nn.Linear(3, 3), nn.Linear(3, 3))
        >>> model[1] = FSDP(
        >>>     model[1],
        >>>     mixed_precision=MixedPrecision(param_dtype=torch.float16, cast_forward_inputs=True),
        >>> )
        >>> model = FSDP(
        >>>     model,
        >>>     mixed_precision=MixedPrecision(param_dtype=torch.bfloat16, cast_forward_inputs=True),
        >>> )

    The above shows a working example. On the other hand, if ``model[1]``
    were replaced with ``model[0]``, meaning that the submodule using
    different ``MixedPrecision`` ran its forward first, then ``model[1]``
    would incorrectly see ``float16`` activations instead of ``bfloat16``
    ones.
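
A minimal single-policy sketch (assuming ``model`` is an ``nn.Module``, the
process group is initialized, and the device supports bf16; one
``MixedPrecision`` config is applied to the whole wrapped model)::

    >>> # xdoctest: +SKIP("undefined variables")
    >>> # ``model`` and the process group are assumed to exist already.
    >>> from torch.distributed.fsdp import FullyShardedDataParallel as FSDP
    >>> mp = MixedPrecision(
    ...     param_dtype=torch.bfloat16,
    ...     reduce_dtype=torch.float32,  # keep gradient reduction in full precision
    ...     buffer_dtype=torch.bfloat16,
    ... )
    >>> fsdp_model = FSDP(model, mixed_precision=mp)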

Nparam_dtypereduce_dtypebuffer_dtypeFkeep_low_precision_gradscast_forward_inputsTcast_root_forward_inputs_module_classes_to_ignorer   )r   r   r   r   r   r-   r   torchdtype__annotations__r.   r/   r0   boolr1   r2   r   r3   r   typennModuler$   r   r%   r&   r   r   p   s    iV *.K%++&-*.L(5;;'.*.L(5;;'.%*d* %%%)d)BLxUXX__(=>Nr%   r   c                   (    \ rS rSr% SrSr\\S'   Srg)r      a  
This configures CPU offloading.

Attributes:
    offload_params (bool): This specifies whether to offload parameters to
        CPU when not involved in computation. If ``True``, then this
        offloads gradients to CPU as well, meaning that the optimizer step
        runs on CPU.
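
A minimal usage sketch (assuming ``model`` is an ``nn.Module`` and the process
group is already initialized; the config is passed to the
:class:`FullyShardedDataParallel` constructor via ``cpu_offload``)::

    >>> # xdoctest: +SKIP("undefined variables")
    >>> # ``model`` and the process group are assumed to exist already.
    >>> from torch.distributed.fsdp import FullyShardedDataParallel as FSDP
    >>> fsdp_model = FSDP(model, cpu_offload=CPUOffload(offload_params=True))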
Foffload_paramsr   N)	r   r   r   r   r   r=   r7   r6   r$   r   r%   r&   r   r      s     !ND r%   r   c                   B    \ rS rSrSr\" 5       r\" 5       r\" 5       rSr	g)r      aP  
This enum indicates which type of ``state_dict`` the FSDP module is
currently processing (returning or loading).
The default value is FULL_STATE_DICT to comply with the PyTorch convention.

.. note::
    FSDP currently supports three types of ``state_dict``:
        1. ``state_dict/load_state_dict``: this pair of APIs return and load
           the non-sharded, unflattened parameters. The semantics is the
           same as using DDP.
        2. ``_local_state_dict/_load_local_state_dict``: this pair of APIs return
           and load local sharded, flattened parameters. The values returned
           by ``_local_state_dict`` can be directly used by FSDP and are only
           meaningful to FSDP (because parameters are flattened). Note that
           these APIs are meant for use via the :func:`state_dict_type`
           context manager as follows:
               >>> # xdoctest: +SKIP("undefined variables")
               >>> with fsdp.state_dict_type(StateDictType.LOCAL_STATE_DICT):
               ...     state = fsdp.state_dict()  # loads local state dict
        3. ``_sharded_state_dict/_load_sharded_state_dict``: this pair of APIs
           return and load sharded, unflattened parameters. The ``state_dict``
           returned by ``sharded_state_dict`` can be used by all other parallel
           schemes (resharding may be required).
    """

    FULL_STATE_DICT = auto()
    LOCAL_STATE_DICT = auto()
    SHARDED_STATE_DICT = auto()


@dataclass
class StateDictConfig:
    """
``StateDictConfig`` is the base class for all ``state_dict`` configuration
classes. Users should instantiate a child class (e.g.
``FullStateDictConfig``) in order to configure settings for the
corresponding ``state_dict`` type supported by FSDP.

Attributes:
    offload_to_cpu (bool): If ``True``, then FSDP offloads the state dict
        values to CPU, and if ``False``, then FSDP keeps them on GPU.
        (Default: ``False``)
Foffload_to_cpur   N	r   r   r   r   r   rD   r7   r6   r$   r   r%   r&   r   r     s    
 !ND r%   r   c                   (    \ rS rSr% SrSr\\S'   Srg)r   i%  a
  
``FullStateDictConfig`` is a config class meant to be used with
``StateDictType.FULL_STATE_DICT``. We recommend enabling both
``offload_to_cpu=True`` and ``rank0_only=True`` when saving full state
dicts to save GPU memory and CPU memory, respectively. This config class
is meant to be used via the :func:`state_dict_type` context manager as
follows:

    >>> # xdoctest: +SKIP("undefined variables")
    >>> from torch.distributed.fsdp import FullyShardedDataParallel as FSDP
    >>> fsdp = FSDP(model, auto_wrap_policy=...)
    >>> cfg = FullStateDictConfig(offload_to_cpu=True, rank0_only=True)
    >>> with FSDP.state_dict_type(fsdp, StateDictType.FULL_STATE_DICT, cfg):
    >>>     state = fsdp.state_dict()
    >>> # `state` will be empty on nonzero ranks and contain CPU tensors on rank 0.
    >>> # To reload checkpoint for inference, finetuning, transfer learning, etc:
    >>> model = model_fn()  # Initialize model in preparation for wrapping with FSDP
    >>> if dist.get_rank() == 0:
    >>> # Load checkpoint only on rank 0 to avoid memory redundancy
    >>>     state_dict = torch.load("my_checkpoint.pt")
    >>>     model.load_state_dict(state_dict)
    >>> # All ranks initialize FSDP module as usual. `sync_module_states` argument
    >>> # communicates loaded checkpoint states from rank 0 to rest of the world.
    >>> fsdp = FSDP(
    ...     model,
    ...     device_id=torch.cuda.current_device(),
    ...     auto_wrap_policy=...,
    ...     sync_module_states=True,
    ... )
    >>> # After this point, all ranks have FSDP model with loaded checkpoint.

Attributes:
    rank0_only (bool): If ``True``, then only rank 0 saves the full state
        dict, and nonzero ranks save an empty dict. If ``False``, then all
        ranks save the full state dict. (Default: ``False``)
    """

    rank0_only: bool = False


@dataclass
class LocalStateDictConfig(StateDictConfig):
    pass


@dataclass
class ShardedStateDictConfig(StateDictConfig):
    """
``ShardedStateDictConfig`` is a config class meant to be used with
``StateDictType.SHARDED_STATE_DICT``.

Attributes:
    _use_dtensor (bool): If ``True``, then FSDP saves the state dict values
        as ``DTensor``, and if ``False``, then FSDP saves them as
        ``ShardedTensor``. (Default: ``False``)

.. warning:: ``_use_dtensor`` is a private field of :class:`ShardedStateDictConfig`
  and it is used by FSDP to determine the type of state dict values. Users should not
  manually modify ``_use_dtensor``.
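
A minimal usage sketch (assuming ``fsdp_model`` is an already-wrapped FSDP
module; the config is passed via the :func:`state_dict_type` context manager)::

    >>> # xdoctest: +SKIP("undefined variables")
    >>> # ``fsdp_model`` is assumed to exist already.
    >>> from torch.distributed.fsdp import FullyShardedDataParallel as FSDP
    >>> cfg = ShardedStateDictConfig(offload_to_cpu=True)
    >>> with FSDP.state_dict_type(fsdp_model, StateDictType.SHARDED_STATE_DICT, cfg):
    ...     sharded_sd = fsdp_model.state_dict()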
F_use_dtensorr   N	r   r   r   r   r   rK   r7   r6   r$   r   r%   r&   r   r   T       L$r%   r   c                   (    \ rS rSr% SrSr\\S'   Srg)r   ig  a"  
``OptimStateDictConfig`` is the base class for all ``optim_state_dict``
configuration classes.  Users should instantiate a child class (e.g.
``FullOptimStateDictConfig``) in order to configure settings for the
corresponding ``optim_state_dict`` type supported by FSDP.

Attributes:
    offload_to_cpu (bool): If ``True``, then FSDP offloads the state dict's
        tensor values to CPU, and if ``False``, then FSDP keeps them on the
        original device (which is GPU unless parameter CPU offloading is
        enabled). (Default: ``True``)
    """

    offload_to_cpu: bool = True


@dataclass
class FullOptimStateDictConfig(OptimStateDictConfig):
    """
Attributes:
    rank0_only (bool): If ``True``, then only rank 0 saves the full state
        dict, and nonzero ranks save an empty dict. If ``False``, then all
        ranks save the full state dict. (Default: ``False``)
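
A minimal usage sketch (assuming ``fsdp_model`` is an already-wrapped FSDP module
and ``optim`` is its optimizer; :func:`set_state_dict_type` configures both the
model and optimizer state dict settings before calling :func:`optim_state_dict`)::

    >>> # xdoctest: +SKIP("undefined variables")
    >>> # ``fsdp_model`` and ``optim`` are assumed to exist already.
    >>> from torch.distributed.fsdp import FullyShardedDataParallel as FSDP
    >>> FSDP.set_state_dict_type(
    ...     fsdp_model,
    ...     StateDictType.FULL_STATE_DICT,
    ...     FullStateDictConfig(offload_to_cpu=True, rank0_only=True),
    ...     FullOptimStateDictConfig(offload_to_cpu=True, rank0_only=True),
    ... )
    >>> optim_state = FSDP.optim_state_dict(fsdp_model, optim)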
    """

    rank0_only: bool = False


@dataclass
class LocalOptimStateDictConfig(OptimStateDictConfig):
    offload_to_cpu: bool = False


@dataclass
class ShardedOptimStateDictConfig(OptimStateDictConfig):
    """
``ShardedOptimStateDictConfig`` is a config class meant to be used with
``StateDictType.SHARDED_STATE_DICT``.

Attributes:
    _use_dtensor (bool): If ``True``, then FSDP saves the state dict values
        as ``DTensor``, and if ``False``, then FSDP saves them as
        ``ShardedTensor``. (Default: ``False``)

.. warning:: ``_use_dtensor`` is a private field of :class:`ShardedOptimStateDictConfig`
  and it is used by FSDP to determine the type of state dict values. Users should not
  manually modify ``_use_dtensor``.
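
A minimal usage sketch (assuming ``fsdp_model`` is an already-wrapped FSDP module
and ``optim`` is its optimizer; the optim config is the fourth argument to the
:func:`state_dict_type` context manager)::

    >>> # xdoctest: +SKIP("undefined variables")
    >>> # ``fsdp_model`` and ``optim`` are assumed to exist already.
    >>> from torch.distributed.fsdp import FullyShardedDataParallel as FSDP
    >>> with FSDP.state_dict_type(
    ...     fsdp_model,
    ...     StateDictType.SHARDED_STATE_DICT,
    ...     ShardedStateDictConfig(),
    ...     ShardedOptimStateDictConfig(offload_to_cpu=True),
    ... ):
    ...     osd = FSDP.optim_state_dict(fsdp_model, optim)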
FrK   r   NrL   r   r%   r&   r   r     rM   r%   r   c                   4    \ rS rSr% \\S'   \\S'   \\S'   Srg)r   i  state_dict_typestate_dict_configoptim_state_dict_configr   N)	r   r   r   r   r   r6   r   r   r$   r   r%   r&   r   r     s    ""&&11r%   r   )r   collections.abcr   dataclassesr   enumr   r   typingr   r4   torch.nn.modules.batchnormr   __all__r	   r