
"""Autograd anomaly mode."""

import warnings

import torch

__all__ = ["detect_anomaly", "set_detect_anomaly"]


class detect_anomaly:
    r"""Context-manager that enables anomaly detection for the autograd engine.

    This does two things:

    - Running the forward pass with detection enabled allows the backward
      pass to print the traceback of the forward operation that created the
      failing backward function.
    - If ``check_nan`` is ``True``, any backward computation that generates
      ``nan`` values will raise an error (a minimal ``check_nan`` sketch is
      shown at the end of the example below). Default ``True``.

    .. warning::
        This mode should be enabled only for debugging, as the extra checks
        will slow down your program execution.

    Example:
        >>> # xdoctest: +REQUIRES(env:TORCH_DOCTEST_ANOMALY)
        >>> import torch
        >>> from torch import autograd
        >>> class MyFunc(autograd.Function):
        ...     @staticmethod
        ...     def forward(ctx, inp):
        ...         return inp.clone()
        ...     @staticmethod
        ...     def backward(ctx, gO):
        ...         # Error during the backward pass
        ...         raise RuntimeError("Some error in backward")
        ...         return gO.clone()
        >>> def run_fn(a):
        ...     out = MyFunc.apply(a)
        ...     return out.sum()
        >>> inp = torch.rand(10, 10, requires_grad=True)
        >>> out = run_fn(inp)
        >>> out.backward()
            Traceback (most recent call last):
              File "<stdin>", line 1, in <module>
              File "/your/pytorch/install/torch/_tensor.py", line 93, in backward
                torch.autograd.backward(self, gradient, retain_graph, create_graph)
              File "/your/pytorch/install/torch/autograd/__init__.py", line 90, in backward
                allow_unreachable=True)  # allow_unreachable flag
              File "/your/pytorch/install/torch/autograd/function.py", line 76, in apply
                return self._forward_cls.backward(self, *args)
              File "<stdin>", line 8, in backward
            RuntimeError: Some error in backward
        >>> with autograd.detect_anomaly():
        ...     inp = torch.rand(10, 10, requires_grad=True)
        ...     out = run_fn(inp)
        ...     out.backward()
            Traceback of forward call that caused the error:
              File "tmp.py", line 53, in <module>
                out = run_fn(inp)
              File "tmp.py", line 44, in run_fn
                out = MyFunc.apply(a)
            Traceback (most recent call last):
              File "<stdin>", line 4, in <module>
              File "/your/pytorch/install/torch/_tensor.py", line 93, in backward
                torch.autograd.backward(self, gradient, retain_graph, create_graph)
              File "/your/pytorch/install/torch/autograd/__init__.py", line 90, in backward
                allow_unreachable=True)  # allow_unreachable flag
              File "/your/pytorch/install/torch/autograd/function.py", line 76, in apply
                return self._forward_cls.backward(self, *args)
              File "<stdin>", line 8, in backward
            RuntimeError: Some error in backward
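
        A minimal sketch of the ``check_nan`` behaviour: the backward of
        ``sqrt`` at zero computes ``0 / 0``, which yields ``nan``, so anomaly
        mode raises (the exact error text may vary between versions):

        >>> # xdoctest: +SKIP
        >>> with autograd.detect_anomaly(check_nan=True):
        ...     x = torch.zeros(1, requires_grad=True)
        ...     out = (x.sqrt() * 0).sum()
        ...     out.backward()
            RuntimeError: Function 'SqrtBackward0' returned nan values in its 0th output.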

    """

    def __init__(self, check_nan: bool = True) -> None:
        # Save the current global state so that __exit__ can restore it.
        self.prev = torch.is_anomaly_enabled()
        self.check_nan = check_nan
        self.prev_check_nan = torch.is_anomaly_check_nan_enabled()
        warnings.warn(
            "Anomaly Detection has been enabled. "
            "This mode will increase the runtime "
            "and should only be enabled for debugging.",
            stacklevel=2,
        )

    def __enter__(self) -> None:
        torch.set_anomaly_enabled(True, self.check_nan)

    def __exit__(self, *args: object) -> None:
        # Restore the state captured in __init__.
        torch.set_anomaly_enabled(self.prev, self.prev_check_nan)


class set_detect_anomaly:
    r"""Context-manager that sets the anomaly detection for the autograd engine on or off.

    ``set_detect_anomaly`` will enable or disable the autograd anomaly detection
    based on its argument :attr:`mode`.
    It can be used as a context-manager or as a function (see the example
    below).

    See ``detect_anomaly`` above for details of the anomaly detection behaviour.

    Args:
        mode (bool): Flag whether to enable anomaly detection (``True``),
                     or disable (``False``).
        check_nan (bool): Flag whether to raise an error when a backward
                          computation generates ``nan`` values. Default ``True``.
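
    Example:
        A minimal sketch of both usages. The new state takes effect when the
        object is constructed, so a bare call works like a setter, and the
        previous state is restored when a ``with`` block exits:

        >>> # xdoctest: +SKIP
        >>> import torch
        >>> torch.autograd.set_detect_anomaly(True)  # used as a plain function
        >>> with torch.autograd.set_detect_anomaly(False):
        ...     ...  # anomaly detection is disabled inside this block
        >>> # on exit, the previous (enabled) state is restored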

    """

    def __init__(self, mode: bool, check_nan: bool = True) -> None:
        # Save the previous global state, then apply the new one immediately;
        # the state is set here (not in __enter__) so that a bare call also
        # works as a setter.
        self.prev = torch.is_anomaly_enabled()
        self.prev_check_nan = torch.is_anomaly_check_nan_enabled()
        torch.set_anomaly_enabled(mode, check_nan)

    def __enter__(self) -> None:
        pass

    def __exit__(self, *args: object) -> None:
        # Restore the state saved in __init__.
        torch.set_anomaly_enabled(self.prev, self.prev_check_nan)