
import contextlib
import functools
from typing import Callable, Optional

from typing_extensions import deprecated

import torch
from torch._library.utils import Kernel, RegistrationHandle


class FakeImplHolder:
    """A holder where one can register a fake impl to."""

    def __init__(self, qualname: str):
        self.qualname: str = qualname
        self.kernel: Optional[Kernel] = None
        self.lib: Optional[torch.library.Library] = None

    def register(self, func: Callable, source: str) -> RegistrationHandle:
        """Register a fake impl.

        Returns a RegistrationHandle that one can use to de-register this
        fake impl.
        """
        if self.kernel is not None:
            raise RuntimeError(
                f"register_fake(...): the operator {self.qualname} "
                f"already has a fake impl registered at {self.kernel.source}."
            )
        if torch._C._dispatch_has_kernel_for_dispatch_key(self.qualname, "Meta"):
            raise RuntimeError(
                f"register_fake(...): the operator {self.qualname} already has "
                f"a DispatchKey::Meta implementation via a pre-existing "
                f"torch.library or TORCH_LIBRARY registration. Please either "
                f"remove that registration or don't call register_fake."
            )
        if torch._C._dispatch_has_kernel_for_dispatch_key(
            self.qualname, "CompositeImplicitAutograd"
        ):
            raise RuntimeError(
                f"register_fake(...): the operator {self.qualname} already has "
                f"an implementation for this device type via a pre-existing "
                f"registration to DispatchKey::CompositeImplicitAutograd. "
                f"CompositeImplicitAutograd operators do not need a fake impl; "
                f"instead, the operator will decompose into its constituents "
                f"and those can have fake impls defined on them."
            )

        # Store the kernel in this holder.
        self.kernel = Kernel(func, source)

        # Also register the fake impl to the Meta dispatch key.
        if self.lib is None:
            ns = self.qualname.split("::")[0]
            self.lib = torch.library.Library(ns, "FRAGMENT")
        meta_kernel = construct_meta_kernel(self.qualname, self)
        self.lib.impl(self.qualname, meta_kernel, "Meta")

        def deregister_fake_class():
            if self.lib:
                self.lib._destroy()
                self.lib = None
            self.kernel = None

        return RegistrationHandle(deregister_fake_class)
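

# Rough usage sketch for FakeImplHolder (illustrative only): user code normally
# goes through torch.library.register_fake(), which owns a holder like this and
# calls register() on your behalf. The operator name, fake impl, and source
# string below are made up for the example.
#
#     holder = FakeImplHolder("mylib::custom_op")
#
#     def fake_custom_op(x):
#         return x.new_empty(x.shape)
#
#     handle = holder.register(fake_custom_op, source="mylib/ops.py:12")
#     # ... later, undo the registration (this also tears down the FRAGMENT
#     # Library that backed the Meta kernel):
#     handle.destroy()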


def construct_meta_kernel(qualname: str, fake_impl_holder: FakeImplHolder) -> Callable:
    assert fake_impl_holder.kernel is not None

    @functools.wraps(fake_impl_holder.kernel.func)
    def meta_kernel(*args, **kwargs):
        assert fake_impl_holder.kernel is not None
        source = fake_impl_holder.kernel.source

        def error_on_ctx():
            raise RuntimeError(
                f"{qualname} ({source}): You're trying to run this operator "
                f"with meta Tensors (as opposed to FakeTensors), but this "
                f"operator may return an output Tensor with data-dependent "
                f"shape. Meta Tensors don't support operators with outputs "
                f"that have data-dependent shapes but FakeTensors do. If your "
                f"operator does not return an output with data-dependent "
                f"shape, make sure the FakeTensor and/or meta kernel does not "
                f"call torch.library.get_ctx(). Otherwise, please use "
                f"FakeTensors."
            )

        with set_ctx_getter(error_on_ctx):
            return fake_impl_holder.kernel(*args, **kwargs)

    return meta_kernel


def get_none():
    return None


global_ctx_getter: Callable = get_none


@contextlib.contextmanager
def set_ctx_getter(ctx_getter):
    global global_ctx_getter
    prev = global_ctx_getter
    try:
        global_ctx_getter = ctx_getter
        yield
    finally:
        global_ctx_getter = prev


class FakeImplCtx:
    """
    Context object for writing fake implementations for custom operators.
    """

    def __init__(self, _fake_mode, _op):
        self._fake_mode = _fake_mode
        self._shape_env = _fake_mode.shape_env
        self._op = _op

    @deprecated(
        "`create_unbacked_symint` is deprecated, please use `new_dynamic_size` instead",
        category=FutureWarning,
    )
    def create_unbacked_symint(self, *, min=2, max=None) -> torch.SymInt:
        return self.new_dynamic_size(min=min, max=max)

    def new_dynamic_size(self, *, min=0, max=None) -> torch.SymInt:
        """Constructs a new symint (symbolic int) representing a data-dependent value.

        This is useful for writing the fake implementation (which is necessary
        for torch.compile) for a CustomOp where an output Tensor has a size
        that depends on the data of the input Tensors.

        Args:
            min (int): A statically known inclusive lower bound for this symint. Default: 0
            max (Optional[int]): A statically known inclusive upper bound for this
                symint. Default: None

        .. warning::

            It is important that the ``min`` and ``max`` (if not None) values are set
            correctly, otherwise, there will be undefined behavior under
            torch.compile. The default value of ``min`` is 2 due to torch.compile
            specializing on 0/1 sizes.

            You must also verify that your implementation on concrete Tensors
            (e.g. CPU/CUDA) only returns Tensors where the size that corresponds
            to the symint also respects these constraints.
            The easiest way to do this is to add an assertion in the CPU/CUDA/etc
            implementation that the size follows these bounds.

        Example::

            >>> # An operator with data-dependent output shape
            >>> lib = torch.library.Library("mymodule", "FRAGMENT")
            >>> lib.define("mymodule::custom_nonzero(Tensor x) -> Tensor")
            >>>
            >>> @torch.library.register_fake("mymodule::custom_nonzero")
            >>> def _(x):
            >>>     # Number of nonzero-elements is data-dependent.
            >>>     # Since we cannot peek at the data in a fake impl,
            >>>     # we use the ctx object to construct a new symint that
            >>>     # represents the data-dependent size.
            >>>     ctx = torch.library.get_ctx()
            >>>     nnz = ctx.new_dynamic_size()
            >>>     shape = [nnz, x.dim()]
            >>>     result = x.new_empty(shape, dtype=torch.int64)
            >>>     return result
            >>>
            >>> @torch.library.impl(lib, "custom_nonzero", "CPU")
            >>> def _(x):
            >>>     x_np = x.numpy()
            >>>     res = np.stack(np.nonzero(x_np), axis=1)
            >>>     return torch.tensor(res, device=x.device)

        """
        if (
            self._shape_env is None
            or not self._shape_env.allow_dynamic_output_shape_ops
        ):
            raise torch._subclasses.fake_tensor.DynamicOutputShapeException(self._op)

        if isinstance(min, torch.SymInt) or isinstance(max, torch.SymInt):
            raise ValueError(
                f"ctx.new_dynamic_size(min={min}, max={max}): expected "
                f"min and max to be statically known ints but got SymInt. "
                f"This is not supported."
            )

        if min < 0:
            raise ValueError(
                f"ctx.new_dynamic_size(min={min}, ...): expected min to be "
                f"greater than or equal to 0: this API can only create "
                f"non-negative sizes."
            )

        return allocate_size(self._shape_env, min, max)


def allocate_size(shape_env, min_val=0, max_val=None):
    result = shape_env.create_unbacked_symint()
    torch.fx.experimental.symbolic_shapes._constrain_range_for_size(
        result, min=min_val, max=max_val
    )
    return result
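

# How the pieces above fit together, as a simplified sketch. The real caller
# lives in the FakeTensor/custom-op machinery elsewhere in torch and may differ
# in detail; ``run_fake_impl`` is a hypothetical name used only for
# illustration.
#
#     def run_fake_impl(holder, fake_mode, op, args, kwargs):
#         ctx = FakeImplCtx(fake_mode, op)
#         with set_ctx_getter(lambda: ctx):
#             # While inside this block, torch.library.get_ctx() returns `ctx`,
#             # so the user's fake impl can call ctx.new_dynamic_size().
#             return holder.kernel(*args, **kwargs)
#
# Outside such a block, global_ctx_getter stays at get_none (get_ctx() has no
# context to return), and the Meta kernel built by construct_meta_kernel swaps
# in error_on_ctx, so calling get_ctx() under plain meta Tensors raises a
# descriptive error instead.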