
from typing import Any, Optional, TYPE_CHECKING, Union

import torch
from torch import Tensor
from torch._C import _add_docstr, _sparse

from .semi_structured import (
    SparseSemiStructuredTensor,
    SparseSemiStructuredTensorCUSPARSELT,
    SparseSemiStructuredTensorCUTLASS,
    to_sparse_semi_structured,
)


if TYPE_CHECKING:
    from torch.types import _dtype as DType

    DimOrDims = Optional[Union[int, tuple[int, ...], list[int]]]
else:
    # The JIT doesn't understand Union, nor torch.dtype here
    DType = int
    DimOrDims = Optional[tuple[int]]


__all__ = [
    "addmm",
    "check_sparse_tensor_invariants",
    "mm",
    "sum",
    "softmax",
    "solve",
    "log_softmax",
    "SparseSemiStructuredTensor",
    "SparseSemiStructuredTensorCUTLASS",
    "SparseSemiStructuredTensorCUSPARSELT",
    "to_sparse_semi_structured",
    "as_sparse_gradcheck",
]

addmm = _add_docstr(
    _sparse._sparse_addmm,
    r"""
sparse.addmm(mat, mat1, mat2, *, beta=1., alpha=1.) -> Tensor

This function does the exact same thing as :func:`torch.addmm` in the forward,
except that it supports backward for sparse COO matrix :attr:`mat1`.
When :attr:`mat1` is a COO tensor it must have `sparse_dim = 2`.
When inputs are COO tensors, this function also supports backward for both inputs.

Supports both CSR and COO storage formats.

.. note::
    This function doesn't support computing derivatives with respect to CSR matrices.

Args:
    mat (Tensor): a dense matrix to be added
    mat1 (Tensor): a sparse matrix to be multiplied
    mat2 (Tensor): a dense matrix to be multiplied
    beta (Number, optional): multiplier for :attr:`mat` (:math:`\beta`)
    alpha (Number, optional): multiplier for :math:`mat1 @ mat2` (:math:`\alpha`)
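
Example (an illustrative usage sketch; the tensors and values below are
chosen only for demonstration)::

    >>> mat = torch.zeros(2, 2)
    >>> mat1 = torch.eye(2).to_sparse().requires_grad_()
    >>> mat2 = torch.ones(2, 2)
    >>> out = torch.sparse.addmm(mat, mat1, mat2)
    >>> out.shape
    torch.Size([2, 2])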
""",
)

mm = _add_docstr(
    _sparse._sparse_mm,
    r"""
    Performs a matrix multiplication of the sparse matrix :attr:`mat1`
    and the (sparse or strided) matrix :attr:`mat2`. Similar to :func:`torch.mm`, if :attr:`mat1` is a
    :math:`(n \times m)` tensor, :attr:`mat2` is a :math:`(m \times p)` tensor, out will be a
    :math:`(n \times p)` tensor.
    When :attr:`mat1` is a COO tensor it must have `sparse_dim = 2`.
    When inputs are COO tensors, this function also supports backward for both inputs.

    Supports both CSR and COO storage formats.

.. note::
    This function doesn't support computing derivatives with respect to CSR matrices.

    This function additionally accepts an optional :attr:`reduce` argument that allows
    specifying a reduction operation; mathematically, it performs the following operation:

.. math::

    z_{ij} = \bigoplus_{k = 0}^{K - 1} x_{ik} y_{kj}

where :math:`\bigoplus` defines the reduce operator. :attr:`reduce` is implemented only for
CSR storage format on CPU device.

Args:
    mat1 (Tensor): the first sparse matrix to be multiplied
    mat2 (Tensor): the second matrix to be multiplied, which could be sparse or dense
    reduce (str, optional): the reduction operation to apply for non-unique indices
        (:obj:`"sum"`, :obj:`"mean"`, :obj:`"amax"`, :obj:`"amin"`). Default :obj:`"sum"`.

Shape:
    The format of the output tensor of this function follows:
    - sparse x sparse -> sparse
    - sparse x dense -> dense

Example::

    >>> a = torch.tensor([[1., 0, 2], [0, 3, 0]]).to_sparse().requires_grad_()
    >>> a
    tensor(indices=tensor([[0, 0, 1],
                           [0, 2, 1]]),
           values=tensor([1., 2., 3.]),
           size=(2, 3), nnz=3, layout=torch.sparse_coo, requires_grad=True)
    >>> b = torch.tensor([[0, 1.], [2, 0], [0, 0]], requires_grad=True)
    >>> b
    tensor([[0., 1.],
            [2., 0.],
            [0., 0.]], requires_grad=True)
    >>> y = torch.sparse.mm(a, b)
    >>> y
    tensor([[0., 1.],
            [6., 0.]], grad_fn=<SparseAddmmBackward0>)
    >>> y.sum().backward()
    >>> a.grad
    tensor(indices=tensor([[0, 0, 1],
                           [0, 2, 1]]),
           values=tensor([1., 0., 2.]),
           size=(2, 3), nnz=3, layout=torch.sparse_coo)
    >>> c = a.detach().to_sparse_csr()
    >>> c
    tensor(crow_indices=tensor([0, 2, 3]),
           col_indices=tensor([0, 2, 1]),
           values=tensor([1., 2., 3.]), size=(2, 3), nnz=3,
           layout=torch.sparse_csr)
    >>> y1 = torch.sparse.mm(c, b, 'sum')
    >>> y1
    tensor([[0., 1.],
            [6., 0.]], grad_fn=<SparseMmReduceImplBackward0>)
    >>> y2 = torch.sparse.mm(c, b, 'amax')
    >>> y2
    tensor([[0., 1.],
            [6., 0.]], grad_fn=<SparseMmReduceImplBackward0>)
""",
)

sampled_addmm = _add_docstr(
    _sparse.sparse_sampled_addmm,
    r"""
sparse.sampled_addmm(input, mat1, mat2, *, beta=1., alpha=1., out=None) -> Tensor

Performs a matrix multiplication of the dense matrices :attr:`mat1` and :attr:`mat2` at the locations
specified by the sparsity pattern of :attr:`input`. The matrix :attr:`input` is added to the final result.

Mathematically this performs the following operation:

.. math::

    \text{out} = \alpha\ (\text{mat1} \mathbin{@} \text{mat2})*\text{spy}(\text{input}) + \beta\ \text{input}

where :math:`\text{spy}(\text{input})` is the sparsity pattern matrix of :attr:`input`, :attr:`alpha`
and :attr:`beta` are the scaling factors.
:math:`\text{spy}(\text{input})` has value 1 at the positions where :attr:`input` has non-zero values, and 0 elsewhere.

.. note::
    :attr:`input` must be a sparse CSR tensor. :attr:`mat1` and :attr:`mat2` must be dense tensors.

Args:
    input (Tensor): a sparse CSR matrix of shape `(m, n)` to be added and used to compute
        the sampled matrix multiplication
    mat1 (Tensor): a dense matrix of shape `(m, k)` to be multiplied
    mat2 (Tensor): a dense matrix of shape `(k, n)` to be multiplied

Keyword args:
    beta (Number, optional): multiplier for :attr:`input` (:math:`\beta`)
    alpha (Number, optional): multiplier for :math:`mat1 @ mat2` (:math:`\alpha`)
    out (Tensor, optional): output tensor. Ignored if `None`. Default: `None`.

Examples::

    >>> input = torch.eye(3, device='cuda').to_sparse_csr()
    >>> mat1 = torch.randn(3, 5, device='cuda')
    >>> mat2 = torch.randn(5, 3, device='cuda')
    >>> torch.sparse.sampled_addmm(input, mat1, mat2)
    tensor(crow_indices=tensor([0, 1, 2, 3]),
        col_indices=tensor([0, 1, 2]),
        values=tensor([ 0.2847, -0.7805, -0.1900]), device='cuda:0',
        size=(3, 3), nnz=3, layout=torch.sparse_csr)
    >>> torch.sparse.sampled_addmm(input, mat1, mat2).to_dense()
    tensor([[ 0.2847,  0.0000,  0.0000],
        [ 0.0000, -0.7805,  0.0000],
        [ 0.0000,  0.0000, -0.1900]], device='cuda:0')
    >>> torch.sparse.sampled_addmm(input, mat1, mat2, beta=0.5, alpha=0.5)
    tensor(crow_indices=tensor([0, 1, 2, 3]),
        col_indices=tensor([0, 1, 2]),
        values=tensor([ 0.1423, -0.3903, -0.0950]), device='cuda:0',
        size=(3, 3), nnz=3, layout=torch.sparse_csr)
inputdimdtypereturnc                     Uc/  Ub  [         R                  " X5      $ [         R                  " U 5      $ Ub  [         R                  " XUS9$ [         R                  " XS9$ )a  Return the sum of each row of the given sparse tensor.

Returns the sum of each row of the sparse tensor :attr:`input` in the given
dimensions :attr:`dim`. If :attr:`dim` is a list of dimensions,
reduce over all of them. When summing over all ``sparse_dim`` dimensions, this method
returns a dense tensor instead of a sparse tensor.

All summed :attr:`dim` are squeezed (see :func:`torch.squeeze`), resulting in an output
tensor having :attr:`dim` fewer dimensions than :attr:`input`.

During backward, only gradients at ``nnz`` locations of :attr:`input`
will propagate back. Note that the gradient of :attr:`input` is coalesced.

Args:
    input (Tensor): the input sparse tensor
    dim (int or tuple of ints): a dimension or a list of dimensions to reduce. Default: reduce
        over all dims.
    dtype (:class:`torch.dtype`, optional): the desired data type of returned Tensor.
        Default: dtype of :attr:`input`.

Example::

    >>> nnz = 3
    >>> dims = [5, 5, 2, 3]
    >>> I = torch.cat([torch.randint(0, dims[0], size=(nnz,)),
                       torch.randint(0, dims[1], size=(nnz,))], 0).reshape(2, nnz)
    >>> V = torch.randn(nnz, dims[2], dims[3])
    >>> size = torch.Size(dims)
    >>> # xdoctest: +IGNORE_WANT("non-deterministic")
    >>> S = torch.sparse_coo_tensor(I, V, size)
    >>> S
    tensor(indices=tensor([[2, 0, 3],
                           [2, 4, 1]]),
           values=tensor([[[-0.6438, -1.6467,  1.4004],
                           [ 0.3411,  0.0918, -0.2312]],

                          [[ 0.5348,  0.0634, -2.0494],
                           [-0.7125, -1.0646,  2.1844]],

                          [[ 0.1276,  0.1874, -0.6334],
                           [-1.9682, -0.5340,  0.7483]]]),
           size=(5, 5, 2, 3), nnz=3, layout=torch.sparse_coo)

    # when sum over only part of sparse_dims, return a sparse tensor
    >>> torch.sparse.sum(S, [1, 3])
    tensor(indices=tensor([[0, 2, 3]]),
           values=tensor([[-1.4512,  0.4073],
                          [-0.8901,  0.2017],
                          [-0.3183, -1.7539]]),
           size=(5, 2), nnz=3, layout=torch.sparse_coo)

    # when sum over all sparse dim, return a dense tensor
    # with summed dims squeezed
    >>> torch.sparse.sum(S, [0, 1, 3])
    tensor([-2.6596, -1.1450])
    """
    if dtype is None:
        if dim is not None:
            return torch._sparse_sum(input, dim)
        else:
            return torch._sparse_sum(input)
    else:
        if dim is not None:
            return torch._sparse_sum(input, dim, dtype=dtype)
        else:
            return torch._sparse_sum(input, dtype=dtype)


softmax = _add_docstr(
    _sparse._sparse_softmax,
    r"""
sparse.softmax(input, dim, *, dtype=None) -> Tensor

Applies a softmax function.

Softmax is defined as:

:math:`\text{Softmax}(x_{i}) = \frac{exp(x_i)}{\sum_j exp(x_j)}`

where :math:`i, j` run over sparse tensor indices and unspecified
entries are ignored. This is equivalent to defining unspecified
entries as negative infinity so that :math:`exp(x_k) = 0` when the
entry with index :math:`k` is not specified.

It is applied to all slices along `dim`, and will re-scale them so
that the elements lie in the range `[0, 1]` and sum to 1.

Args:
    input (Tensor): input
    dim (int): A dimension along which softmax will be computed.
    dtype (:class:`torch.dtype`, optional): the desired data type
        of returned tensor.  If specified, the input tensor is
        cast to :attr:`dtype` before the operation is
        performed. This is useful for preventing data type
        overflows. Default: None
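
Example (an illustrative sketch; the input values below are arbitrary)::

    >>> x = torch.tensor([[1., 0., 2.], [0., 3., 0.]]).to_sparse()
    >>> y = torch.sparse.softmax(x, 1)
    >>> y.shape
    torch.Size([2, 3])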
""",
)

spsolve = _add_docstr(
    _sparse._spsolve,
    r"""
sparse.spsolve(input, other, *, left=True) -> Tensor

Computes the solution of a square system of linear equations with
a unique solution. Its purpose is similar to :func:`torch.linalg.solve`,
except that the system is defined by a sparse CSR matrix with layout
`sparse_csr`.

Args:
    input (Tensor): a sparse CSR matrix of shape `(n, n)` representing the
        coefficients of the linear system.
    other (Tensor): a dense vector of shape `(n,)` representing the right-hand
        side of the linear system.
    left (bool, optional): whether to solve the system for `input @ out = other`
        (default) or `out @ input = other`. Only `left=True` is supported.
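
Example (an illustrative sketch; assumes a CUDA device and a build where the
sparse direct-solver backend is available)::

    >>> A = torch.eye(3, device='cuda').to_sparse_csr()
    >>> b = torch.ones(3, device='cuda')
    >>> x = torch.sparse.spsolve(A, b)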
""",
)

log_softmax = _add_docstr(
    _sparse._sparse_log_softmax,
    r"""
sparse.log_softmax(input, dim, *, dtype=None) -> Tensor

Applies a softmax function followed by logarithm.

See :class:`~torch.sparse.softmax` for more details.

Args:
    input (Tensor): input
    dim (int): A dimension along which softmax will be computed.
    dtype (:class:`torch.dtype`, optional): the desired data type
        of returned tensor.  If specified, the input tensor is
        cast to :attr:`dtype` before the operation is
        performed. This is useful for preventing data type
        overflows. Default: None
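
Example (an illustrative sketch; the input values below are arbitrary)::

    >>> x = torch.tensor([[1., 0., 2.], [0., 3., 0.]]).to_sparse()
    >>> y = torch.sparse.log_softmax(x, 1)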
""",
)

spdiags = _add_docstr(
    _sparse._spdiags,
    r"""
sparse.spdiags(diagonals, offsets, shape, layout=None) -> Tensor

Creates a sparse 2D tensor by placing the values from rows of
:attr:`diagonals` along specified diagonals of the output.

The :attr:`offsets` tensor controls which diagonals are set.

- If :attr:`offsets[i]` = 0, it is the main diagonal
- If :attr:`offsets[i]` < 0, it is below the main diagonal
- If :attr:`offsets[i]` > 0, it is above the main diagonal

The number of rows in :attr:`diagonals` must match the length of :attr:`offsets`,
and an offset may not be repeated.

Args:
    diagonals (Tensor): Matrix storing diagonals row-wise
    offsets (Tensor): The diagonals to be set, stored as a vector
    shape (2-tuple of ints): The desired shape of the result
Keyword args:
    layout (:class:`torch.layout`, optional): The desired layout of the
        returned tensor. ``torch.sparse_coo``, ``torch.sparse_csc`` and ``torch.sparse_csr``
        are supported. Default: ``torch.sparse_coo``

Examples:

Set the main and first two lower diagonals of a matrix::

    >>> diags = torch.arange(9).reshape(3, 3)
    >>> diags
    tensor([[0, 1, 2],
            [3, 4, 5],
            [6, 7, 8]])
    >>> s = torch.sparse.spdiags(diags, torch.tensor([0, -1, -2]), (3, 3))
    >>> s
    tensor(indices=tensor([[0, 1, 2, 1, 2, 2],
                           [0, 1, 2, 0, 1, 0]]),
           values=tensor([0, 1, 2, 3, 4, 6]),
           size=(3, 3), nnz=6, layout=torch.sparse_coo)
    >>> s.to_dense()
    tensor([[0, 0, 0],
            [3, 1, 0],
            [6, 4, 2]])


Change the output layout::

    >>> diags = torch.arange(9).reshape(3, 3)
    >>> diags
    tensor([[0, 1, 2],
            [3, 4, 5],
            [6, 7, 8]])
    >>> s = torch.sparse.spdiags(diags, torch.tensor([0, -1, -2]), (3, 3), layout=torch.sparse_csr)
    >>> s
    tensor(crow_indices=tensor([0, 1, 3, 6]),
           col_indices=tensor([0, 0, 1, 0, 1, 2]),
           values=tensor([0, 3, 1, 6, 4, 2]), size=(3, 3), nnz=6,
           layout=torch.sparse_csr)
    >>> s.to_dense()
    tensor([[0, 0, 0],
            [3, 1, 0],
            [6, 4, 2]])

Set partial diagonals of a large output::

    >>> diags = torch.tensor([[1, 2], [3, 4]])
    >>> offsets = torch.tensor([0, -1])
    >>> torch.sparse.spdiags(diags, offsets, (5, 5)).to_dense()
    tensor([[1, 0, 0, 0, 0],
            [3, 2, 0, 0, 0],
            [0, 4, 0, 0, 0],
            [0, 0, 0, 0, 0],
            [0, 0, 0, 0, 0]])

.. note::

    When setting the values along a given diagonal the index into the diagonal
    and the index into the row of :attr:`diagonals` is taken as the
    column index in the output. This has the effect that when setting a diagonal
    with a positive offset `k` the first value along that diagonal will be
    the value in position `k` of the row of :attr:`diagonals`

Specifying a positive offset::

    >>> diags = torch.tensor([[1, 2, 3], [1, 2, 3], [1, 2, 3]])
    >>> torch.sparse.spdiags(diags, torch.tensor([0, 1, 2]), (5, 5)).to_dense()
    tensor([[1, 2, 3, 0, 0],
            [0, 2, 3, 0, 0],
            [0, 0, 3, 0, 0],
            [0, 0, 0, 0, 0],
            [0, 0, 0, 0, 0]])
""",
)


class check_sparse_tensor_invariants:
    r"""A tool to control checking sparse tensor invariants.

The following options exist to manage sparse tensor invariants
checking in sparse tensor construction:

1. Using a context manager:

   .. code:: python

       with torch.sparse.check_sparse_tensor_invariants():
           run_my_model()

2. Using a procedural approach:

   .. code:: python

       prev_checks_enabled = torch.sparse.check_sparse_tensor_invariants.is_enabled()
       torch.sparse.check_sparse_tensor_invariants.enable()

       run_my_model()

       if not prev_checks_enabled:
           torch.sparse.check_sparse_tensor_invariants.disable()

3. Using function decoration:

   .. code:: python

       @torch.sparse.check_sparse_tensor_invariants()
       def run_my_model():
           ...

       run_my_model()

4. Using ``check_invariants`` keyword argument in sparse tensor constructor call.
   For example:

   >>> torch.sparse_csr_tensor([0, 1, 3], [0, 1], [1, 2], check_invariants=True)
   Traceback (most recent call last):
     File "<stdin>", line 1, in <module>
   RuntimeError: `crow_indices[..., -1] == nnz` is not satisfied.
    """

    @staticmethod
    def is_enabled():
        r"""Return True if the sparse tensor invariants checking is enabled.

.. note::

    Use :func:`torch.sparse.check_sparse_tensor_invariants.enable` or
    :func:`torch.sparse.check_sparse_tensor_invariants.disable` to
    manage the state of the sparse tensor invariants checks.
        """
        return torch._C._check_sparse_tensor_invariants()

    @staticmethod
    def enable():
        r"""Enable sparse tensor invariants checking in sparse tensor constructors.

.. note::

    By default, the sparse tensor invariants checks are disabled. Use
    :func:`torch.sparse.check_sparse_tensor_invariants.is_enabled` to
    retrieve the current state of sparse tensor invariants checking.

.. note::

    The sparse tensor invariants check flag is effective for all sparse
    tensor constructors, both in Python and ATen.

The flag can be locally overridden by the ``check_invariants``
optional argument of the sparse tensor constructor functions.
TNr   r#   #_set_check_sparse_tensor_invariantsr%   r    r   enable%check_sparse_tensor_invariants.enable  s    $ 	44T:r    c                  B    [         R                  R                  S5        g)zDisable sparse tensor invariants checking in sparse tensor constructors.

See :func:`torch.sparse.check_sparse_tensor_invariants.enable` for more information.
        """
        torch._C._set_check_sparse_tensor_invariants(False)

    # context manager support
    def __init__(self, enable=True):
        self.state = enable
        self.saved_state = None

    def __enter__(self):
        if self.saved_state is not None:
            raise RuntimeError(
                "This context manager instance is already activated."
                " Use a different context manager instance for context nesting."
            )
        self.saved_state = self.is_enabled()
        torch._C._set_check_sparse_tensor_invariants(self.state)

    def __exit__(self, type, value, traceback):
        assert self.saved_state is not None
        torch._C._set_check_sparse_tensor_invariants(self.saved_state)
        self.saved_state = None

    # decorator support
    def __call__(self, mth):
        def test_mth(*args, **kwargs):
            with type(self)(self.state):
                return mth(*args, **kwargs)

        return test_mth


def as_sparse_gradcheck(gradcheck):
    """Decorate a function to extend gradcheck for sparse tensors.

Decorator for torch.autograd.gradcheck or its functools.partial
variants that extends the gradcheck function with support for input
functions that operate on and/or return sparse tensors.

The specified gradcheck function itself is guaranteed to operate
on strided tensors only.

For example:

>>> gradcheck = torch.sparse.as_sparse_gradcheck(torch.autograd.gradcheck)
>>> x = torch.tensor([[0, 1], [2, 3]], dtype=torch.float64).to_sparse_coo().requires_grad_(True)
>>> gradcheck(lambda x: x.to_sparse_csr(), x)
True
    """

    def gradcheck_with_sparse_support(func, inputs, **kwargs):
        """
Create gradcheck with support for sparse tensors.

Same as :func:`torch.autograd.gradcheck` but with support for sparse tensor inputs and outputs.
        """
        masked = kwargs.pop("masked", False)
        sparse_layouts = {
            torch.sparse_coo,
            torch.sparse_csr,
            torch.sparse_csc,
            torch.sparse_bsr,
            torch.sparse_bsc,
        }
        sparse_compressed_layouts = {
            torch.sparse_csr,
            torch.sparse_csc,
            torch.sparse_bsr,
            torch.sparse_bsc,
        }
        sparse_block_layouts = {torch.sparse_bsr, torch.sparse_bsc}
        STRIDED_REPRESENTATION = "__STRIDED_REPRESENTATION__"

        def convert_to_strided_representation(args):
            """Convert differentiable non-strided tensors to a representation containing differentiable strided tensors."""
            if not isinstance(args, (list, tuple)):
                args = (args,)
            new_args: list[Any] = []
            for obj in args:
                if isinstance(obj, torch.Tensor) and obj.requires_grad and obj.layout in sparse_layouts:
                    d = dict(layout=obj.layout, shape=obj.shape)
                    if not masked:
                        # Materialize unspecified elements with zero values
                        batch_dim = obj.ndim - obj.dense_dim() - obj.sparse_dim()
                        blocksize = (
                            obj.values().shape[batch_dim + 1 : batch_dim + 3]
                            if obj.layout in sparse_block_layouts
                            else None
                        )
                        full_mask = torch.ones(
                            obj.shape, device=obj.device, dtype=torch.bool
                        ).to_sparse(
                            layout=obj.layout, blocksize=blocksize, dense_dim=obj.dense_dim()
                        )
                        obj = obj.to_dense().sparse_mask(full_mask)
                    if obj.layout is torch.sparse_coo:
                        d.update(indices=obj._indices(), is_coalesced=obj.is_coalesced())
                        values = obj._values()
                    elif obj.layout in {torch.sparse_csr, torch.sparse_bsr}:
                        d.update(compressed_indices=obj.crow_indices(), plain_indices=obj.col_indices())
                        values = obj.values()
                    else:
                        d.update(compressed_indices=obj.ccol_indices(), plain_indices=obj.row_indices())
                        values = obj.values()
                    new_args.extend((STRIDED_REPRESENTATION, d, values.requires_grad_(True)))
                else:
                    new_args.append(obj)
            return tuple(new_args)

        def restore_from_strided_representation(args):
            """Restore non-strided differentiable tensors from their strided representations."""
            new_args = []
            args = list(args)
            while args:
                a = args.pop(0)
                if a == STRIDED_REPRESENTATION:
                    d, values = args.pop(0), args.pop(0)
                    if d["layout"] is torch.sparse_coo:
                        a = torch.sparse_coo_tensor(
                            d["indices"], values, size=d["shape"], is_coalesced=d["is_coalesced"]
                        )
                    elif d["layout"] in sparse_compressed_layouts:
                        a = torch.sparse_compressed_tensor(
                            d["compressed_indices"], d["plain_indices"], values, size=d["shape"], layout=d["layout"]
                        )
                    else:
                        raise NotImplementedError(
                            f"conversion of {d['layout']} strided representation to tensor"
                        )
                new_args.append(a)
            return tuple(new_args)

        def func_wrapper(*args, **kwargs):
            restored_args = restore_from_strided_representation(args)
            outputs = func(*restored_args, **kwargs)
            strided_outputs = tuple(outputs) if isinstance(outputs, (list, tuple)) else (outputs,)
            strided_outputs = tuple(
                o.to_dense(masked_grad=masked)
                if isinstance(o, torch.Tensor) and o.requires_grad and o.layout in sparse_layouts
                else o
                for o in strided_outputs
            )
            return strided_outputs if isinstance(outputs, (list, tuple)) else strided_outputs[0]

        args = (func_wrapper, convert_to_strided_representation(inputs))
        return gradcheck(*args, **kwargs)

    return gradcheck_with_sparse_support