
import warnings
from collections import namedtuple
from typing import Any, Callable, Optional

import torch
from torch.sparse._semi_structured_conversions import (
    sparse_semi_structured_from_dense_cutlass,
    sparse_semi_structured_to_dense_cutlass,
)
from torch.sparse._semi_structured_ops import (
    fallback_dispatcher,
    semi_sparse_addmm,
    semi_sparse_detach,
    semi_sparse_indices,
    semi_sparse_linear,
    semi_sparse_mm,
    semi_sparse_scaled_mm,
    semi_sparse_t,
    semi_sparse_values,
    semi_sparse_view,
)

__all__ = [
    "SparseSemiStructuredTensor",
    "SparseSemiStructuredTensorCUTLASS",
    "SparseSemiStructuredTensorCUSPARSELT",
    "to_sparse_semi_structured",
]

_SEMI_STRUCTURED_SPARSE_CONFIG = namedtuple(
    "_SEMI_STRUCTURED_SPARSE_CONFIG",
    "sparse_min_rows sparse_min_cols dense_min_rows dense_min_cols",
)


class SparseSemiStructuredTensor(torch.Tensor):
    """
    This class implements semi-structured sparsity as a Tensor subclass.

    Semi-structured sparsity describes a sparsity pattern where n out of every 2n elements are zero,
    depending on the datatype. It is also referred to as 2:4 sparsity or fine-grained
    structured sparsity.

    There are two backends available for semi-structured sparsity: cuSPARSELt and CUTLASS.
    This class serves as a base class for both implementations. SparseSemiStructuredTensorCUTLASS
    and SparseSemiStructuredTensorCUSPARSELT both inherit from it and define three backend-specific items:

    - `_DTYPE_SHAPE_CONSTRAINTS` - a dictionary holding backend-specific dense/sparse min shape constraints
    - `def from_dense()` - backend-specific compression routines
    - `def _mm()` - backend-specific mm op (either torch._cslt_sparse_mm or torch._sparse_semi_structured_(mm|addmm))

    Note that as such, this class cannot be instantiated directly.
    """

    _DEFAULT_ALG_ID: int = 0
    _DTYPE_SHAPE_CONSTRAINTS: dict[torch.dtype, _SEMI_STRUCTURED_SPARSE_CONFIG]
    _FORCE_CUTLASS: bool = False
    _FUSE_TRANSPOSE: bool = False
    _PROTOTYPE_WARNING_SHOWN: bool = False

    BACKEND: str
    SPARSE_DISPATCH: dict[Callable, Callable]

    packed: Optional[torch.Tensor]
    meta: Optional[torch.Tensor]
    packed_t: Optional[torch.Tensor]
    meta_t: Optional[torch.Tensor]
    compressed_swizzled_bitmask: Optional[torch.Tensor]
    fuse_transpose_cusparselt: bool
    alg_id_cusparselt: int

    __slots__ = ["packed", "meta", "packed_t", "meta_t", "compressed_swizzled_bitmask"]

    @staticmethod
    def __new__(
        cls,
        shape: torch.Size,
        packed: Optional[torch.Tensor],
        meta: Optional[torch.Tensor],
        packed_t: Optional[torch.Tensor],
        meta_t: Optional[torch.Tensor],
        compressed_swizzled_bitmask: Optional[torch.Tensor],
        fuse_transpose_cusparselt: bool = False,
        alg_id_cusparselt: int = 0,
        requires_grad: bool = False,
    ):
        """
        Create a new instance of the tensor subclass from the compressed sparse representation.

        We have the option to create the subclass with the compressed representations of both X and X', for training.
        For inference, we only need a single representation (either X or X'), while the corresponding other set will be None.

        Depending on the backend selected, certain fields will be set to None (cuSPARSELt vs CUTLASS).

        Args:
            shape: The shape of the original dense tensor
            packed: The compressed representation of the original dense tensor
            meta: The metadata of the original dense tensor, if it is stored separately
            packed_t: The compressed representation of the transposed original dense tensor
            meta_t: The metadata of the transposed original dense tensor, if it is stored separately
            compressed_swizzled_bitmask: The masks used by the CUTLASS backend to determine which threads should
                                         participate in the computation. Used for pointwise ops.
            fuse_transpose_cusparselt: When running with cuSPARSELt, we have the option to fuse a transposition
                                       with a matmul, which is useful in the case of 2:4 sparse training.
            alg_id_cusparselt: The algorithm id to use when using cuSPARSELt; it affects matmul performance.

        Returns:
            torch.Tensor: A torch.Tensor wrapper subclass.

        Raises:
            ValueError: If all of the tensor arguments are None.
        """
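        # Note on which fields each backend populates (summarizing the class docstrings):
        #   - CUTLASS keeps the specified elements and metadata separate (packed/meta,
        #     plus packed_t/meta_t for the transposed representation used in training).
        #   - cuSPARSELt fuses the metadata into `packed` (and `packed_t`), so
        #     meta/meta_t stay None.
        #   - compressed_swizzled_bitmask is CUTLASS-specific and used for pointwise ops.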
        if not cls._PROTOTYPE_WARNING_SHOWN:
            warnings.warn(
                (
                    "The PyTorch API of SparseSemiStructuredTensor is in prototype stage "
                    "and will change in the near future. Please open a Github issue "
                    "for feature requests and see our documentation on the torch.sparse "
                    "module for further information about the project."
                ),
                UserWarning,
            )
            cls._PROTOTYPE_WARNING_SHOWN = True

            # Because this only runs once, we also load the dispatch table here.
            # We can't define the dispatch table at class scope because of torch.ops
            # import errors, and doing it lazily also lets users overload the table
            # for debugging / testing.
            cls._load_dispatch_table()

            # We can also register the tensor subclass with dynamo while we are here.
            torch._dynamo.allow_in_graph(cls)

        if packed is not None:
            previous_tensor = packed
        elif packed_t is not None:
            previous_tensor = packed_t
        else:
            raise ValueError("At least one of packed or packed_t must be provided")

        kwargs = {
            "device": previous_tensor.device,
            "dtype": previous_tensor.dtype,
            "layout": previous_tensor.layout,
            "requires_grad": requires_grad,
        }
        tensor = torch.Tensor._make_wrapper_subclass(cls, shape, **kwargs)
        tensor.packed = packed
        tensor.meta = meta
        tensor.packed_t = packed_t
        tensor.meta_t = meta_t
        tensor.compressed_swizzled_bitmask = compressed_swizzled_bitmask
        tensor.fuse_transpose_cusparselt = fuse_transpose_cusparselt
        tensor.alg_id_cusparselt = alg_id_cusparselt
        return tensor

    def __repr__(self) -> str:
        assert hasattr(self, "shape")
        return f"{self.__class__.__name__}(shape={self.shape})"

    def __tensor_flatten__(
        self,
    ) -> tuple[list[str], tuple[torch.Size, bool, int, bool]]:
        inner_tensors = list(
            filter(lambda x: getattr(self, x) is not None, self.__slots__)
        )
        tensor_meta = (
            self.shape,
            self.fuse_transpose_cusparselt,
            self.alg_id_cusparselt,
            self.requires_grad,
        )
        return inner_tensors, tensor_meta

    @classmethod
    def __tensor_unflatten__(
        cls,
        inner_tensors,
        tensor_meta: tuple[torch.Size, bool, int, bool],
        outer_size,
        outer_stride,
    ) -> torch.Tensor:
        shape, fuse_transpose_cusparselt, alg_id_cusparselt, requires_grad = tensor_meta
        return cls(
            shape=shape,
            packed=inner_tensors.get("packed", None),
            meta=inner_tensors.get("meta", None),
            packed_t=inner_tensors.get("packed_t", None),
            meta_t=inner_tensors.get("meta_t", None),
            compressed_swizzled_bitmask=inner_tensors.get(
                "compressed_swizzled_bitmask", None
            ),
            fuse_transpose_cusparselt=fuse_transpose_cusparselt,
            alg_id_cusparselt=alg_id_cusparselt,
            requires_grad=requires_grad,
        )

    __torch_function__ = torch._C._disabled_torch_function_impl

    @classmethod
    def __torch_dispatch__(cls, func, types, args, kwargs) -> Any:
        if func._overloadpacket not in cls.SPARSE_DISPATCH:
            raise NotImplementedError(
                f"{cls.__name__} only supports a specific set of operations, "
                f"can't perform requested op ({func.__name__})"
            )
        return cls.SPARSE_DISPATCH[func._overloadpacket](func, types, args, kwargs)

    @classmethod
    def _load_dispatch_table(cls, custom_dispatch_table=None) -> None:
        """
        Loads the op overload sparse dispatch table for the current class.
        """
        if getattr(cls, "SPARSE_DISPATCH", None) is None:
            cls.SPARSE_DISPATCH = {
                torch.ops.aten.values: semi_sparse_values,
                torch.ops.aten.indices: semi_sparse_indices,
                torch.ops.aten.is_same_size: fallback_dispatcher,
                torch.ops.aten.detach_: fallback_dispatcher,
                torch.ops.aten.detach: semi_sparse_detach,
                torch.ops.aten.t: semi_sparse_t,
                torch.ops.aten.view: semi_sparse_view,
                torch.ops.aten.mm: semi_sparse_mm,
                torch.ops.aten.matmul: semi_sparse_mm,
                torch.ops.aten.addmm: semi_sparse_addmm,
                torch.ops.aten.linear: semi_sparse_linear,
                torch.ops.aten._to_copy: fallback_dispatcher,
                torch.ops.aten._scaled_mm: semi_sparse_scaled_mm,
            }
            if custom_dispatch_table is not None:
                cls.SPARSE_DISPATCH.update(custom_dispatch_table)

    @classmethod
    def _validate_device_dim_dtype_shape(cls, original_tensor: torch.Tensor) -> None:
        """
        Assert that the given tensor is valid for semi-structured sparse compression.
        """
        # check device
        if not original_tensor.is_cuda:
            raise RuntimeError(
                f"Error original_tensor.device= {original_tensor.device} is not supported! "
                "Only CUDA tensors are currently supported."
            )

        # check dim
        if original_tensor.dim() != 2:
            raise RuntimeError(
                f"Error original_tensor.dim = {original_tensor.dim()} is not supported! "
                "Only 2d tensors are currently supported."
            )

        # check contiguous
        if not original_tensor.is_contiguous():
            raise RuntimeError(
                "Error original_tensor is not contiguous! "
                "Only contiguous tensors are currently supported."
            )

        # check dtype
        if original_tensor.dtype not in cls._DTYPE_SHAPE_CONSTRAINTS:
            raise RuntimeError(
                f"Error original_tensor.dtype {original_tensor.dtype} "
                f"is not a supported dtype for {cls}!"
            )

        # check shape
        m, n = original_tensor.shape
        min_rows = cls._DTYPE_SHAPE_CONSTRAINTS[original_tensor.dtype].sparse_min_rows
        min_cols = cls._DTYPE_SHAPE_CONSTRAINTS[original_tensor.dtype].sparse_min_cols
        if m < min_rows or m % min_rows or n < min_cols or n % min_cols:
            raise RuntimeError(
                f"Error original_tensor.shape {original_tensor.shape} is not supported! "
                f"Both dimensions must be larger than or equal to and a multiple of ({min_rows}, {min_cols})"
            )

    @classmethod
    def _pad_dense_input(cls, dense_input: torch.Tensor) -> torch.Tensor:
        """
        Calculates padding for dense tensor and pads tensor if necessary.
        If padding is not required, this function returns the original tensor.
        """
        # only 2d matmul
        assert dense_input.dim() == 2

        # check shape
        m, n = dense_input.shape
        min_rows = cls._DTYPE_SHAPE_CONSTRAINTS[dense_input.dtype].dense_min_rows
        min_cols = cls._DTYPE_SHAPE_CONSTRAINTS[dense_input.dtype].dense_min_cols

        # calculate padding
        to_pad_m = -m % min_rows if m < min_rows or m % min_rows else 0
        to_pad_n = -n % min_cols if n < min_cols or n % min_cols else 0
        if to_pad_m or to_pad_n:
            return torch.nn.functional.pad(dense_input, (0, to_pad_n, 0, to_pad_m))
        else:
            return dense_input

    def to_dense(self):
        col = self.shape[-1]
        return torch.mm(self, torch.eye(col, dtype=self.dtype, device=self.device))

    @classmethod
    def from_dense(cls, original_tensor: torch.Tensor) -> "SparseSemiStructuredTensor":
        raise NotImplementedError

    def _mm(
        self,
        B: torch.Tensor,
        *,
        bias: Optional[torch.Tensor] = None,
        **kwargs,
    ) -> torch.Tensor:
        raise NotImplementedError


def to_sparse_semi_structured(
    original_tensor: torch.Tensor,
    transposed: bool = False,
) -> SparseSemiStructuredTensor:
    """
    This function converts a dense tensor into a sparse semi-structured tensor.
    It will return a SparseSemiStructuredTensor, a subclass of torch.Tensor.

    This function will check to ensure the dense tensor has the right dtype, size, dims, and device.
    We currently only support semi-structured sparse tensors for 2d CUDA tensors.
    Additionally, your tensor must be a positive multiple of the minimum sparse block size, given in
    `_DTYPE_SHAPE_CONSTRAINTS` for each dtype (float32, float16, bfloat16, int8).

    Args:
        original_tensor (Tensor): the dense tensor to convert
        transposed (bool, optional): deprecated arg to be removed in another release. Do not use.
    Returns:
        SparseSemiStructuredTensor: A sparse semi-structured tensor created from the given original_tensor
    Raises:
        None
    Example:
        >>> # xdoctest: +REQUIRES(env:TORCH_DOCTEST_CUDA)
        >>> A = torch.Tensor([0, 0, 1, 1]).tile((128, 32)).half().cuda()
        >>> A
        tensor([[0., 0., 1.,  ..., 0., 1., 1.],
                [0., 0., 1.,  ..., 0., 1., 1.],
                [0., 0., 1.,  ..., 0., 1., 1.],
                ...,
                [0., 0., 1.,  ..., 0., 1., 1.],
                [0., 0., 1.,  ..., 0., 1., 1.],
                [0., 0., 1.,  ..., 0., 1., 1.]], device='cuda:0', dtype=torch.float16)
        >>> A_sparse = to_sparse_semi_structured(A)
        >>> A_sparse
        SparseSemiStructuredTensor(shape=torch.Size([128, 128]))
        >>> A_sparse.values()
        tensor([[1., 1., 1.,  ..., 1., 1., 1.],
                [1., 1., 1.,  ..., 1., 1., 1.],
                [1., 1., 1.,  ..., 1., 1., 1.],
                ...,
                [1., 1., 1.,  ..., 1., 1., 1.],
                [1., 1., 1.,  ..., 1., 1., 1.],
                [1., 1., 1.,  ..., 1., 1., 1.]], device='cuda:0', dtype=torch.float16)
        >>> A_sparse.indices()
        tensor([[-4370, -4370, -4370,  ..., -4370, -4370, -4370],
                [-4370, -4370, -4370,  ..., -4370, -4370, -4370],
                [-4370, -4370, -4370,  ..., -4370, -4370, -4370],
                ...,
                [-4370, -4370, -4370,  ..., -4370, -4370, -4370],
                [-4370, -4370, -4370,  ..., -4370, -4370, -4370],
                [-4370, -4370, -4370,  ..., -4370, -4370, -4370]], device='cuda:0', dtype=torch.int16)
    """
    if transposed:
        warnings.warn(
            "Setting transpose from `to_sparse_semi_structured` is deprecated "
            "and will be removed in a future release. "
            "`SparseSemiStructuredTensor` only supports contiguous input tensors.",
            FutureWarning,
            stacklevel=2,
        )

    SPARSE_SUBCLASS = (
        torch.sparse.SparseSemiStructuredTensorCUTLASS
        if SparseSemiStructuredTensor._FORCE_CUTLASS
        else torch.sparse.SparseSemiStructuredTensorCUSPARSELT
    )

    return SPARSE_SUBCLASS.from_dense(original_tensor)


class SparseSemiStructuredTensorCUTLASS(SparseSemiStructuredTensor):
    """
    This class implements semi-structured sparsity for the CUTLASS backend.

    In this implementation, the specified elements and metadata are stored separately,
    in packed and meta respectively.

    When _FORCE_CUTLASS is set, or when cuSPARSELt is not available, this subclass calls into
    _sparse_semi_structured_(mm|addmm) and sparse_semi_structured_from_dense_cutlass for
    conversion to the compressed format.
    """

    BACKEND = "cutlass"
    _DTYPE_SHAPE_CONSTRAINTS = {
        torch.int8: _SEMI_STRUCTURED_SPARSE_CONFIG(16, 128, 16, 16),
        torch.float16: _SEMI_STRUCTURED_SPARSE_CONFIG(32, 64, 8, 8),
        torch.bfloat16: _SEMI_STRUCTURED_SPARSE_CONFIG(32, 64, 8, 8),
        torch.float32: _SEMI_STRUCTURED_SPARSE_CONFIG(32, 32, 4, 4),
    }

    @classmethod
    def from_dense(
        cls, original_tensor: torch.Tensor
    ) -> "SparseSemiStructuredTensorCUTLASS":
        cls._validate_device_dim_dtype_shape(original_tensor)
        (
            sparse_tensor_cutlass,
            meta_tensor_cutlass,
        ) = sparse_semi_structured_from_dense_cutlass(original_tensor)
        return cls(
            original_tensor.shape,
            packed=sparse_tensor_cutlass,
            meta=meta_tensor_cutlass,
            packed_t=None,
            meta_t=None,
            compressed_swizzled_bitmask=None,
            requires_grad=original_tensor.requires_grad,
        )

    def to_dense(self):
        assert self.meta is not None and self.packed is not None
        return (
            sparse_semi_structured_to_dense_cutlass(
                self.packed,
                self.meta,
            )
            if self.meta.ndim == 2
            else super().to_dense()
        )

    @classmethod
    def prune_dense_static_sort(
        cls, original_tensor: torch.Tensor, algorithm=""
    ) -> "SparseSemiStructuredTensor":
        """
        This function takes in an unpruned dense tensor and runs a (branchless) static sort across a 4x4 tile.

        It greedily picks the largest values in the tile, upholding the 2:4 sparsity constraint across both rows and columns.
        The algorithm used to prune the matrix is implemented in `_sparse_semi_structured_tile`.

        Then it creates the packed and meta tensors for the compressed sparse representation of the pruned dense tensor.
        It also calculates the packed_t and meta_t tensors for the compressed sparse representation of the transposed
        pruned dense tensor.
        Since we cannot transpose the compressed representations, we store both for the fw/bw pass respectively.

        Finally, this function also computes a compressed swizzled bitmask that encodes the sparsity pattern.
        This can be used in the backward pass to mask the gradients.

        [9 1 7 4]                       [9 0 7 0]
        [1 2 3 0]                       [0 2 0 0]
        [8 3 5 4] -> prune 4x4 tile  -> [8 0 0 4] -> pack to CUTLASS semi-structured -> packed
        [1 2 6 2]                       [0 0 6 2]                                    -> metadata

                                                  -> pack to transposed CUTLASS      -> packed_t
                                                     semi-structured representation  -> metadata_t

                                                  -> compute swizzled bitmask        -> compressed_swizzled_bitmask


        The equivalent PyTorch code to create the same five outputs from the dense tensor can be found below:
        ```
        from torch.sparse import SparseSemiStructuredTensorCUTLASS
        from torch.sparse._semi_structured_conversions import _sparse_semi_structured_tile, _compute_compressed_swizzled_bitmask

        pruned = _sparse_semi_structured_tile(dense)
        packed_cutlass, meta_cutlass = sparse_semi_structured_from_dense_cutlass(pruned)
        packed_t_cutlass, meta_t_cutlass = sparse_semi_structured_from_dense_cutlass(pruned.t().contiguous())
        bitmask = _compute_compressed_swizzled_bitmask(pruned)

        SparseSemiStructuredTensorCUTLASS(dense.shape, packed_cutlass, meta_cutlass, packed_t_cutlass, meta_t_cutlass, bitmask)
        ```
        """
        (
            packed,
            meta,
            packed_t,
            meta_t,
            compressed_swizzled_bitmask,
        ) = torch._sparse_semi_structured_tile(
            original_tensor, algorithm=algorithm, use_cutlass=True
        )

        return cls(
            original_tensor.shape,
            packed=packed,
            meta=meta,
            packed_t=packed_t,
            meta_t=meta_t,
            compressed_swizzled_bitmask=compressed_swizzled_bitmask,
            requires_grad=False,
        )

    def _mm(
        self,
        B: torch.Tensor,
        *,
        bias: Optional[torch.Tensor] = None,
        **kwargs,
    ) -> torch.Tensor:
        if isinstance(B, SparseSemiStructuredTensor):
            raise ValueError(
                "`SparseSemiStructuredTensor @ SparseSemiStructuredTensor` is not supported by the hardware"
            )
        cls_name = self.__class__.__name__
        if self.ndim != 2 or B.ndim != 2:
            raise NotImplementedError(
                f"`{cls_name}` matmul: Broadcasting is not implemented"
            )
        if self.packed is None or self.meta is None:
            raise NotImplementedError(
                f"`{cls_name}` matmul: operation is not supported"
            )
        if bias is None:
            res = torch._sparse_semi_structured_mm(self.packed, self.meta, B)
        else:
            res = torch._sparse_semi_structured_addmm(bias, self.packed, self.meta, B)
        # The result may include padded rows; trim back to the logical shape.
        return res[: self.shape[0]]
~ BF(((0(>(	( (r?   r   c                      \ rS rSrSrSr\R                  \" SSSS5      \R                  \" SSSS5      \R                  \" SSSS5      \R                  \" SSSS5      0r\S\R                  SS 4S	 j5       r\ SS\R                  SS
4S jj5       rSS.S\R                  S\\R                     S\R                  4S jjrSrg)r   i  ab  
    The cuSPARSELt backend expects the specified elements and the metadata to be stored in a single tensor:
    packed = [ specified elements of original tensor | metadata ]
    For an original tensor of size (m, k), we expect the first m * k // 2 elements to be the kept elements.
    The rest of the tensor is metadata. Since there is only one tensor, we only use the packed and packed_t
    attributes respectively.

    cuSPARSELt also supports transposition fusion, which is necessary for performant 2:4 sparse training, as well
    as specifying alg_id, a config that affects the performance of the matmul depending on matmul sizes.
    """

    BACKEND = "cusparselt"
    _DTYPE_SHAPE_CONSTRAINTS = {
        torch.float8_e4m3fn: _SEMI_STRUCTURED_SPARSE_CONFIG(32, 32, 16, 16),
        torch.int8: _SEMI_STRUCTURED_SPARSE_CONFIG(32, 32, 16, 16),
        torch.float16: _SEMI_STRUCTURED_SPARSE_CONFIG(16, 16, 8, 8),
        torch.bfloat16: _SEMI_STRUCTURED_SPARSE_CONFIG(16, 16, 8, 8),
    }

    @classmethod
    def from_dense(
        cls, original_tensor: torch.Tensor
    ) -> "SparseSemiStructuredTensorCUSPARSELT":
        cls._validate_device_dim_dtype_shape(original_tensor)
        return cls(
            shape=original_tensor.shape,
            packed=torch._cslt_compress(original_tensor),
            meta=None,
            packed_t=None,
            meta_t=None,
            compressed_swizzled_bitmask=None,
            fuse_transpose_cusparselt=SparseSemiStructuredTensor._FUSE_TRANSPOSE,
            alg_id_cusparselt=SparseSemiStructuredTensor._DEFAULT_ALG_ID,
            requires_grad=original_tensor.requires_grad,
        )

    @classmethod
    def prune_dense_static_sort(
        cls, original_tensor: torch.Tensor, algorithm=""
    ) -> "SparseSemiStructuredTensor":
        """
        This function does the same thing as described in SparseSemiStructuredTensorCUTLASS, but uses the cuSPARSELt metadata
        layout and sparse matmul.

        The only functional difference is that cuSPARSELt stores `metadata` and `packed` together into a single tensor.

        [9 1 7 4]                       [9 0 7 0]
        [1 2 3 0]                       [0 2 0 0]
        [8 3 5 4] -> prune 4x4 tile  -> [8 0 0 4] -> pack to cuSPARSELt semi-structured -> packed
        [1 2 6 2]                       [0 0 6 2]

                                                  -> pack to transposed cuSPARSELt      -> packed_t
                                                     semi-structured representation

                                                  -> compute swizzled bitmask           -> compressed_swizzled_bitmask


        The equivalent PyTorch code to create the same three outputs from the dense tensor can be found below:
        ```
        from torch.sparse import SparseSemiStructuredTensorCUSPARSELT
        from torch.sparse._semi_structured_conversions import _sparse_semi_structured_tile, _compute_compressed_swizzled_bitmask

        pruned = _sparse_semi_structured_tile(dense)
        packed_cusparselt = torch._cslt_compress(pruned)
        packed_t_cusparselt = torch._cslt_compress(pruned.t().contiguous())
        bitmask = _compute_compressed_swizzled_bitmask(pruned)

        SparseSemiStructuredTensorCUSPARSELT(dense.shape, packed_cusparselt, None, packed_t_cusparselt, None, bitmask)
        ```
        """
        (
            packed,
            meta,
            packed_t,
            meta_t,
            compressed_swizzled_bitmask,
        ) = torch._sparse_semi_structured_tile(
            original_tensor, algorithm=algorithm, use_cutlass=False
        )

        return cls(
            original_tensor.shape,
            packed=packed,
            meta=meta,
            packed_t=packed_t,
            meta_t=meta_t,
            compressed_swizzled_bitmask=compressed_swizzled_bitmask,
            requires_grad=False,
        )

    def _mm(
        self,
        B: torch.Tensor,
        *,
        bias: Optional[torch.Tensor] = None,
        **kwargs,
    ) -> torch.Tensor:
        if isinstance(B, SparseSemiStructuredTensor):
            raise ValueError(
                "`SparseSemiStructuredTensor @ SparseSemiStructuredTensor` is not supported by the hardware"
            )
        if self.ndim != 2 or B.ndim != 2:
            raise NotImplementedError(
                f"`{self.__class__.__name__}` matmul: Broadcasting is not implemented"
            )
        if B.dtype != self.dtype:
            raise NotImplementedError(
                f"`{self.__class__.__name__}` matmul: trying to do "
                f"`A={tuple(self.shape)} @ B={tuple(B.shape)}`, "
                f"with A.dtype={self.dtype} and B.dtype={B.dtype}. "
                "This operation is only supported when A and B have the same data type."
            )
        if bias is not None and bias.dtype != self.dtype:
            raise NotImplementedError(
                f"`{self.__class__.__name__}` matmul: trying to do "
                f"`A={tuple(self.shape)} @ B={tuple(B.shape)} + C`, "
                f"with A.dtype=B.dtype={self.dtype} and C.dtype={bias.dtype}. "
                "This operation is only supported when A, B and C have the same data type."
            )
        if self.dtype == torch.float8_e4m3fn:
            raise NotImplementedError(
                f"`{self.__class__.__name__}` matmul: trying to do "
                f"`A={tuple(self.shape)} @ B={tuple(B.shape)}`, "
                f"with A.dtype=B.dtype={self.dtype}. "
                "mm is not supported for float8_e4m3fn, please use `torch._scaled_mm` instead."
            )
        if self.packed is None:
            raise NotImplementedError(
                f"`{self.__class__.__name__}` matmul: operation is not supported"
            )
        res = torch._cslt_sparse_mm(
            self.packed,
            B,
            bias=bias,
            transpose_result=self.fuse_transpose_cusparselt,
            alg_id=self.alg_id_cusparselt,
        )
        return res.t() if self.fuse_transpose_cusparselt else res
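
# A minimal cuSPARSELt sketch (illustrative; assumes PyTorch was built with
# cuSPARSELt support, which is the default backend when available):
#
#     W = torch.randn(128, 128, dtype=torch.float16, device="cuda")
#     W_sparse = SparseSemiStructuredTensorCUSPARSELT.prune_dense_static_sort(W)
#     y = torch.mm(W_sparse, torch.randn(128, 64, dtype=torch.float16, device="cuda"))
#     # When fuse_transpose_cusparselt is set, the kernel writes the transposed
#     # result and _mm views it back with .t() (see _mm above).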