
import operator

import torch
from torch.fx.passes.fake_tensor_prop import FakeTensorProp
from torch.fx.passes.infra.partitioner import CapabilityBasedPartitioner
from torch.fx.passes.operator_support import OperatorSupport
from torch.fx.passes.tools_common import CALLABLE_NODE_OPS
from torch.utils import _pytree as pytree


class CudaGraphsSupport(OperatorSupport):
    def is_node_supported(self, submodules, node: torch.fx.Node) -> bool:
        # Only callable nodes (call_function / call_method / call_module) are
        # candidates for inclusion in a CUDA graph partition.
        if node.op not in CALLABLE_NODE_OPS:
            return False

        # embedding_dense_backward is explicitly excluded from partitions.
        if node.target in [torch.ops.aten.embedding_dense_backward.default]:
            return False

        # getitem only unpacks a multi-output node, so it is always supported.
        if node.target in [operator.getitem]:
            return True

        found_not_cuda = False

        def meta_fk(meta):
            # Fake-tensor metadata may be stored under "val" or "fake_result".
            return meta["val"] if "val" in meta else meta["fake_result"]

        def find_not_cuda(t):
            nonlocal found_not_cuda
            if isinstance(t, torch.Tensor) and t.device.type != "cuda":
                found_not_cuda = True

        # The node is supported only if every input and output tensor lives on
        # a CUDA device.
        for n in node.all_input_nodes:
            pytree.tree_map_(find_not_cuda, meta_fk(n.meta))

        pytree.tree_map_(find_not_cuda, meta_fk(node.meta))

        return not found_not_cuda


def partition_cudagraphs(gm, inputs):
    """
    Partition an FX graph into sub-GraphModules that can be validly run under
    CUDA graphs.  For a subgraph to be runnable under CUDA, all of the operations
    must involve CUDA tensors only.
    """
    # Populate fake-tensor metadata on every node so the device checks in
    # CudaGraphsSupport can run without executing the graph.
    FakeTensorProp(gm).propagate(*inputs)
    supported_ops = CudaGraphsSupport()
    partitioner = CapabilityBasedPartitioner(
        gm, supported_ops, allows_single_node_partition=True
    )
    partitions = partitioner.propose_partitions()
    fused_graph = partitioner.fuse_partitions(partitions)
    return fused_graph