"""
This module provides TVM backend integration for TorchDynamo.

Apache TVM is a deep learning compiler framework that can optimize and execute
models on various hardware backends. This module enables:

- Compilation of PyTorch models to TVM's computation graphs
- Multiple scheduling options:
  - Default scheduler
  - Auto-scheduler for automatic optimization
  - Meta-schedule for evolutionary search-based tuning
- Hardware-specific optimizations:
  - CUDA GPU support
  - CPU support with LLVM targeting and architecture-specific tuning
  - Automatic detection of CPU capabilities (AVX2, AVX512)
- Tensor conversion utilities between PyTorch and TVM formats
- Configurable optimization levels and tuning trials

The backend can be used with torch.compile():
    model = torch.compile(model, backend="tvm")
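
Scheduler, trial budget, and optimization level can also be supplied through
the backend options. This is a sketch of the expected keys (it assumes that
torch.compile forwards its options dict to this backend unchanged):

    model = torch.compile(
        model,
        backend="tvm",
        options={"scheduler": "meta_schedule", "trials": 20000, "opt_level": 3},
    )

If no scheduler is given in the options, the TVM_SCHEDULER environment
variable is consulted instead.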
"""

import functools
import importlib
import logging
import os
import sys
import tempfile
from types import MappingProxyType
from typing import Optional

import torch

from .common import device_from_inputs, fake_tensor_unsupported
from .registry import register_backend

log = logging.getLogger(__name__)


@register_backend
@fake_tensor_unsupported
def tvm(
    gm,
    example_inputs,
    *,
    options: Optional[MappingProxyType] = MappingProxyType(
        {"scheduler": None, "trials": 20000, "opt_level": 3}
    ),
):
    import tvm
    from tvm import relay
    from tvm.contrib import graph_executor

    jit_mod = torch.jit.trace(gm, example_inputs)
    device = device_from_inputs(example_inputs)
    shape_list = [(f"inp_{idx}", i.shape) for idx, i in enumerate(example_inputs)]
    example_outputs = gm(*example_inputs)
    if len(example_outputs) == 0:
        log.warning("Explicitly fall back to eager due to zero output")
        return gm.forward
    mod, params = relay.frontend.from_pytorch(jit_mod, shape_list)
    if device.type == "cuda":
        dev = tvm.cuda(device.index)
        target = tvm.target.cuda()
    else:
        dev = tvm.cpu(0)
        target = tvm.target.Target(llvm_target())

    scheduler = options.get("scheduler", None)
    if scheduler is None:
        scheduler = os.environ.get("TVM_SCHEDULER", None)

    trials = options.get("trials", 20000)
    opt_level = options.get("opt_level", 3)

    if scheduler == "auto_scheduler":
        from tvm import auto_scheduler

        # Tuning records go to a temporary log file; an existing log is reused.
        log_file = tempfile.NamedTemporaryFile().name

        if not os.path.exists(log_file):
            tasks, task_weights = auto_scheduler.extract_tasks(
                mod["main"], params, target
            )
            if len(tasks) != 0:
                tuner = auto_scheduler.TaskScheduler(tasks, task_weights)
                if not os.path.exists(log_file):
                    assert trials > 0
                    tune_option = auto_scheduler.TuningOptions(
                        num_measure_trials=trials,
                        measure_callbacks=[auto_scheduler.RecordToFile(log_file)],
                        early_stopping=2000,
                    )
                    try:
                        tuner.tune(tune_option)
                    except Exception:
                        if os.path.exists(log_file):
                            os.unlink(log_file)
                        raise
        with auto_scheduler.ApplyHistoryBest(log_file):
            with tvm.transform.PassContext(
                opt_level=opt_level,
                config={"relay.backend.use_auto_scheduler": True},
            ):
                lib = relay.build(mod, target=target, params=params)
    elif scheduler == "meta_schedule":
        from tvm import meta_schedule as ms

        with tempfile.TemporaryDirectory() as work_dir:
            if device.type != "cuda":
                # meta_schedule needs the number of physical cores in the target
                target = tvm.target.Target(
                    f"{llvm_target()} --num-cores {ms.utils.cpu_count(logical=False)}"
                )
            assert trials > 0
            database = ms.relay_integration.tune_relay(
                mod=mod,
                target=target,
                work_dir=work_dir,
                max_trials_global=trials,
                num_trials_per_iter=64,
                params=params,
                strategy="evolutionary",
                opt_level=opt_level,
            )
            lib = ms.relay_integration.compile_relay(
                database=database,
                mod=mod,
                target=target,
                params=params,
                opt_level=opt_level,
            )
    elif scheduler == "default" or not scheduler:
        # no autotuning
        with tvm.transform.PassContext(opt_level=opt_level):
            lib = relay.build(mod, target=target, params=params)
    else:
        raise NotImplementedError(
            "This tuning option is invalid/not implemented for torchdynamo's "
            "TVM-related backend. There are three available options: "
            "default, auto_scheduler and meta_schedule."
        )

    m = graph_executor.GraphModule(lib["default"](dev))

    def to_torch_tensor(nd_tensor):
        """A helper function to transfer a NDArray to torch.tensor."""
        if nd_tensor.dtype == "bool":
            # DLPack does not support bool, so go through numpy (extra copy).
            return torch.from_numpy(nd_tensor.numpy())
        return torch.utils.dlpack.from_dlpack(nd_tensor.to_dlpack())

    def to_tvm_tensor(torch_tensor):
        """A helper function to transfer a torch.tensor to NDArray."""
        if torch_tensor.dtype == torch.bool:
            # Same reason as above: bool is not supported by DLPack.
            return tvm.nd.array(torch_tensor.cpu().numpy())
        return tvm.nd.from_dlpack(torch_tensor)

    def exec_tvm(*i_args):
        args = [a.contiguous() for a in i_args]
        shape_info, _ = m.get_input_info()
        active_inputs = {name for name, _ in shape_info.items()}
        for idx, arg in enumerate(args, 0):
            if arg.dim() != 0:
                if arg.requires_grad:
                    arg = arg.detach()
                inp_name = f"inp_{idx}"
                if inp_name not in active_inputs:
                    log.warning(
                        "input %s skipped as not found in tvm's runtime library",
                        inp_name,
                    )
                    continue
                m.set_input(inp_name, to_tvm_tensor(arg))
        m.run()
        return [to_torch_tensor(m.get_output(i)) for i in range(m.get_num_outputs())]

    return exec_tvm
tvm_meta_schedule = functools.partial(tvm, options={"scheduler": "meta_schedule"})
tvm_auto_scheduler = functools.partial(tvm, options={"scheduler": "auto_scheduler"})


def has_tvm():
    try:
        importlib.import_module("tvm")
        return True
    except ImportError:
        return False


@functools.lru_cache(None)
def llvm_target():
    if sys.platform == "linux":
        cpuinfo = open("/proc/cpuinfo").read()
        if "avx512" in cpuinfo:
            return "llvm -mcpu=skylake-avx512"
        elif "avx2" in cpuinfo:
            return "llvm -mcpu=core-avx2"
    return "llvm"
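

# Example (a sketch, not part of this module's API): guard backend selection on
# TVM availability before compiling; `model` stands for any eager nn.Module.
#
#     import torch
#     from torch._dynamo.backends.tvm import has_tvm
#
#     backend = "tvm" if has_tvm() else "eager"
#     opt_model = torch.compile(model, backend=backend)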