r"""
This package introduces support for the XPU backend, specifically tailored for
Intel GPU optimization.

This package is lazily initialized, so you can always import it, and use
:func:`is_available()` to determine if your system supports XPU.
"""

import threading
import traceback
from functools import lru_cache
from typing import Any, Callable, Optional, Union

import torch
import torch._C
from torch import device as _device
from torch._utils import _dummy_type, _LazySeedTracker

from ._utils import _get_device_index
from .streams import Event, Stream

_initialized = False
_tls = threading.local()
_initialization_lock = threading.Lock()
# Calls queued before initialization; they are replayed by _lazy_init().
_queued_calls: list[tuple[Callable[[], None], list[str]]] = []
_is_in_bad_fork = getattr(torch._C, "_xpu_isInBadFork", lambda: False)
_device_t = Union[_device, str, int, None]
_lazy_seed_tracker = _LazySeedTracker()
default_generators: tuple[torch._C.Generator] = ()  # type: ignore[assignment]


def _is_compiled() -> bool:
    r"""Return true if compiled with XPU support."""
    return torch._C._has_xpu


if _is_compiled():
    _XpuDeviceProperties = torch._C._XpuDeviceProperties
    _exchange_device = torch._C._xpu_exchangeDevice
    _maybe_exchange_device = torch._C._xpu_maybeExchangeDevice
else:
    # Define dummy placeholders if PyTorch was compiled without XPU support.
    _XpuDeviceProperties = _dummy_type("_XpuDeviceProperties")  # type: ignore[assignment, misc]

    def _exchange_device(device: int) -> int:
        raise NotImplementedError("PyTorch was compiled without XPU support")

    def _maybe_exchange_device(device: int) -> int:
        raise NotImplementedError("PyTorch was compiled without XPU support")


@lru_cache(maxsize=1)
def device_count() -> int:
    r"""Return the number of XPU devices available."""
    if not _is_compiled():
        return 0
    return torch._C._xpu_getDeviceCount()


def is_available() -> bool:
    r"""Return a bool indicating if XPU is currently available."""
    return device_count() > 0


def is_bf16_supported():
    r"""Return a bool indicating if the current XPU device supports dtype bfloat16."""
    return True


def is_initialized():
    r"""Return whether PyTorch's XPU state has been initialized."""
    return _initialized and not _is_in_bad_fork()


def _lazy_call(callable, **kwargs):
    if is_initialized():
        callable()
    else:
        global _lazy_seed_tracker
        if kwargs.get("seed_all", False):
            _lazy_seed_tracker.queue_seed_all(callable, traceback.format_stack())
        elif kwargs.get("seed", False):
            _lazy_seed_tracker.queue_seed(callable, traceback.format_stack())
        else:
            _queued_calls.append((callable, traceback.format_stack()))


def init():
    r"""Initialize PyTorch's XPU state.
    This is a Python API for lazy initialization that avoids initializing
    XPU until the first time it is accessed. Does nothing if the XPU state is
    already initialized.
    """
    _lazy_init()
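

# Illustrative usage sketch (an addition for exposition, not part of the upstream
# module): `is_available()` and `device_count()` above only consult the compiled
# binding, while the first real XPU operation (or an explicit `init()`) triggers
# `_lazy_init()` below. The helper is hypothetical and never called here.
def _example_lazy_usage() -> None:
    if not is_available():  # cheap query, no backend state is created yet
        return
    init()  # optional: force initialization now instead of on first use
    _ = torch.empty(4, device="xpu")  # any real XPU op would also initialize lazily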


def _lazy_init():
    global _initialized, _queued_calls
    if is_initialized() or hasattr(_tls, "is_initializing"):
        return
    with _initialization_lock:
        # Double-check under the lock: another thread may have initialized XPU.
        if is_initialized():
            return
        if _is_in_bad_fork():
            raise RuntimeError(
                "Cannot re-initialize XPU in forked subprocess. To use XPU with "
                "multiprocessing, you must use the 'spawn' start method"
            )
        if not _is_compiled():
            raise AssertionError("Torch not compiled with XPU enabled")
        # Initialize the compiled XPU backend.
        torch._C._xpu_init()
        # Some queued calls may re-enter _lazy_init(); mark the thread so those
        # calls return immediately instead of initializing again.
        _tls.is_initializing = True

        _queued_calls.extend(calls for calls in _lazy_seed_tracker.get_calls() if calls)

        try:
            for queued_call, orig_traceback in _queued_calls:
                try:
                    queued_call()
                except Exception as e:
                    msg = (
                        f"XPU call failed lazily at initialization with error: {str(e)}\n\n"
                        f"XPU call was originally invoked at:\n\n{''.join(orig_traceback)}"
                    )
                    raise Exception(msg) from e
        finally:
            delattr(_tls, "is_initializing")
        _initialized = True


class _DeviceGuard:
    def __init__(self, index: int):
        self.idx = index
        self.prev_idx = -1

    def __enter__(self):
        self.prev_idx = torch.xpu._exchange_device(self.idx)

    def __exit__(self, type: Any, value: Any, traceback: Any):
        self.idx = torch.xpu._maybe_exchange_device(self.prev_idx)
        return False


class device:
    r"""Context-manager that changes the selected device.

    Args:
        device (torch.device or int or str): device index to select. It's a no-op if
            this argument is a negative integer or ``None``.
    """

    def __init__(self, device: Any):
        self.idx = _get_device_index(device, optional=True)
        self.prev_idx = -1

    def __enter__(self):
        self.prev_idx = torch.xpu._exchange_device(self.idx)

    def __exit__(self, type: Any, value: Any, traceback: Any):
        self.idx = torch.xpu._maybe_exchange_device(self.prev_idx)
        return False


class device_of(device):
    r"""Context-manager that changes the current device to that of given object.

    You can use both tensors and storages as arguments. If a given object is
    not allocated on an XPU, this is a no-op.

    Args:
        obj (Tensor or Storage): object allocated on the selected device.
    """

    def __init__(self, obj):
        idx = obj.get_device() if obj.is_xpu else -1
        super().__init__(idx)
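

# Illustrative usage sketch (an addition for exposition, not part of the upstream
# module): selecting a device globally via `set_device` (defined below) or
# temporarily via the context managers above. Device index 0 is an assumption.
def _example_device_selection() -> None:
    if device_count() == 0:
        return
    set_device(0)  # make XPU 0 the current device for this process
    with device(0):  # or switch only for the duration of a block
        x = torch.ones(2, 2, device="xpu")
    with device_of(x):  # adopt the device of an existing tensor
        _ = torch.empty(2, 2, device="xpu")  # allocated on the same device as x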


def set_device(device: _device_t) -> None:
    r"""Set the current device.

    Args:
        device (torch.device or int or str): selected device. This function is a
            no-op if this argument is negative.
    """
    _lazy_init()
    device = _get_device_index(device)
    if device >= 0:
        torch._C._xpu_setDevice(device)


def get_device_name(device: Optional[_device_t] = None) -> str:
    r"""Get the name of a device.

    Args:
        device (torch.device or int or str, optional): device for which to
            return the name. This function is a no-op if this argument is a
            negative integer. It uses the current device, given by :func:`~torch.xpu.current_device`,
            if :attr:`device` is ``None`` (default).

    Returns:
        str: the name of the device
    """
    return get_device_properties(device).name


@lru_cache(None)
def get_device_capability(device: Optional[_device_t] = None) -> dict[str, Any]:
    r"""Get the xpu capability of a device.

    Args:
        device (torch.device or int or str, optional): device for which to
            return the device capability. This function is a no-op if this
            argument is a negative integer. It uses the current device, given by
            :func:`~torch.xpu.current_device`, if :attr:`device` is ``None``
            (default).

    Returns:
        Dict[str, Any]: the xpu capability dictionary of the device
    """
    props = get_device_properties(device)
    return {
        prop: getattr(props, prop)
        for prop in dir(props)
        if not prop.startswith(("__", "_pybind11_"))
    }


def get_device_properties(device: Optional[_device_t] = None) -> _XpuDeviceProperties:
    r"""Get the properties of a device.

    Args:
        device (torch.device or int or str): device for which to return the
            properties of the device.

    Returns:
        _XpuDeviceProperties: the properties of the device
    """
    _lazy_init()
    device = _get_device_index(device, optional=True)
    return _get_device_properties(device)  # type: ignore[name-defined]


def current_device() -> int:
    r"""Return the index of a currently selected device."""
    _lazy_init()
    return torch._C._xpu_getDevice()
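

# Illustrative usage sketch (an addition for exposition, not part of the upstream
# module): querying the introspection helpers above. Concrete values differ per
# machine; the helper is hypothetical and never called here.
def _example_device_introspection() -> None:
    if not is_available():
        return
    idx = current_device()
    name = get_device_name(idx)  # same string as get_device_properties(idx).name
    caps = get_device_capability(idx)  # the same properties as a plain dict
    print(name, sorted(caps))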


def _get_device(device: Union[int, str, torch.device]) -> torch.device:
    r"""Return the torch.device type object from the passed in device.

    Args:
        device (torch.device or int or str): selected device.
    """
    if isinstance(device, str):
        device = torch.device(device)
    elif isinstance(device, int):
        device = torch.device("xpu", device)
    return device


class StreamContext:
    r"""Context-manager that selects a given stream.

    All XPU kernels queued within its context will be enqueued on a selected
    stream.

    Args:
        Stream (Stream): selected stream. This manager is a no-op if it's
            ``None``.
    .. note:: Streams are per-device.
    """

    cur_stream: Optional["torch.xpu.Stream"]

    def __init__(self, stream: Optional["torch.xpu.Stream"]):
        self.stream = stream
        self.idx = _get_device_index(None, True)
        if self.idx is None:
            self.idx = -1

    def __enter__(self):
        cur_stream = self.stream
        if cur_stream is None or self.idx == -1:
            return
        self.src_prev_stream = torch.xpu.current_stream(None)
        # If the chosen stream lives on another device, remember that device's
        # current stream as well so it can be restored on exit.
        if self.src_prev_stream.device != cur_stream.device:
            with device(cur_stream.device):
                self.dst_prev_stream = torch.xpu.current_stream(cur_stream.device)
        torch.xpu.set_stream(cur_stream)

    def __exit__(self, type: Any, value: Any, traceback: Any):
        cur_stream = self.stream
        if cur_stream is None or self.idx == -1:
            return
        # Restore the previous streams on the destination and source devices.
        if self.src_prev_stream.device != cur_stream.device:
            torch.xpu.set_stream(self.dst_prev_stream)
        torch.xpu.set_stream(self.src_prev_stream)
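

# Illustrative usage sketch (an addition for exposition, not part of the upstream
# module): issuing work on a side stream via the `stream` wrapper defined below,
# then ordering and synchronizing. The helper is hypothetical and never called here.
def _example_side_stream() -> None:
    if not is_available():
        return
    s = Stream()
    a = torch.randn(1024, device="xpu")
    with stream(s):  # kernels launched in this block go to stream `s`
        b = a * 2
    current_stream().wait_stream(s)  # make later work on the current stream wait for `s`
    synchronize()  # or block the host until all queued work has finished
    _ = b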


def stream(stream: Optional["torch.xpu.Stream"]) -> StreamContext:
    r"""Wrap around the Context-manager StreamContext that selects a given stream.

    Arguments:
        stream (Stream): selected stream. This manager is a no-op if it's ``None``.
    """
    return StreamContext(stream)


def _set_stream_by_id(stream_id, device_index, device_type):
    r"""Set the stream specified by the stream id, device index and device type.

    Args: stream_id (int): not visible to the user, used to assign to the specific stream.
          device_index (int): selected device index.
          device_type (int): selected device type.
    """
    torch._C._xpu_setStream(
        stream_id=stream_id,
        device_index=device_index,
        device_type=device_type,
    )


def set_stream(stream: Stream):
    r"""Set the current stream. This is a wrapper API to set the stream.
        Usage of this function is discouraged in favor of the ``stream``
        context manager.

    Args:
        stream (Stream): selected stream. This function is a no-op
            if this argument is ``None``.
    """
    if stream is None:
        return
    _lazy_init()
    _set_stream_by_id(
        stream_id=stream.stream_id,
        device_index=stream.device_index,
        device_type=stream.device_type,
    )


def current_stream(device: Optional[_device_t] = None) -> Stream:
    r"""Return the currently selected :class:`Stream` for a given device.

    Args:
        device (torch.device or int, optional): selected device. Returns
            the currently selected :class:`Stream` for the current device, given
            by :func:`~torch.xpu.current_device`, if :attr:`device` is ``None``
            (default).
    """
    _lazy_init()
    streamdata = torch._C._xpu_getCurrentStream(
        _get_device_index(device, optional=True)
    )
    return Stream(
        stream_id=streamdata[0], device_index=streamdata[1], device_type=streamdata[2]
    )


def get_stream_from_external(
    data_ptr: int, device: Optional[_device_t] = None
) -> Stream:
    r"""Return a :class:`Stream` from an external SYCL queue.

    This function is used to wrap SYCL queue created in other libraries in order
    to facilitate data exchange and multi-library interactions.

    .. note:: This function doesn't manage the queue life-cycle, it is the user's
       responsibility to keep the referenced queue alive while this returned stream is
       being used. The different SYCL queue pointers will result in distinct
       :class:`Stream` objects, even if the SYCL queues they dereference are equivalent.

    Args:
        data_ptr(int): Integer representation of the `sycl::queue*` value passed externally.
        device(torch.device or int, optional): the device where the queue was originally created.
            It is the user's responsibility to ensure the device is specified correctly.
    """
    _lazy_init()
    streamdata = torch._C._xpu_getStreamFromExternal(
        data_ptr, _get_device_index(device, optional=True)
    )
    return Stream(
        stream_id=streamdata[0], device_index=streamdata[1], device_type=streamdata[2]
    )


def synchronize(device: _device_t = None) -> None:
    r"""Wait for all kernels in all streams on an XPU device to complete.

    Args:
        device (torch.device or int, optional): device for which to synchronize.
            It uses the current device, given by :func:`~torch.xpu.current_device`,
            if :attr:`device` is ``None`` (default).
    """
    _lazy_init()
    device = _get_device_index(device, optional=True)
    return torch._C._xpu_synchronize(device)


def get_arch_list() -> list[str]:
    r"""Return the list of XPU architectures this library was compiled for."""
    if not _is_compiled():
        return []
    arch_flags = torch._C._xpu_getArchFlags()
    if arch_flags is None:
        return []
    return arch_flags.split()


def get_gencode_flags() -> str:
    r"""Return XPU AOT (ahead-of-time) build flags this library was compiled with."""
    arch_list = get_arch_list()
    if len(arch_list) == 0:
        return ""
    return f'-device {",".join(arch for arch in arch_list)}'


def _get_generator(device: torch.device) -> torch._C.Generator:
    r"""Return the XPU Generator object for the given device.

    Args:
        device (torch.device): selected device.
    """
    idx = device.index
    if idx is None:
        idx = current_device()
    return torch.xpu.default_generators[idx]


def _set_rng_state_offset(
    offset: int, device: Union[int, str, torch.device] = "xpu"
) -> None:
    r"""Set the random number generator state offset of the specified GPU.

    Args:
        offset (int): The desired offset
        device (torch.device or int, optional): The device to set the RNG state.
            Default: ``'xpu'`` (i.e., ``torch.device('xpu')``, the current XPU device).
    """
    final_device = _get_device(device)

    def cb():
        default_generator = _get_generator(final_device)
        default_generator.set_offset(offset)

    _lazy_call(cb)
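

# Illustrative usage sketch (an addition for exposition, not part of the upstream
# module): seeding goes through `_lazy_call`, so it is safe before the backend is
# initialized; `manual_seed` is re-exported from `torch.xpu.random` at the bottom
# of this module. The helper is hypothetical and never called here.
def _example_seeding() -> None:
    manual_seed(42)  # queued if XPU is not initialized yet
    if is_available():
        _ = torch.randn(2, device="xpu")  # initialization replays the queued seed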


def _get_rng_state_offset(device: Union[int, str, torch.device] = "xpu") -> int:
    r"""Return the random number generator state offset of the specified GPU.

    Args:
        device (torch.device or int, optional): The device to return the RNG state offset of.
            Default: ``'xpu'`` (i.e., ``torch.device('xpu')``, the current XPU device).

    .. warning::
        This function eagerly initializes XPU.
    """
    _lazy_init()
    final_device = _get_device(device)
    default_generator = _get_generator(final_device)
    return default_generator.get_offset()


from .memory import (
    empty_cache,
    max_memory_allocated,
    max_memory_reserved,
    mem_get_info,
    memory_allocated,
    memory_reserved,
    memory_stats,
    memory_stats_as_nested_dict,
    reset_accumulated_memory_stats,
    reset_peak_memory_stats,
)
from .random import (
    get_rng_state,
    get_rng_state_all,
    initial_seed,
    manual_seed,
    manual_seed_all,
    seed,
    seed_all,
    set_rng_state,
    set_rng_state_all,
)


__all__ = [
    "Event",
    "Stream",
    "StreamContext",
    "current_device",
    "current_stream",
    "default_generators",
    "device",
    "device_of",
    "device_count",
    "empty_cache",
    "get_arch_list",
    "get_device_capability",
    "get_device_name",
    "get_device_properties",
    "get_gencode_flags",
    "get_rng_state",
    "get_rng_state_all",
    "get_stream_from_external",
    "init",
    "initial_seed",
    "is_available",
    "is_bf16_supported",
    "is_initialized",
    "manual_seed",
    "manual_seed_all",
    "max_memory_allocated",
    "max_memory_reserved",
    "mem_get_info",
    "memory_allocated",
    "memory_reserved",
    "memory_stats",
    "memory_stats_as_nested_dict",
    "reset_accumulated_memory_stats",
    "reset_peak_memory_stats",
    "seed",
    "seed_all",
    "set_device",
    "set_rng_state",
    "set_rng_state_all",
    "set_stream",
    "stream",
    "streams",
    "synchronize",
]