import collections
from typing import Any, Union

import torch
from torch.types import Device

from . import _get_device_index, is_initialized

_device_t = Union[Device, str, int, None]


def empty_cache() -> None:
    r"""Release all unoccupied cached memory currently held by the caching
    allocator so that it can be used by other XPU applications.

    .. note::
        :func:`~torch.xpu.empty_cache` doesn't increase the amount of XPU
        memory available for PyTorch. However, it may help reduce fragmentation
        of XPU memory in certain cases.
    """
    if is_initialized():
        torch._C._xpu_emptyCache()


def reset_peak_memory_stats(device: _device_t = None) -> None:
    r"""Reset the "peak" stats tracked by the XPU memory allocator.

    See :func:`~torch.xpu.memory_stats` for details. Peak stats correspond to the
    `"peak"` key in each individual stat dict.

    Args:
        device (torch.device or int or str, optional): selected device. Returns
            statistic for the current device, given by :func:`~torch.xpu.current_device`,
            if :attr:`device` is ``None`` (default).
    """
    device = _get_device_index(device, optional=True)
    return torch._C._xpu_resetPeakMemoryStats(device)


def reset_accumulated_memory_stats(device: _device_t = None) -> None:
    r"""Reset the "accumulated" (historical) stats tracked by the XPU memory allocator.

    See :func:`~torch.xpu.memory_stats` for details. Accumulated stats correspond to
    the `"allocated"` and `"freed"` keys in each individual stat dict.

    Args:
        device (torch.device or int or str, optional): selected device. Returns
            statistic for the current device, given by :func:`~torch.xpu.current_device`,
            if :attr:`device` is ``None`` (default).
    """
    device = _get_device_index(device, optional=True)
    return torch._C._xpu_resetAccumulatedMemoryStats(device)


def memory_stats_as_nested_dict(device: _device_t = None) -> dict[str, Any]:
    r"""Return the result of :func:`~torch.xpu.memory_stats` as a nested dictionary."""
    if not is_initialized():
        return {}
    device = _get_device_index(device, optional=True)
    return torch._C._xpu_memoryStats(device)
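
# A minimal illustrative sketch (not part of the original module), assuming an
# available XPU device and a hypothetical ``run_workload()``: resetting the
# accumulated and peak counters before a region of interest makes the "peak",
# "allocated", and "freed" metrics described in the docstrings above refer to
# that region only.
#
#     torch.xpu.reset_accumulated_memory_stats()
#     torch.xpu.reset_peak_memory_stats()
#     run_workload()
#     stats = torch.xpu.memory_stats()
#     print(stats["allocated_bytes.all.peak"], stats["allocated_bytes.all.freed"])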


def memory_stats(device: _device_t = None) -> dict[str, Any]:
    r"""Return a dictionary of XPU memory allocator statistics for a given device.

    The return value of this function is a dictionary of statistics, each of
    which is a non-negative integer.

    Core statistics:

    - ``"allocated_bytes.{all,large_pool,small_pool}.{current,peak,allocated,freed}"``:
      amount of allocated memory.
    - ``"reserved_bytes.{all,large_pool,small_pool}.{current,peak,allocated,freed}"``:
      amount of reserved memory.
    - ``"active_bytes.{all,large_pool,small_pool}.{current,peak,allocated,freed}"``:
      amount of active memory.
    - ``"requested_bytes.{all,large_pool,small_pool}.{current,peak,allocated,freed}"``:
      memory requested by client code; compare this with allocated_bytes to check if
      allocation rounding adds too much overhead.

    For these core statistics, values are broken down as follows.

    Pool type:

    - ``all``: combined statistics across all memory pools.
    - ``large_pool``: statistics for the large allocation pool (for size >= 1MB allocations).
    - ``small_pool``: statistics for the small allocation pool (for size < 1MB allocations).

    Metric type:

    - ``current``: current value of this metric.
    - ``peak``: maximum value of this metric.
    - ``allocated``: historical total increase in this metric.
    - ``freed``: historical total decrease in this metric.

    Args:
        device (torch.device or int or str, optional): selected device. Returns
            statistics for the current device, given by :func:`~torch.xpu.current_device`,
            if :attr:`device` is ``None`` (default).
    """
    result = []

    def _recurse_add_to_result(prefix: str, obj: Any) -> None:
        if isinstance(obj, dict):
            if len(prefix) > 0:
                prefix += "."
            for k, v in obj.items():
                _recurse_add_to_result(prefix + k, v)
        else:
            result.append((prefix, obj))

    stats = memory_stats_as_nested_dict(device=device)
    _recurse_add_to_result("", stats)
    result.sort()

    return collections.OrderedDict(result)
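
# A minimal illustrative sketch (not part of the original module), assuming an
# available XPU device: the flat dict returned by memory_stats() exposes each
# counter under a "<stat>.<pool>.<metric>" key, e.g.
#
#     stats = torch.xpu.memory_stats()
#     cur = stats["allocated_bytes.all.current"]   # bytes currently allocated
#     peak = stats["reserved_bytes.all.peak"]      # peak bytes held by the allocator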


def memory_allocated(device: _device_t = None) -> int:
    r"""Return the current GPU memory occupied by tensors in bytes for a given device.

    Args:
        device (torch.device or int or str, optional): selected device. Returns
            statistic for the current device, given by :func:`~torch.xpu.current_device`,
            if :attr:`device` is ``None`` (default).

    .. note::
        This is likely less than the amount shown in `xpu-smi` since some
        unused memory can be held by the caching allocator and some context
        needs to be created on GPU.
    """
    return memory_stats(device=device).get("allocated_bytes.all.current", 0)


def max_memory_allocated(device: _device_t = None) -> int:
    r"""Return the maximum GPU memory occupied by tensors in bytes for a given device.

    By default, this returns the peak allocated memory since the beginning of
    this program. :func:`~torch.xpu.reset_peak_memory_stats` can be used to
    reset the starting point in tracking this metric. For example, these two
    functions can measure the peak allocated memory usage of each iteration in a
    training loop.

    Args:
        device (torch.device or int or str, optional): selected device. Returns
            statistic for the current device, given by :func:`~torch.xpu.current_device`,
            if :attr:`device` is ``None`` (default).
    """
    return memory_stats(device=device).get("allocated_bytes.all.peak", 0)


def memory_reserved(device: _device_t = None) -> int:
    r"""Return the current GPU memory managed by the caching allocator in bytes for a given device.

    Args:
        device (torch.device or int or str, optional): selected device. Returns
            statistic for the current device, given by :func:`~torch.xpu.current_device`,
            if :attr:`device` is ``None`` (default).
    """
    return memory_stats(device=device).get("reserved_bytes.all.current", 0)


def max_memory_reserved(device: _device_t = None) -> int:
    r"""Return the maximum GPU memory managed by the caching allocator in bytes for a given device.

    By default, this returns the peak cached memory since the beginning of this
    program. :func:`~torch.xpu.reset_peak_memory_stats` can be used to reset
    the starting point in tracking this metric. For example, these two functions
    can measure the peak cached memory amount of each iteration in a training
    loop.

    Args:
        device (torch.device or int or str, optional): selected device. Returns
            statistic for the current device, given by :func:`~torch.xpu.current_device`,
            if :attr:`device` is ``None`` (default).
    """
    return memory_stats(device=device).get("reserved_bytes.all.peak", 0)


def mem_get_info(device: _device_t = None) -> tuple[int, int]:
    r"""Return the global free and total GPU memory for a given device.

    Args:
        device (torch.device or int or str, optional): selected device. Returns
            statistic for the current device, given by :func:`~torch.xpu.current_device`,
            if :attr:`device` is ``None`` (default).

    Returns:
        int: the memory available on the device in units of bytes.
        int: the total memory on the device in units of bytes.
    """
    device = _get_device_index(device, optional=True)
    return torch._C._xpu_getMemoryInfo(device)
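
# A minimal illustrative sketch (not part of the original module), assuming an
# available XPU device: mem_get_info() returns a (free, total) pair in bytes,
# so the fraction of device memory still available can be computed as
#
#     free, total = torch.xpu.mem_get_info()
#     print(f"{free / total:.1%} of device memory is free")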


__all__ = [
    "empty_cache",
    "max_memory_allocated",
    "max_memory_reserved",
    "mem_get_info",
    "memory_allocated",
    "memory_reserved",
    "memory_stats",
    "memory_stats_as_nested_dict",
    "reset_accumulated_memory_stats",
    "reset_peak_memory_stats",
]