from collections.abc import Sequence
from typing import Any, Optional, TypeVar, Union, overload

import torch
from torch.nn.parallel._functions import Gather, Scatter
from typing_extensions import deprecated

__all__ = ["scatter", "scatter_kwargs", "gather"]


@deprecated(
    "`is_namedtuple` is deprecated, please use the python checks instead",
    category=FutureWarning,
)
def is_namedtuple(obj: Any) -> bool:
    return _is_namedtuple(obj)


def _is_namedtuple(obj: Any) -> bool:
    # True for instances produced by collections.namedtuple or typing.NamedTuple.
    return isinstance(obj, tuple) and hasattr(obj, "_asdict") and hasattr(obj, "_fields")


T = TypeVar("T", dict, list, tuple)


@overload
def scatter(
    inputs: torch.Tensor,
    target_gpus: Sequence[Union[int, torch.device]],
    dim: int = ...,
) -> tuple[torch.Tensor, ...]: ...
@overload
def scatter(
    inputs: T,
    target_gpus: Sequence[Union[int, torch.device]],
    dim: int = ...,
) -> list[T]: ...


def scatter(inputs, target_gpus, dim=0):
    r"""Slice tensors into approximately equal chunks and distribute them across given GPUs.

    Duplicates references to objects that are not tensors.
    """

    def scatter_map(obj):
        if isinstance(obj, torch.Tensor):
            return Scatter.apply(target_gpus, None, dim, obj)
        if _is_namedtuple(obj):
            return [type(obj)(*args) for args in zip(*map(scatter_map, obj))]
        if isinstance(obj, tuple) and len(obj) > 0:
            return list(zip(*map(scatter_map, obj)))
        if isinstance(obj, list) and len(obj) > 0:
            return [list(i) for i in zip(*map(scatter_map, obj))]
        if isinstance(obj, dict) and len(obj) > 0:
            return [type(obj)(i) for i in zip(*map(scatter_map, obj.items()))]
        # Non-tensor leaves are not split; the same reference goes to every device.
        return [obj for _ in target_gpus]

    # scatter_map is recursive, so its closure keeps the function alive in a
    # reference cycle; clear the name once finished to break the cycle.
    try:
        res = scatter_map(inputs)
    finally:
        scatter_map = None  # type: ignore[assignment]
    return res


def scatter_kwargs(
    inputs: tuple[Any, ...],
    kwargs: Optional[dict[str, Any]],
    target_gpus: Sequence[Union[int, torch.device]],
    dim: int = 0,
) -> tuple[tuple[Any, ...], tuple[dict[str, Any], ...]]:
    r"""Scatter with support for kwargs dictionary."""
    scattered_inputs = scatter(inputs, target_gpus, dim) if inputs else []
    scattered_kwargs = scatter(kwargs, target_gpus, dim) if kwargs else []
    # Pad the shorter sequence so every device receives an (args, kwargs) pair.
    if len(scattered_inputs) < len(scattered_kwargs):
        scattered_inputs.extend(
            () for _ in range(len(scattered_kwargs) - len(scattered_inputs))
        )
    elif len(scattered_kwargs) < len(scattered_inputs):
        scattered_kwargs.extend(
            {} for _ in range(len(scattered_inputs) - len(scattered_kwargs))
        )
    return tuple(scattered_inputs), tuple(scattered_kwargs)


def gather(outputs: Any, target_device: Union[int, torch.device], dim: int = 0) -> Any:
    r"""Gather tensors from different GPUs on a specified device.

    This function is useful for gathering the results of a distributed computation.
    It takes a sequence of objects, one for each GPU, and returns a single object
    on the specified device.

    Args:
        outputs (Any): A sequence of objects (potentially tensors) to gather.
        target_device (Union[int, torch.device]): The device to gather the tensors to.
            Use 'cpu' for CPU to avoid a deprecation warning.
        dim (int, optional): The dimension along which to gather. Default: 0.

    Returns:
        Any: A gathered object (potentially tensor) on the specified device.
    """

    def gather_map(outputs):
        out = outputs[0]
        if isinstance(out, torch.Tensor):
            return Gather.apply(target_device, dim, *outputs)
        if out is None:
            return None
        if isinstance(out, dict):
            if not all(len(out) == len(d) for d in outputs):
                raise ValueError("All dicts must have the same number of keys")
            return type(out)((k, gather_map([d[k] for d in outputs])) for k in out)
        if _is_namedtuple(out):
            return type(out)._make(map(gather_map, zip(*outputs)))
        return type(out)(map(gather_map, zip(*outputs)))

    # gather_map is recursive as well; clear the name to break its reference cycle.
    try:
        res = gather_map(outputs)
    finally:
        gather_map = None  # type: ignore[assignment]
    return res
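

if __name__ == "__main__":
    # Minimal usage sketch of the helpers above, in the style of a
    # DataParallel-like forward pass: split a batch (and its kwargs) across
    # two GPUs with scatter_kwargs, run a stand-in per-replica computation,
    # then merge the per-GPU results with gather. This assumes a host with at
    # least two CUDA devices and is skipped otherwise.
    if torch.cuda.device_count() >= 2:
        devices = [0, 1]
        batch = torch.randn(8, 4)

        # One (args, kwargs) pair per device: the batch is chunked along dim 0,
        # while the non-tensor kwarg is simply replicated to each device.
        per_gpu_args, per_gpu_kwargs = scatter_kwargs(
            (batch,), {"scale": 2.0}, devices, dim=0
        )

        # Stand-in for a model replica: scale each chunk on its own device.
        outputs = [
            args[0] * kwargs["scale"]
            for args, kwargs in zip(per_gpu_args, per_gpu_kwargs)
        ]

        # Concatenate the per-GPU results along dim 0 on device 0.
        merged = gather(outputs, target_device=0, dim=0)
        assert merged.shape == batch.shape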