"""Transformers DAC model."""

import math
from dataclasses import dataclass
from typing import Optional

import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F

from ...modeling_utils import PreTrainedModel
from ...utils import ModelOutput, auto_docstring
from .configuration_dac import DacConfig


@dataclass
class DacOutput(ModelOutput):
    """
    Args:
        loss (`torch.Tensor`):
            Loss from the encoder model, comprising the weighted combination of the commitment and codebook losses.
        audio_values (`torch.Tensor` of shape `(batch_size, input_length)`):
            Reconstructed audio data.
        quantized_representation (`torch.Tensor` of shape `(batch_size, dimension, time_steps)`):
            Quantized continuous representation of input.
        audio_codes (`torch.LongTensor` of shape `(batch_size, num_codebooks, time_steps)`):
            Codebook indices for each codebook (quantized discrete representation of input).
        projected_latents (`torch.Tensor` of shape `(batch_size, num_codebooks * dimension, time_steps)`):
            Projected latents (continuous representation of input before quantization).
    """

    loss: Optional[torch.FloatTensor] = None
    audio_values: Optional[torch.FloatTensor] = None
    quantized_representation: Optional[torch.FloatTensor] = None
    audio_codes: Optional[torch.LongTensor] = None
    projected_latents: Optional[torch.FloatTensor] = None


@dataclass
class DacEncoderOutput(ModelOutput):
    """
    Args:
        loss (`torch.Tensor`):
            Loss from the encoder model, comprising the weighted combination of the commitment and codebook losses.
        quantized_representation (`torch.Tensor` of shape `(batch_size, dimension, time_steps)`, *optional*):
            Quantized continuous representation of input.
        audio_codes (`torch.Tensor` of shape `(batch_size, num_codebooks, time_steps)`, *optional*):
            Codebook indices for each codebook (quantized discrete representation of input).
        projected_latents (`torch.Tensor` of shape `(batch_size, num_codebooks * dimension, time_steps)`, *optional*):
            Projected latents (continuous representation of input before quantization).
    """

    loss: Optional[torch.FloatTensor] = None
    quantized_representation: Optional[torch.FloatTensor] = None
    audio_codes: Optional[torch.LongTensor] = None
    projected_latents: Optional[torch.FloatTensor] = None


@dataclass
class DacDecoderOutput(ModelOutput):
    """
    Args:
        audio_values (`torch.FloatTensor` of shape `(batch_size, input_length)`, *optional*):
            Decoded audio values, obtained using the decoder part of Dac.
    """

    audio_values: Optional[torch.FloatTensor] = None


class Snake1d(nn.Module):
    """
    A 1-dimensional Snake activation function module.
    """

    def __init__(self, hidden_dim):
        super().__init__()
        self.alpha = nn.Parameter(torch.ones(1, hidden_dim, 1))

    def forward(self, hidden_states):
        shape = hidden_states.shape
        hidden_states = hidden_states.reshape(shape[0], shape[1], -1)
        # the small epsilon guards against division by zero when alpha is learned close to 0
        hidden_states = hidden_states + (self.alpha + 1e-9).reciprocal() * torch.sin(self.alpha * hidden_states).pow(2)
        hidden_states = hidden_states.reshape(shape)
        return hidden_states
zSnake1d.forward)r   r   r   r   r   r-   Ú__classcell__r   r   r$   r   r   V   s    r   c                       s6   e Zd ZdZdef‡ fdd„Zdd„ Zdd„ Z‡  ZS )	ÚDacVectorQuantizeaÕ  
    Implementation of VQ similar to Karpathy's repo (https://github.com/karpathy/deep-vector-quantization)

    Additionally uses following tricks from improved VQGAN
    (https://arxiv.org/pdf/2110.04627.pdf):
        1. Factorized codes: Perform nearest neighbor lookup in low-dimensional space
            for improved codebook usage
        2. l2-normalized codes: Converts euclidean distance to cosine similarity which
            improves training stability
    """

    def __init__(self, config: DacConfig):
        super().__init__()
        self.in_proj = nn.Conv1d(config.hidden_size, config.codebook_dim, kernel_size=1)
        self.out_proj = nn.Conv1d(config.codebook_dim, config.hidden_size, kernel_size=1)
        self.codebook = nn.Embedding(config.codebook_size, config.codebook_dim)

    def forward(self, hidden_state):
        """
        Quantizes the input tensor using a fixed codebook and returns the corresponding codebook vectors.

        Args:
            hidden_state (`torch.FloatTensor` of shape `(batch_size, dimension, time_steps)`):
                Input tensor.

        Returns:
            quantized_representation (`torch.Tensor` of shape `(batch_size, dimension, time_steps)`):
                Quantized continuous representation of input.
            commitment_loss (`torch.FloatTensor` of shape `(1)`):
                Commitment loss to train encoder to predict vectors closer to codebook entries.
            codebook_loss (`torch.FloatTensor` of shape `(1)`):
                Codebook loss to update the codebook.
            audio_codes (`torch.LongTensor` of shape `(batch_size, time_steps)`):
                Codebook indices for each codebook, quantized discrete representation of input.
            projected_latents (`torch.FloatTensor` of shape `(batch_size, num_codebooks * dimension, time_steps)`):
                Projected latents (continuous representation of input before quantization).
        """
        projected_latents = self.in_proj(hidden_state)
        quantized_representation, audio_codes = self.decode_latents(projected_latents)

        commitment_loss = F.mse_loss(projected_latents, quantized_representation.detach(), reduction="mean")
        codebook_loss = F.mse_loss(quantized_representation, projected_latents.detach(), reduction="mean")
        # noop in forward pass, straight-through gradient estimator in backward pass
        quantized_representation = projected_latents + (quantized_representation - projected_latents).detach()
        quantized_representation = self.out_proj(quantized_representation)

        return quantized_representation, commitment_loss, codebook_loss, audio_codes, projected_latents

    def decode_latents(self, hidden_states):
        batch_size, hidden_dim, sequence_length = hidden_states.shape
        encodings = hidden_states.permute(0, 2, 1).reshape(batch_size * sequence_length, hidden_dim)
        codebook = self.codebook.weight

        # L2-normalize encodings and codebook (ViT-VQGAN)
        encodings = F.normalize(encodings)
        codebook = F.normalize(codebook)

        # compute euclidean distance between each encoding and every codebook entry
        l2_norm = encodings.pow(2).sum(1, keepdim=True)
        dist = -(l2_norm - 2 * encodings @ codebook.t()) - codebook.pow(2).sum(1, keepdim=True).t()

        indices = dist.max(1)[1]
        indices = indices.reshape(hidden_states.size(0), -1)
        quantized_representation = self.codebook(indices).transpose(1, 2)
        return quantized_representation, indices
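
# Added note (not in the upstream file): `decode_latents` relies on the expansion
# ||e - c||^2 = ||e||^2 - 2 * e @ c.T + ||c||^2, evaluated for all encoding/code pairs at once, so
# taking `max(1)` of the negated distance selects the nearest codebook entry. Because encodings and
# codes are L2-normalized first, this ranking is equivalent to cosine similarity, which is the
# training-stability trick mentioned in the class docstring.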


class DacResidualUnit(nn.Module):
    """
    A residual unit composed of Snake1d and weight-normalized Conv1d layers with dilations.
    """

    def __init__(self, dimension: int = 16, dilation: int = 1):
        super().__init__()
        pad = ((7 - 1) * dilation) // 2

        self.snake1 = Snake1d(dimension)
        self.conv1 = nn.Conv1d(dimension, dimension, kernel_size=7, dilation=dilation, padding=pad)
        self.snake2 = Snake1d(dimension)
        self.conv2 = nn.Conv1d(dimension, dimension, kernel_size=1)

    def forward(self, hidden_state):
        """
        Forward pass through the residual unit.

        Args:
            hidden_state (`torch.Tensor` of shape `(batch_size, channels, time_steps)`):
                Input tensor.

        Returns:
            output_tensor (`torch.Tensor` of shape `(batch_size, channels, time_steps)`):
                Input tensor after passing through the residual unit.
        """
        output_tensor = hidden_state
        output_tensor = self.conv1(self.snake1(output_tensor))
        output_tensor = self.conv2(self.snake2(output_tensor))

        # trim the skip connection so it matches the (possibly shorter) convolved output
        padding = (hidden_state.shape[-1] - output_tensor.shape[-1]) // 2
        if padding > 0:
            hidden_state = hidden_state[..., padding:-padding]
        output_tensor = hidden_state + output_tensor
        return output_tensor


class DacEncoderBlock(nn.Module):
    """Encoder block used in DAC encoder."""

    def __init__(self, config: DacConfig, stride: int = 1, stride_index: int = 1):
        super().__init__()

        dimension = config.encoder_hidden_size * 2**stride_index
        self.res_unit1 = DacResidualUnit(dimension // 2, dilation=1)
        self.res_unit2 = DacResidualUnit(dimension // 2, dilation=3)
        self.res_unit3 = DacResidualUnit(dimension // 2, dilation=9)
        self.snake1 = Snake1d(dimension // 2)
        self.conv1 = nn.Conv1d(
            dimension // 2, dimension, kernel_size=2 * stride, stride=stride, padding=math.ceil(stride / 2)
        )

    def forward(self, hidden_state):
        hidden_state = self.res_unit1(hidden_state)
        hidden_state = self.res_unit2(hidden_state)
        hidden_state = self.snake1(self.res_unit3(hidden_state))
        hidden_state = self.conv1(hidden_state)

        return hidden_state


class DacDecoderBlock(nn.Module):
    """Decoder block used in DAC decoder."""

    def __init__(self, config: DacConfig, stride: int = 1, stride_index: int = 1):
        super().__init__()

        input_dim = config.decoder_hidden_size // 2**stride_index
        output_dim = config.decoder_hidden_size // 2 ** (stride_index + 1)
        self.snake1 = Snake1d(input_dim)
        self.conv_t1 = nn.ConvTranspose1d(
            input_dim,
            output_dim,
            kernel_size=2 * stride,
            stride=stride,
            padding=math.ceil(stride / 2),
        )

        self.res_unit1 = DacResidualUnit(output_dim, dilation=1)
        self.res_unit2 = DacResidualUnit(output_dim, dilation=3)
        self.res_unit3 = DacResidualUnit(output_dim, dilation=9)

    def forward(self, hidden_state):
        hidden_state = self.snake1(hidden_state)
        hidden_state = self.conv_t1(hidden_state)
        hidden_state = self.res_unit1(hidden_state)
        hidden_state = self.res_unit2(hidden_state)
        hidden_state = self.res_unit3(hidden_state)

        return hidden_state
fd
d„Zde	j
fdd„Z‡  ZS )ÚDacResidualVectorQuantizez„
    ResidualVectorQuantize block - Introduced in SoundStream: An end2end neural audio codec (https://arxiv.org/abs/2107.03312)
    """

    def __init__(self, config: DacConfig):
        super().__init__()

        self.n_codebooks = config.n_codebooks
        self.quantizers = nn.ModuleList([DacVectorQuantize(config) for i in range(config.n_codebooks)])
        self.quantizer_dropout = config.quantizer_dropout

    def forward(self, hidden_state, n_quantizers: Optional[int] = None):
        """
        Quantizes the input tensor using a fixed set of codebooks and returns corresponding codebook vectors.
        Args:
            hidden_state (`torch.Tensor` of shape `(batch_size, dimension, time_steps)`):
                Input tensor to be quantized.
            n_quantizers (`int`, *optional*):
                Number of quantizers to use. If specified and `self.quantizer_dropout` is True,
                this argument is ignored during training, and a random number of quantizers is used.

        Returns:
            quantized_representation (`torch.Tensor` of shape `(batch_size, dimension, time_steps)`):
                Quantized continuous representation of input.
            audio_codes (`torch.Tensor` of shape `(batch_size, num_codebooks, time_steps)`):
                Codebook indices for each codebook (quantized discrete representation of input).
            projected_latents (`torch.Tensor` of shape `(batch_size, num_codebooks * dimension, time_steps)`):
                Projected latents (continuous representation of input before quantization).
            commitment_loss (`torch.Tensor` of shape `(1)`):
                Commitment loss to train the encoder to predict vectors closer to codebook entries.
            codebook_loss (`torch.Tensor` of shape `(1)`):
                Codebook loss to update the codebook.
        """
        quantized_representation = 0
        residual = hidden_state

        commitment_loss = 0
        codebook_loss = 0

        audio_codes = []
        projected_latents = []

        n_quantizers = n_quantizers if n_quantizers is not None else self.n_codebooks
        if self.training:
            # quantizer dropout: train each example with a random number of active quantizers
            n_quantizers = torch.ones((hidden_state.shape[0],)) * self.n_codebooks + 1
            dropout = torch.randint(1, self.n_codebooks + 1, (hidden_state.shape[0],))
            n_dropout = int(hidden_state.shape[0] * self.quantizer_dropout)
            n_quantizers[:n_dropout] = dropout[:n_dropout]
            n_quantizers = n_quantizers.to(hidden_state.device)

        for i, quantizer in enumerate(self.quantizers):
            if self.training is False and i >= n_quantizers:
                break

            quantized_representation_i, commitment_loss_i, codebook_loss_i, indices_i, projected_latents_i = quantizer(
                residual
            )

            # create mask to apply quantizer dropout
            mask = torch.full((hidden_state.shape[0],), fill_value=i, device=hidden_state.device) < n_quantizers
            quantized_representation = quantized_representation + quantized_representation_i * mask[:, None, None]
            residual = residual - quantized_representation_i

            # sum losses
            commitment_loss += commitment_loss_i * mask
            codebook_loss += codebook_loss_i * mask

            audio_codes.append(indices_i)
            projected_latents.append(projected_latents_i)

        audio_codes = torch.stack(audio_codes, dim=1)
        projected_latents = torch.cat(projected_latents, dim=1)

        return quantized_representation, audio_codes, projected_latents, commitment_loss, codebook_loss

    def from_codes(self, audio_codes: torch.Tensor):
        """
        Reconstructs the continuous representation from quantized codes.

        Args:
            audio_codes (`torch.Tensor` of shape `(batch_size, num_codebooks, time_steps)`):
                Quantized discrete representation of input.

        Returns:
            quantized_representation (`torch.Tensor`):
                Quantized continuous representation of input.
            projected_latents (`torch.Tensor`):
                List of projected latents (continuous representations of input before quantization)
                for each codebook.
            audio_codes (`torch.Tensor`):
                Codebook indices for each codebook.
        """
        quantized_representation = 0.0
        projected_latents = []

        n_codebooks = audio_codes.shape[1]
        for i in range(n_codebooks):
            projected_latents_i = self.quantizers[i].codebook(audio_codes[:, i, :]).transpose(1, 2)
            projected_latents.append(projected_latents_i)
            quantized_representation += self.quantizers[i].out_proj(projected_latents_i)
        return quantized_representation, torch.cat(projected_latents, dim=1), audio_codes

    def from_latents(self, latents: torch.Tensor):
        """Reconstructs the quantized representation from unquantized latents.

        Args:
            latents (`torch.Tensor` of shape `(batch_size, total_latent_dimension, time_steps)`):
                Continuous representation of input after projection.

        Returns:
            quantized_representation (`torch.Tensor` of shape `(batch_size, dimension, time_steps)`):
                Quantized representation of the full-projected space.
            quantized_latents (`torch.Tensor` of shape `(batch_size, dimension, time_steps)`):
                Quantized representation of the latent space (continuous representation before quantization).
        """
        quantized_representation = 0
        quantized_latents = []
        codes = []
        codebook_dims_tensor = torch.tensor([0] + [q.codebook_dim for q in self.quantizers])
        dims = torch.cumsum(codebook_dims_tensor, dim=0)

        n_codebooks = np.where(dims <= latents.shape[1])[0].max(axis=0, keepdims=True)[0]
        for i in range(n_codebooks):
            hidden_dim_j, hidden_dim_k = dims[i], dims[i + 1]
            quantized_latents_i, codes_i = self.quantizers[i].decode_latents(latents[:, hidden_dim_j:hidden_dim_k, :])
            quantized_latents.append(quantized_latents_i)
            codes.append(codes_i)

            quantized_representation_i = self.quantizers[i].out_proj(quantized_latents_i)
            quantized_representation = quantized_representation + quantized_representation_i

        return quantized_representation, torch.cat(quantized_latents, dim=1)
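
# Added note (not in the upstream file): `from_latents` above recovers the per-codebook slices from
# the cumulative sum of each quantizer's codebook dimension; for example, three codebooks of
# dimension 8 give boundaries [0, 8, 16, 24], and quantizer i reads latents[:, dims[i]:dims[i + 1], :].
#
# The two modules below are mirror images: DacEncoder halves the time resolution and doubles the
# channel count at every strided block, while DacDecoder undoes this with transposed convolutions.
# With downsampling_ratios = [2, 4, 8, 8] (the published default, stated here as an assumption),
# one latent frame covers 2 * 4 * 8 * 8 = 512 input samples.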
DacDecoderzDAC Decoderr0   c           	         s¢   t ƒ  ¡  |j}|j}|j}tj||ddd| _g }t|ƒD ]\}}|t	|||ƒg7 }qt 
|¡| _|jd|d   }t|ƒ| _tj|dddd| _t ¡ | _d S )NrQ   r   ©r2   rR   r'   r   )r   r   r4   rk   Zupsampling_ratiosr   r3   rT   r~   rj   ru   Úblockr   rS   rV   ZTanhÚtanh)	r"   r0   Zinput_channelZchannelsÚstridesr“   r\   r[   rm   r$   r   r   r   –  s   

zDacDecoder.__init__c                 C   s@   |   |¡}| jD ]}||ƒ}q|  |¡}|  |¡}|  |¡}|S rf   )rT   r“   rS   rV   r”   )r"   r?   Úlayerr   r   r   r-   «  s   





zDacDecoder.forward©r   r   r   r   r	   r   r-   r.   r   r   r$   r   r‘   “  s    r‘   c                       r   )Ú
DacEncoderzDAC Encoderr0   c                    s    t ƒ  ¡  |j}tjd|jddd| _g | _t|ƒD ]\}}|d }|  jt	|||dg7  _qt 
| j¡| _|jd|  }t|ƒ| _tj||jddd| _d S )Nr   rQ   r   r’   )r[   r\   r'   )r   r   Zdownsampling_ratiosr   r3   r`   rT   r“   r~   rZ   ru   r   rS   r4   rV   )r"   r0   r•   r\   r[   Zd_modelr$   r   r   r   »  s   

zDacEncoder.__init__c                 C   s6   |   |¡}| jD ]}||ƒ}q|  |¡}|  |¡}|S rf   )rT   r“   rS   rV   )r"   r?   Úmoduler   r   r   r-   Í  s   




zDacEncoder.forwardr—   r   r   r$   r   r˜   ¸  s    r˜   c                   @   s0   e Zd ZeZdZdZdd„ Zdd„ Zdd„ Z	d	S )
ÚDacPreTrainedModelZdacÚinput_valuesc                 C   s6   t |tjƒrtjj|jdd tj |jd¡ d S d S )Ng{®Gáz”?)Zstdr   )Ú
isinstancer   r3   ÚinitZtrunc_normal_rB   Z	constant_Zbias)r"   r™   r   r   r   Ú_init_weightsß  s   þz DacPreTrainedModel._init_weightsc                 C   s6  t jj}tt jjdƒrt jjj}| jjD ]}||jƒ ||jƒ q|| j	j
ƒ || j	jƒ | j	jD ]+}||j
ƒ ||jj
ƒ ||jjƒ ||jj
ƒ ||jjƒ ||jj
ƒ ||jjƒ q1|| jj
ƒ || jjƒ | jjD ]+}||jƒ ||jj
ƒ ||jjƒ ||jj
ƒ ||jjƒ ||jj
ƒ ||jjƒ qmd S )NÚweight_norm)r   ÚutilsrŸ   ÚhasattrZparametrizationsrƒ   rw   r6   r7   ÚencoderrT   rV   r“   ra   rb   rc   Údecoderrl   )r"   rŸ   r–   r   r   r   Úapply_weight_normä  s6   



ùz$DacPreTrainedModel.apply_weight_normc                 C   sf  | j jD ]}tj |j¡ tj |j¡ qtj | jj¡ tj | jj	¡ | jj
D ]9}tj |j¡ tj |jj¡ tj |jj	¡ tj |jj¡ tj |jj	¡ tj |jj¡ tj |jj	¡ q)tj | jj¡ tj | jj	¡ | jj
D ]9}tj |j¡ tj |jj¡ tj |jj	¡ tj |jj¡ tj |jj	¡ tj |jj¡ tj |jj	¡ qwd S rf   )rƒ   rw   r   r    Úremove_weight_normr6   r7   r¢   rT   rV   r“   ra   rb   rc   r£   rl   )r"   r–   r   r   r   r¥     s0   ùz%DacPreTrainedModel.remove_weight_normN)
r   r   r   r	   Zconfig_classZbase_model_prefixZmain_input_namerž   r¤   r¥   r   r   r   r   rš   Ù  s    !rš   z/
    The DAC (Descript Audio Codec) model.
    )Zcustom_introc                	       s¨   e Zd Zdef‡ fdd„Ze		ddejdee	 dee
 fdd	„ƒZe			dd
eej deej dee
 fdd„ƒZe		ddejdee	 dee
 fdd„ƒZ‡  ZS )ÚDacModelr0   c                    sj   t ƒ  |¡ || _t|ƒ| _t|ƒ| _t|ƒ| _t	t
 | jj¡ƒ| _d| j | jjkr/tdƒ‚|  ¡  d S )Nr'   z'The codebook_size must be a power of 2.)r   r   r0   r˜   r¢   r‘   r£   rn   rƒ   rX   rd   Úlog2r8   Zbits_per_codebookÚ
ValueErrorZ	post_initr:   r$   r   r   r   )  s   


zDacModel.__init__Nr›   rx   Úreturn_dictc           
      C   sj   |dur|n| j j}|  |¡}|  ||¡\}}}}}| j j| | j j|  }	|s.|	|||fS t|	|||ƒS )a  
        input_values (`torch.Tensor` of shape `(batch_size, 1, time_steps)`):
            Input audio data to encode.
        n_quantizers (`int`, *optional*):
            Number of quantizers to use. If `None`, all quantizers are used. Default is `None`.
        """
        return_dict = return_dict if return_dict is not None else self.config.return_dict

        quantized_representation = self.encoder(input_values)
        quantized_representation, audio_codes, projected_latents, commitment_loss, codebook_loss = self.quantizer(
            quantized_representation, n_quantizers
        )

        loss = self.config.commitment_loss_weight * commitment_loss + self.config.codebook_loss_weight * codebook_loss

        if not return_dict:
            return (loss, quantized_representation, audio_codes, projected_latents)

        return DacEncoderOutput(loss, quantized_representation, audio_codes, projected_latents)

    @auto_docstring
    def decode(
        self,
        quantized_representation: Optional[torch.Tensor] = None,
        audio_codes: Optional[torch.Tensor] = None,
        return_dict: Optional[bool] = None,
    ):
        """
        quantized_representation (`torch.Tensor` of shape `(batch_size, dimension, time_steps)`, *optional*):
            Quantized continuous representation of input.
        audio_codes (`torch.Tensor` of shape `(batch_size, num_codebooks, time_steps)`, *optional*):
            The codebook indices for each codebook, representing the quantized discrete
            representation of the input. This parameter should be provided if you want
            to decode directly from the audio codes (it will overwrite quantized_representation).
        """
        if quantized_representation is None and audio_codes is None:
            raise ValueError("Either `quantized_representation` or `audio_codes` must be provided.")

        return_dict = return_dict if return_dict is not None else self.config.return_dict

        if audio_codes is not None:
            quantized_representation = self.quantizer.from_codes(audio_codes)[0]

        audio_values = self.decoder(quantized_representation).squeeze(1)

        if not return_dict:
            return (audio_values,)

        return DacDecoderOutput(audio_values)

    @auto_docstring
    def forward(
        self,
        input_values: torch.Tensor,
        n_quantizers: Optional[int] = None,
        return_dict: Optional[bool] = None,
    ):
        """
        input_values (`torch.Tensor` of shape `(batch_size, 1, time_steps)`):
            Audio data to encode.
        n_quantizers (`int`, *optional*):
            Number of quantizers to use. If `None`, all quantizers are used. Default is `None`.

        Examples:

        ```python
        >>> from datasets import load_dataset, Audio
        >>> from transformers import DacModel, AutoProcessor
        >>> librispeech_dummy = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")

        >>> model = DacModel.from_pretrained("descript/dac_16khz")
        >>> processor = AutoProcessor.from_pretrained("descript/dac_16khz")
        >>> librispeech_dummy = librispeech_dummy.cast_column("audio", Audio(sampling_rate=processor.sampling_rate))
        >>> audio_sample = librispeech_dummy[-1]["audio"]["array"]
        >>> inputs = processor(raw_audio=audio_sample, sampling_rate=processor.sampling_rate, return_tensors="pt")

        >>> encoder_outputs = model.encode(inputs["input_values"])
        >>> # Get the intermediate audio codes
        >>> audio_codes = encoder_outputs.audio_codes
        >>> # Reconstruct the audio from its quantized representation
        >>> audio_values = model.decode(encoder_outputs.quantized_representation)
        >>> # or the equivalent with a forward pass
        >>> audio_values = model(inputs["input_values"]).audio_values
        ```"""
        return_dict = return_dict if return_dict is not None else self.config.return_dict

        length = input_values.shape[-1]
        loss, quantized_representation, audio_codes, projected_latents = self.encode(
            input_values, n_quantizers, return_dict=False
        )
        audio_values = self.decode(quantized_representation, return_dict=False)[0][..., :length]

        if not return_dict:
            return (loss, audio_values, quantized_representation, audio_codes, projected_latents)

        return DacOutput(loss, audio_values, quantized_representation, audio_codes, projected_latents)


__all__ = ["DacModel", "DacPreTrainedModel"]