"""BARK model generation configuration"""

import copy
from typing import Dict, Optional

from ...generation.configuration_utils import GenerationConfig
from ...utils import logging


logger = logging.get_logger(__name__)


class BarkSemanticGenerationConfig(GenerationConfig):
    model_type = "semantic"

    def __init__(
        self,
        eos_token_id=10_000,
        renormalize_logits=True,
        max_new_tokens=768,
        output_scores=False,
        return_dict_in_generate=False,
        output_hidden_states=False,
        output_attentions=False,
        temperature=1.0,
        do_sample=False,
        text_encoding_offset=10_048,
        text_pad_token=129_595,
        semantic_infer_token=129_599,
        semantic_vocab_size=10_000,
        max_input_semantic_length=256,
        semantic_rate_hz=49.9,
        min_eos_p=None,
        **kwargs,
    ):
        """Class that holds a generation configuration for [`BarkSemanticModel`].

        This configuration inherits from [`GenerationConfig`] and can be used to control the model generation. Read the
        documentation from [`GenerationConfig`] for more information.

        Args:
            eos_token_id (`int`, *optional*, defaults to 10_000):
                The id of the *end-of-sequence* token.
            renormalize_logits (`bool`, *optional*, defaults to `True`):
                Whether to renormalize the logits after applying all the logits processors (including the
                custom ones). It's highly recommended to set this flag to `True` as the search algorithms assume the
                score logits are normalized but some logit processors break the normalization.
            max_new_tokens (`int`, *optional*, defaults to 768):
                The maximum numbers of tokens to generate, ignoring the number of tokens in the prompt.
            output_scores (`bool`, *optional*, defaults to `False`):
                Whether or not to return the prediction scores. See `scores` under returned tensors for more details.
            return_dict_in_generate (`bool`, *optional*, defaults to `False`):
                Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
            output_hidden_states (`bool`, *optional*, defaults to `False`):
                Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors
                for more details.
            output_attentions (`bool`, *optional*, defaults to `False`):
                Whether or not to return the attentions tensors of all attention layers. See `attentions` under
                returned tensors for more details.
            temperature (`float`, *optional*, defaults to 1.0):
                The value used to modulate the next token probabilities.
            do_sample (`bool`, *optional*, defaults to `False`):
                Whether or not to use sampling; use greedy decoding otherwise.
            text_encoding_offset (`int`, *optional*, defaults to 10_048):
                Text encoding offset.
            text_pad_token (`int`, *optional*, defaults to 129_595):
                Text pad token.
            semantic_infer_token (`int`, *optional*, defaults to 129_599):
                Semantic infer token.
            semantic_vocab_size (`int`, *optional*, defaults to 10_000):
                Semantic vocab size.
            max_input_semantic_length (`int`, *optional*, defaults to 256):
                Max length of semantic input vector.
            semantic_rate_hz (`float`, *optional*, defaults to 49.9):
                Semantic rate in Hertz.
            min_eos_p (`float`, *optional*):
                Minimum threshold of the probability of the EOS token for it to be sampled. This is an early stopping
                strategy to mitigate potential unwanted generations at the end of a prompt. The original implementation
                suggests a default value of 0.2.
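
        Example (a usage sketch added for illustration; it assumes the class is importable from the top-level
        `transformers` namespace, which may differ across versions):

        ```python
        >>> from transformers import BarkSemanticGenerationConfig

        >>> # Sample the semantic tokens rather than decoding greedily, and stop early once the
        >>> # EOS probability passes the 0.2 threshold suggested by the original implementation.
        >>> semantic_config = BarkSemanticGenerationConfig(do_sample=True, temperature=0.7, min_eos_p=0.2)
        >>> semantic_config.do_sample
        True
        ```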
        """
        super().__init__(
            temperature=temperature,
            do_sample=do_sample,
            eos_token_id=eos_token_id,
            renormalize_logits=renormalize_logits,
            max_new_tokens=max_new_tokens,
            output_scores=output_scores,
            return_dict_in_generate=return_dict_in_generate,
            output_hidden_states=output_hidden_states,
            output_attentions=output_attentions,
            **kwargs,
        )

        self.text_encoding_offset = text_encoding_offset
        self.text_pad_token = text_pad_token
        self.semantic_pad_token = eos_token_id
        self.semantic_infer_token = semantic_infer_token
        self.semantic_vocab_size = semantic_vocab_size
        self.max_input_semantic_length = max_input_semantic_length
        self.semantic_rate_hz = semantic_rate_hz
        self.min_eos_p = min_eos_p


class BarkCoarseGenerationConfig(GenerationConfig):
    model_type = "coarse_acoustics"

    def __init__(
        self,
        renormalize_logits=True,
        output_scores=False,
        return_dict_in_generate=False,
        output_hidden_states=False,
        output_attentions=False,
        temperature=1.0,
        do_sample=False,
        coarse_semantic_pad_token=12_048,
        coarse_rate_hz=75,
        n_coarse_codebooks=2,
        coarse_infer_token=12_050,
        max_coarse_input_length=256,
        max_coarse_history: int = 630,
        sliding_window_len: int = 60,
        **kwargs,
    ):
        """Class that holds a generation configuration for [`BarkCoarseModel`].

        This configuration inherits from [`GenerationConfig`] and can be used to control the model generation. Read the
        documentation from [`GenerationConfig`] for more information.

        Args:
            renormalize_logits (`bool`, *optional*, defaults to `True`):
                Whether to renormalize the logits after applying all the logits processors (including the
                custom ones). It's highly recommended to set this flag to `True` as the search algorithms assume the
                score logits are normalized but some logit processors break the normalization.
            output_scores (`bool`, *optional*, defaults to `False`):
                Whether or not to return the prediction scores. See `scores` under returned tensors for more details.
            return_dict_in_generate (`bool`, *optional*, defaults to `False`):
                Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
            output_hidden_states (`bool`, *optional*, defaults to `False`):
                Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors
                for more details.
            output_attentions (`bool`, *optional*, defaults to `False`):
                Whether or not to return the attentions tensors of all attention layers. See `attentions` under
                returned tensors for more details.
            temperature (`float`, *optional*, defaults to 1.0):
                The value used to modulate the next token probabilities.
            do_sample (`bool`, *optional*, defaults to `False`):
                Whether or not to use sampling; use greedy decoding otherwise.
            coarse_semantic_pad_token (`int`, *optional*, defaults to 12_048):
                Coarse semantic pad token.
            coarse_rate_hz (`int`, *optional*, defaults to 75):
                Coarse rate in Hertz.
            n_coarse_codebooks (`int`, *optional*, defaults to 2):
                Number of coarse codebooks.
            coarse_infer_token (`int`, *optional*, defaults to 12_050):
                Coarse infer token.
            max_coarse_input_length (`int`, *optional*, defaults to 256):
                Max length of input coarse vector.
            max_coarse_history (`int`, *optional*, defaults to 630):
                Max length of the output of the coarse acoustics model used in the fine generation step.
            sliding_window_len (`int`, *optional*, defaults to 60):
                Length of the sliding window used by the coarse generation step when generating raw audio.
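
        Example (a usage sketch added for illustration; the import path is an assumption and may differ across
        `transformers` versions):

        ```python
        >>> from transformers import BarkCoarseGenerationConfig

        >>> # Enable sampling for the coarse acoustics stage; the sliding-window settings keep
        >>> # their defaults of max_coarse_history=630 and sliding_window_len=60.
        >>> coarse_config = BarkCoarseGenerationConfig(do_sample=True, temperature=0.7)
        >>> coarse_config.sliding_window_len
        60
        ```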
        """
        super().__init__(
            temperature=temperature,
            do_sample=do_sample,
            renormalize_logits=renormalize_logits,
            output_scores=output_scores,
            return_dict_in_generate=return_dict_in_generate,
            output_hidden_states=output_hidden_states,
            output_attentions=output_attentions,
            **kwargs,
        )

        self.coarse_semantic_pad_token = coarse_semantic_pad_token
        self.coarse_rate_hz = coarse_rate_hz
        self.n_coarse_codebooks = n_coarse_codebooks
        self.coarse_infer_token = coarse_infer_token
        self.max_coarse_input_length = max_coarse_input_length
        self.max_coarse_history = max_coarse_history
        self.sliding_window_len = sliding_window_len


class BarkFineGenerationConfig(GenerationConfig):
    model_type = "fine_acoustics"

    def __init__(
        self,
        temperature=1.0,
        max_fine_history_length=512,
        max_fine_input_length=1024,
        n_fine_codebooks=8,
        **kwargs,
    ):
        """Class that holds a generation configuration for [`BarkFineModel`].

        [`BarkFineModel`] is an autoencoder model, so it should not usually be used for generation. However, under the
        hood, it uses `temperature` when used by [`BarkModel`].

        This configuration inherits from [`GenerationConfig`] and can be used to control the model generation. Read the
        documentation from [`GenerationConfig`] for more information.

        Args:
            temperature (`float`, *optional*):
                The value used to modulate the next token probabilities.
            max_fine_history_length (`int`, *optional*, defaults to 512):
                Max length of the fine history vector.
            max_fine_input_length (`int`, *optional*, defaults to 1024):
                Max length of fine input vector.
            n_fine_codebooks (`int`, *optional*, defaults to 8):
                Number of codebooks used.
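
        Example (a usage sketch added for illustration; the import path is an assumption):

        ```python
        >>> from transformers import BarkFineGenerationConfig

        >>> # Only `temperature` affects generation here; the other arguments describe the
        >>> # fine model's expected input layout.
        >>> fine_config = BarkFineGenerationConfig(temperature=0.5)
        >>> fine_config.n_fine_codebooks
        8
        ```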
        """
        super().__init__(temperature=temperature)

        self.max_fine_history_length = max_fine_history_length
        self.max_fine_input_length = max_fine_input_length
        self.n_fine_codebooks = n_fine_codebooks

    def validate(self, **kwargs):
        """
        Overrides GenerationConfig.validate because BarkFineGenerationConfig doesn't use any parameters outside
        temperature.
        """
        pass


class BarkGenerationConfig(GenerationConfig):
    model_type = "bark"

    def __init__(
        self,
        semantic_config: Optional[Dict] = None,
        coarse_acoustics_config: Optional[Dict] = None,
        fine_acoustics_config: Optional[Dict] = None,
        sample_rate=24_000,
        codebook_size=1024,
        **kwargs,
    ):
        """Class that holds a generation configuration for [`BarkModel`].

        The [`BarkModel`] does not have a `generate` method, but uses this class to generate speech with a nested
        [`BarkGenerationConfig`] which uses [`BarkSemanticGenerationConfig`], [`BarkCoarseGenerationConfig`], and
        [`BarkFineGenerationConfig`].

        This configuration inherits from [`GenerationConfig`] and can be used to control the model generation. Read the
        documentation from [`GenerationConfig`] for more information.

        Args:
            semantic_config (`Dict`, *optional*):
                Semantic generation configuration.
            coarse_acoustics_config (`Dict`, *optional*):
                Coarse generation configuration.
            fine_acoustics_config (`Dict`, *optional*):
                Fine generation configuration.
            sample_rate (`int`, *optional*, defaults to 24_000):
                Sample rate.
            codebook_size (`int`, *optional*, defaults to 1024):
                Vector length for each codebook.
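
        Example (a usage sketch added for illustration; the import path is an assumption):

        ```python
        >>> from transformers import BarkGenerationConfig

        >>> # Passing no sub-configs logs an info message for each and falls back to the
        >>> # default sub-model generation configurations.
        >>> generation_config = BarkGenerationConfig(sample_rate=24_000)
        >>> generation_config.semantic_config.model_type
        'semantic'
        ```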
        """
        if semantic_config is None:
            semantic_config = {}
            logger.info("semantic_config is None. initializing the semantic model with default values.")

        if coarse_acoustics_config is None:
            coarse_acoustics_config = {}
            logger.info("coarse_acoustics_config is None. initializing the coarse model with default values.")

        if fine_acoustics_config is None:
            fine_acoustics_config = {}
            logger.info("fine_acoustics_config is None. initializing the fine model with default values.")

        self.semantic_config = BarkSemanticGenerationConfig(**semantic_config)
        self.coarse_acoustics_config = BarkCoarseGenerationConfig(**coarse_acoustics_config)
        self.fine_acoustics_config = BarkFineGenerationConfig(**fine_acoustics_config)

        self.sample_rate = sample_rate
        self.codebook_size = codebook_size

    @classmethod
    def from_sub_model_configs(
        cls,
        semantic_config: BarkSemanticGenerationConfig,
        coarse_acoustics_config: BarkCoarseGenerationConfig,
        fine_acoustics_config: BarkFineGenerationConfig,
        **kwargs,
    ):
        """
        Instantiate a [`BarkGenerationConfig`] (or a derived class) from bark sub-models generation configuration.

        Returns:
            [`BarkGenerationConfig`]: An instance of a configuration object
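
        Example (a usage sketch added for illustration; the import paths are assumptions and may differ across
        `transformers` versions):

        ```python
        >>> from transformers import (
        ...     BarkCoarseGenerationConfig,
        ...     BarkFineGenerationConfig,
        ...     BarkGenerationConfig,
        ...     BarkSemanticGenerationConfig,
        ... )

        >>> # Build the nested configuration from explicitly constructed sub-model configs.
        >>> generation_config = BarkGenerationConfig.from_sub_model_configs(
        ...     BarkSemanticGenerationConfig(do_sample=True, min_eos_p=0.2),
        ...     BarkCoarseGenerationConfig(do_sample=True),
        ...     BarkFineGenerationConfig(temperature=0.5),
        ... )
        ```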
        """
        return cls(
            semantic_config=semantic_config.to_dict(),
            coarse_acoustics_config=coarse_acoustics_config.to_dict(),
            fine_acoustics_config=fine_acoustics_config.to_dict(),
            **kwargs,
        )

    def to_dict(self):
        """
        Serializes this instance to a Python dictionary. Override the default [`~PretrainedConfig.to_dict`].

        Returns:
            `Dict[str, any]`: Dictionary of all the attributes that make up this configuration instance.
        """
        output = copy.deepcopy(self.__dict__)

        output["semantic_config"] = self.semantic_config.to_dict()
        output["coarse_acoustics_config"] = self.coarse_acoustics_config.to_dict()
        output["fine_acoustics_config"] = self.fine_acoustics_config.to_dict()

        output["model_type"] = self.__class__.model_type
        return output