"""Tokenization classes for Whisper."""

import json
import os
import warnings
from functools import lru_cache
from typing import List, Optional, Tuple, Union

import numpy as np
import regex as re

from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
from .english_normalizer import BasicTextNormalizer, EnglishTextNormalizer


VOCAB_FILES_NAMES = {
    "vocab_file": "vocab.json",
    "tokenizer_file": "tokenizer.json",
    "merges_file": "merges.txt",
    "normalizer_file": "normalizer.json",
}

MAX_MODEL_INPUT_SIZES = {
    "openai/whisper-base": 448,
}
@lru_cache()
def bytes_to_unicode():
    """
    Returns list of utf-8 byte and a mapping to unicode strings. We specifically avoid mapping to whitespace/control
    characters the bpe code barfs on.

    The reversible bpe codes work on unicode strings. This means you need a large # of unicode characters in your vocab
    if you want to avoid UNKs. When you're at something like a 10B token dataset you end up needing around 5K for
    decent coverage. This is a significant percentage of your normal, say, 32K bpe vocab. To avoid that, we want lookup
    tables between utf-8 bytes and unicode strings.
    """
    bs = (
        list(range(ord("!"), ord("~") + 1)) + list(range(ord("¡"), ord("¬") + 1)) + list(range(ord("®"), ord("ÿ") + 1))
    )
    cs = bs[:]
    n = 0
    for b in range(2**8):
        if b not in bs:
            bs.append(b)
            cs.append(2**8 + n)
            n += 1
    cs = [chr(n) for n in cs]
    return dict(zip(bs, cs))


logger = logging.get_logger(__name__)
def get_pairs(word):
    """
    Return set of symbol pairs in a word.

    Word is represented as tuple of symbols (symbols being variable-length strings).
    """
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    return pairs


LANGUAGES = {
    "en": "english", "zh": "chinese", "de": "german", "es": "spanish", "ru": "russian",
    "ko": "korean", "fr": "french", "ja": "japanese", "pt": "portuguese", "tr": "turkish",
    "pl": "polish", "ca": "catalan", "nl": "dutch", "ar": "arabic", "sv": "swedish",
    "it": "italian", "id": "indonesian", "hi": "hindi", "fi": "finnish", "vi": "vietnamese",
    "he": "hebrew", "uk": "ukrainian", "el": "greek", "ms": "malay", "cs": "czech",
    "ro": "romanian", "da": "danish", "hu": "hungarian", "ta": "tamil", "no": "norwegian",
    "th": "thai", "ur": "urdu", "hr": "croatian", "bg": "bulgarian", "lt": "lithuanian",
    "la": "latin", "mi": "maori", "ml": "malayalam", "cy": "welsh", "sk": "slovak",
    "te": "telugu", "fa": "persian", "lv": "latvian", "bn": "bengali", "sr": "serbian",
    "az": "azerbaijani", "sl": "slovenian", "kn": "kannada", "et": "estonian", "mk": "macedonian",
    "br": "breton", "eu": "basque", "is": "icelandic", "hy": "armenian", "ne": "nepali",
    "mn": "mongolian", "bs": "bosnian", "kk": "kazakh", "sq": "albanian", "sw": "swahili",
    "gl": "galician", "mr": "marathi", "pa": "punjabi", "si": "sinhala", "km": "khmer",
    "sn": "shona", "yo": "yoruba", "so": "somali", "af": "afrikaans", "oc": "occitan",
    "ka": "georgian", "be": "belarusian", "tg": "tajik", "sd": "sindhi", "gu": "gujarati",
    "am": "amharic", "yi": "yiddish", "lo": "lao", "uz": "uzbek", "fo": "faroese",
    "ht": "haitian creole", "ps": "pashto", "tk": "turkmen", "nn": "nynorsk", "mt": "maltese",
    "sa": "sanskrit", "lb": "luxembourgish", "my": "myanmar", "bo": "tibetan", "tl": "tagalog",
    "mg": "malagasy", "as": "assamese", "tt": "tatar", "haw": "hawaiian", "ln": "lingala",
    "ha": "hausa", "ba": "bashkir", "jw": "javanese", "su": "sundanese", "yue": "cantonese",
}

# language code lookup by name, with a few language aliases
TO_LANGUAGE_CODE = {
    **{language: code for code, language in LANGUAGES.items()},
    "burmese": "my",
    "valencian": "ca",
    "flemish": "nl",
    "haitian": "ht",
    "letzeburgesch": "lb",
    "pushto": "ps",
    "panjabi": "pa",
    "moldavian": "ro",
    "moldovan": "ro",
    "sinhalese": "si",
    "castilian": "es",
    "mandarin": "zh",
}

TASK_IDS = ["translate", "transcribe"]
class WhisperTokenizer(PreTrainedTokenizer):
    """
    Construct a Whisper tokenizer.

    This tokenizer inherits from [`PreTrainedTokenizer`] which contains some of the main methods. Users should refer to
    the superclass for more information regarding such methods.

    Args:
        vocab_file (`str`):
            Path to the vocabulary file.
        merges_file (`str`):
            Path to the merges file.
        normalizer_file (`str`, *optional*):
            Path to the normalizer_file file.
        errors (`str`, *optional*, defaults to `"replace"`):
            Paradigm to follow when decoding bytes to UTF-8. See
            [bytes.decode](https://docs.python.org/3/library/stdtypes.html#bytes.decode) for more information.
        unk_token (`str`, *optional*, defaults to `"<|endoftext|>"`):
            The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this
            token instead.
        bos_token (`str`, *optional*, defaults to `"<|endoftext|>"`):
            The beginning of sequence token. The `decoder_start_token_id` is used to set the first token as
            `"<|startoftranscript|>"` when generating.
        eos_token (`str`, *optional*, defaults to `"<|endoftext|>"`):
            The end of sequence token.
        pad_token (`str`, *optional*):
            The token used for padding, for example when batching sequences of different lengths.
        add_prefix_space (`bool`, *optional*, defaults to `False`):
            Whether or not to add an initial space to the input. This allows to treat the leading word just as any
            other word.
        language (`str`, *optional*):
            The language of the transcription text. The corresponding language id token is appended to the start of the
            sequence for multilingual speech recognition and speech translation tasks, e.g. for Spanish the token
            `"<|es|>"` is appended to the start of sequence. This should be used for multilingual fine-tuning only.
        task (`str`, *optional*):
            Task identifier to append at the start of sequence (if any). This should be used for multilingual
            fine-tuning, with `"transcribe"` for speech recognition and `"translate"` for speech translation.
        predict_timestamps (`bool`, *optional*, defaults to `False`):
            Whether to omit the `<|notimestamps|>` token at the start of the sequence.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        merges_file,
        normalizer_file=None,
        errors="replace",
        unk_token="<|endoftext|>",
        bos_token="<|endoftext|>",
        eos_token="<|endoftext|>",
        pad_token=None,
        add_prefix_space=False,
        language=None,
        task=None,
        predict_timestamps=False,
        **kwargs,
    ):
        bos_token = (
            AddedToken(bos_token, lstrip=False, rstrip=False, normalized=False, special=True)
            if isinstance(bos_token, str)
            else bos_token
        )
        eos_token = (
            AddedToken(eos_token, lstrip=False, rstrip=False, normalized=False, special=True)
            if isinstance(eos_token, str)
            else eos_token
        )
        unk_token = (
            AddedToken(unk_token, lstrip=False, rstrip=False, normalized=False, special=True)
            if isinstance(unk_token, str)
            else unk_token
        )
        pad_token = (
            AddedToken(pad_token, lstrip=False, rstrip=False, normalized=False, special=True)
            if isinstance(pad_token, str)
            else pad_token
        )

        with open(vocab_file, encoding="utf-8") as vocab_handle:
            self.encoder = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.encoder.items()}
        self.errors = errors  # how to handle errors in decoding
        self.byte_encoder = bytes_to_unicode()
        self.byte_decoder = {v: k for k, v in self.byte_encoder.items()}
        with open(merges_file, encoding="utf-8") as merges_handle:
            bpe_merges = merges_handle.read().split("\n")[1:-1]
        bpe_merges = [tuple(merge.split()) for merge in bpe_merges]
        self.bpe_ranks = dict(zip(bpe_merges, range(len(bpe_merges))))
        self.cache = {}
        self.add_prefix_space = add_prefix_space

        if normalizer_file is not None:
            with open(normalizer_file, encoding="utf-8") as vocab_handle:
                self.english_spelling_normalizer = json.load(vocab_handle)
        else:
            self.english_spelling_normalizer = None

        # Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
        self.pat = re.compile(r"""'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+""")
        self.timestamp_pat = re.compile(r"<\|(\d+\.\d+)\|>")

        self.language = language
        super().__init__(
            errors=errors,
            unk_token=unk_token,
            bos_token=bos_token,
            eos_token=eos_token,
            pad_token=pad_token,
            add_prefix_space=add_prefix_space,
            **kwargs,
        )

        self.task = task
        self.predict_timestamps = predict_timestamps

    @property
    def vocab_size(self) -> int:
        return len(self.encoder)

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def bpe(self, token):
        if token in self.cache:
            return self.cache[token]
        word = tuple(token)
        pairs = get_pairs(word)

        if not pairs:
            return token

        while True:
            bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf")))
            if bigram not in self.bpe_ranks:
                break
            first, second = bigram
            new_word = []
            i = 0
            while i < len(word):
                try:
                    j = word.index(first, i)
                except ValueError:
                    new_word.extend(word[i:])
                    break
                else:
                    new_word.extend(word[i:j])
                    i = j

                if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                    new_word.append(first + second)
                    i += 2
                else:
                    new_word.append(word[i])
                    i += 1
            new_word = tuple(new_word)
            word = new_word
            if len(word) == 1:
                break
            else:
                pairs = get_pairs(word)
        word = " ".join(word)
        self.cache[token] = word
        return word
    def set_prefix_tokens(self, language: str = None, task: str = None, predict_timestamps: bool = None):
        """
        Override the prefix tokens appended to the start of the label sequence. This method can be used standalone to
        update the prefix tokens as required when fine-tuning. Example:

        ```python
        >>> # instantiate the tokenizer and set the prefix token to Spanish
        >>> tokenizer = WhisperTokenizer.from_pretrained("openai/whisper-tiny", language="spanish")
        >>> # now switch the prefix token from Spanish to French
        >>> tokenizer.set_prefix_tokens(language="french")
        ```

        Args:
            language (`str`, *optional*, defaults to `None`):
                The language of the transcription text.
            task (`str`, *optional*, defaults to `None`):
                Task identifier to append at the start of sequence (if any).
            predict_timestamps (`bool`, *optional*, defaults to `None`):
                Whether to omit the `<|notimestamps|>` token at the start of the sequence.
        """
        self.language = language if language is not None else self.language
        self.task = task if task is not None else self.task
        self.predict_timestamps = predict_timestamps if predict_timestamps is not None else self.predict_timestamps

    @property
    def prefix_tokens(self) -> List[int]:
        bos_token_id = self.convert_tokens_to_ids("<|startoftranscript|>")
        translate_token_id = self.convert_tokens_to_ids("<|translate|>")
        transcribe_token_id = self.convert_tokens_to_ids("<|transcribe|>")
        notimestamps_token_id = self.convert_tokens_to_ids("<|notimestamps|>")
        langs = tuple(LANGUAGES.keys())

        if self.language is not None:
            self.language = self.language.lower()
            if self.language in TO_LANGUAGE_CODE:
                language_id = TO_LANGUAGE_CODE[self.language]
            elif self.language in TO_LANGUAGE_CODE.values():
                language_id = self.language
            else:
                is_language_code = len(self.language) == 2
                raise ValueError(
                    f"Unsupported language: {self.language}. Language should be one of:"
                    f" {list(TO_LANGUAGE_CODE.values()) if is_language_code else list(TO_LANGUAGE_CODE.keys())}."
                )

        if self.task is not None:
            if self.task not in TASK_IDS:
                raise ValueError(f"Unsupported task: {self.task}. Task should be in: {TASK_IDS}")

        bos_sequence = [bos_token_id]
        if self.language is not None:
            bos_sequence.append(bos_token_id + 1 + langs.index(language_id))
        if self.task is not None:
            bos_sequence.append(transcribe_token_id if self.task == "transcribe" else translate_token_id)
        if not self.predict_timestamps:
            bos_sequence.append(notimestamps_token_id)
        return bos_sequence

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None) -> List[int]:
        """Build model inputs from a sequence by appending eos_token_id."""
        if token_ids_1 is None:
            return self.prefix_tokens + token_ids_0 + [self.eos_token_id]
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return self.prefix_tokens + token_ids_0 + token_ids_1 + [self.eos_token_id]
    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        """
        Retrieve sequence ids from a token list that has no special tokens added. This method is called when adding
        special tokens using the tokenizer `prepare_for_model` method.

        Args:
            token_ids_0 (`List[int]`):
                List of IDs.
            token_ids_1 (`List[int]`, *optional*):
                Optional second list of IDs for sequence pairs.
            already_has_special_tokens (`bool`, *optional*, defaults to `False`):
                Whether or not the token list is already formatted with special tokens for the model.

        Returns:
            `List[int]`: A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token.
        """
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        prefix_ones = [1] * len(self.prefix_tokens)
        suffix_ones = [1]
        if token_ids_1 is None:
            return prefix_ones + ([0] * len(token_ids_0)) + suffix_ones
        return prefix_ones + ([0] * len(token_ids_0)) + ([0] * len(token_ids_1)) + suffix_ones

    def _tokenize(self, text):
        """Tokenize a string."""
        bpe_tokens = []
        for token in re.findall(self.pat, text):
            # Map all bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
            token = "".join(self.byte_encoder[b] for b in token.encode("utf-8"))
            bpe_tokens.extend(bpe_token for bpe_token in self.bpe(token).split(" "))
        return bpe_tokens

    def _convert_token_to_id(self, token):
        """Converts a token (str) in an id using the vocab."""
        return self.encoder.get(token, self.encoder.get(self.unk_token))
    def _convert_id_to_token(self, index):
        """
        Converts an index (integer) in a token (str) using the vocab. Whisper's base tokenizer always decodes OOV
        tokens as "", thus we do not use the `unk_token` here.
        """
        return self.decoder.get(index, "")

    def _normalize(self, text):
        warnings.warn(
            "The private method `_normalize` is deprecated and will be removed in v5 of Transformers."
            "You can normalize an input string using the Whisper English normalizer using the `normalize` method."
        )
        return self.normalize(text)

    def _basic_normalize(self, text, remove_diacritics=False):
        warnings.warn(
            "The private method `_basic_normalize` is deprecated and will be removed in v5 of Transformers."
            "You can normalize an input string using the Whisper basic normalizer using the `basic_normalize` method."
        )
        return self.basic_normalize(text, remove_diacritics=remove_diacritics)
    def normalize(self, text):
        """
        Normalize a given string using the `EnglishTextNormalizer` class, which performs common transformations on
        English text.
        """
        normalizer = EnglishTextNormalizer(self.english_spelling_normalizer)
        return normalizer(text)
    @staticmethod
    def basic_normalize(text, remove_diacritics=False):
        """
        Normalize a given string using the `BasicTextNormalizer` class, which performs common transformations on
        multilingual text.
        """
        normalizer = BasicTextNormalizer(remove_diacritics=remove_diacritics)
        return normalizer(text)
    def _decode_with_timestamps(
        self, token_ids, skip_special_tokens=False, time_precision=0.02, segment_size=1500
    ) -> str:
        """
        Timestamp tokens are above the special tokens' id range and are ignored by `decode()`. This method decodes
        given tokens with timestamps tokens annotated, e.g. "<|1.08|>".
        """
        timestamp_begin = self.all_special_ids[-1] + 1
        outputs = [[]]

        cur_max_timestamp = 0.0
        prev_segments_len = 0.0
        penultimate_timestamp = 0.0

        for i, token in enumerate(token_ids):
            if token >= timestamp_begin:
                timestamp = float((token - timestamp_begin) * time_precision)

                if timestamp < cur_max_timestamp:
                    # next segment has started
                    last_was_single_ending = i >= 2 and not (
                        token_ids[i - 1] >= timestamp_begin and token_ids[i - 2] >= timestamp_begin
                    )
                    if last_was_single_ending:
                        prev_segments_len += time_precision * segment_size
                    else:
                        cur_max_timestamp = penultimate_timestamp
                        prev_segments_len += penultimate_timestamp
                        outputs = outputs[:-2]

                penultimate_timestamp = cur_max_timestamp
                cur_max_timestamp = timestamp

                outputs.append(f"<|{(timestamp + prev_segments_len):.2f}|>")
                outputs.append([])
            else:
                outputs[-1].append(token)

        outputs = [
            s if isinstance(s, str) else self.decode(s, skip_special_tokens=skip_special_tokens) for s in outputs
        ]
        return "".join(outputs)
    def _compute_offsets(self, token_ids, time_precision=0.02, segment_size=1500):
        """
        Compute offsets for a given tokenized input

        Args:
            token_ids (`Union[int, List[int], np.ndarray, torch.Tensor, tf.Tensor]`):
                List of tokenized input ids. Can be obtained using the `__call__` method.
            time_precision (`float`, *optional*, defaults to 0.02):
                The time ratio to convert from token to time.
            segment_size (`int`, *optional*, defaults to 1500):
                The number of features in the input mel spectrogram.
        """
        offsets = []
        # ensure torch tensor of token ids is placed on cpu
        if "torch" in str(type(token_ids)) and (hasattr(token_ids, "cpu") and callable(token_ids.cpu)):
            token_ids = token_ids.cpu()
        token_ids = np.array(token_ids)
        if token_ids.shape[0] > 1 and len(token_ids.shape) > 1:
            raise ValueError("Can only process a single input at a time")

        timestamp_begin = self.all_special_ids[-1] + 1
        timestamp_tokens = token_ids >= timestamp_begin

        consecutive = np.where(timestamp_tokens[:-1] & timestamp_tokens[1:])[0] + 1
        if consecutive.shape[0] == 0 and timestamp_tokens.sum() <= 1:
            # either there are no timestamps or there are no consecutive ones
            return []
        elif np.where(timestamp_tokens)[0][-1] + 1 not in consecutive:
            # we add the final timestamp if it is not already in the list
            consecutive = np.append(consecutive, np.where(timestamp_tokens)[0][-1] + 1)

        last_slice = np.where(timestamp_tokens)[0][0]
        cur_max_timestamp = 0
        prev_segments_len = 0
        for current_slice in consecutive:
            sliced_tokens = token_ids[last_slice:current_slice]
            if len(sliced_tokens) > 1:
                start_timestamp_position = sliced_tokens[0].item() - timestamp_begin
                end_timestamp_position = sliced_tokens[-1].item() - timestamp_begin

                if start_timestamp_position < cur_max_timestamp:
                    # next segment has started
                    is_single_ending = last_slice >= 2 and not (
                        token_ids[last_slice - 2] >= timestamp_begin and token_ids[last_slice - 1] >= timestamp_begin
                    )
                    if is_single_ending:
                        prev_segments_len += segment_size
                    else:
                        prev_segments_len += cur_max_timestamp

                cur_max_timestamp = end_timestamp_position

                # strip timestamp tokens from the text output
                sliced_tokens = self._preprocess_token_ids(sliced_tokens)
                text = self._decode(sliced_tokens)
                text = self._filter_timestamp_ids(text)
                offsets.append(
                    {
                        "text": text,
                        "timestamp": (
                            start_timestamp_position * time_precision + prev_segments_len * time_precision,
                            end_timestamp_position * time_precision + prev_segments_len * time_precision,
                        ),
                    }
                )
            last_slice = current_slice

        return offsets
    @lru_cache
    def timestamp_ids(self, time_precision=0.02):
        """
        Compute the timestamp token ids for a given precision and save to least-recently used (LRU) cache.

        Args:
            time_precision (`float`, *optional*, defaults to 0.02):
                The time ratio to convert from token to time.
        """
        return self.convert_tokens_to_ids([("<|%.2f|>" % (i * time_precision)) for i in range(1500 + 1)])
    def _preprocess_token_ids(self, token_ids, skip_special_tokens: bool = False):
        """
        Pre-process the token ids for decoding by removing the prompt tokens ids and timestamp token ids.

        Args:
            token_ids (`Union[int, List[int], np.ndarray, torch.Tensor, tf.Tensor]`):
                List of tokenized input ids. Typically, obtained using the `__call__` method of the tokenizer.
            skip_special_tokens (`bool`, *optional*, defaults to `False`):
                Whether or not to remove special tokens from the token ids. If `True`, the prompt token ids will be
                removed.
        """
        if skip_special_tokens:
            prompt_token_id = self.convert_tokens_to_ids("<|startofprev|>")
            decoder_start_token_id = self.convert_tokens_to_ids("<|startoftranscript|>")
            token_ids = self._strip_prompt(token_ids, prompt_token_id, decoder_start_token_id)

        return token_ids

    def _filter_timestamp_ids(self, token_ids):
        return re.sub(self.timestamp_pat, "", token_ids)
    def decode(
        self,
        token_ids,
        skip_special_tokens: bool = False,
        clean_up_tokenization_spaces: bool = None,
        output_offsets: bool = False,
        time_precision: float = 0.02,
        decode_with_timestamps: bool = False,
        normalize: bool = False,
        basic_normalize: bool = False,
        remove_diacritics: bool = False,
        **kwargs,
    ) -> str:
        """
        Converts a sequence of ids in a string, using the tokenizer and vocabulary with options to remove special
        tokens and clean up tokenization spaces.

        Similar to doing `self.convert_tokens_to_string(self.convert_ids_to_tokens(token_ids))`.

        Args:
            token_ids (`Union[int, List[int], np.ndarray, torch.Tensor, tf.Tensor]`):
                List of tokenized input ids. Can be obtained using the `__call__` method.
            skip_special_tokens (`bool`, *optional*, defaults to `False`):
                Whether or not to remove special tokens in the decoding. Will remove the previous tokens (pre-prompt)
                if present.
            clean_up_tokenization_spaces (`bool`, *optional*):
                Whether or not to clean up the tokenization spaces. If `None`, will default to
                `self.clean_up_tokenization_spaces` (available in the `tokenizer_config`).
            output_offsets (`bool`, *optional*, defaults to `False`):
                Whether or not to output the offsets of the tokens. This should only be set if the model predicted
                timestamps. If there are previous tokens (pre-prompt) to decode, they will only appear in the decoded
                text if they contain timestamp tokens.
            time_precision (`float`, *optional*, defaults to 0.02):
                The time ratio to convert from token to time.
            decode_with_timestamps (`bool`, *optional*, defaults to `False`):
                Whether or not to decode with timestamps included in the raw text.
            normalize (`bool`, *optional*, defaults to `False`):
                Whether or not to apply the English text normalizer to the decoded text. Only applicable when the
                target text is in English. Otherwise, the basic text normalizer should be applied.
            basic_normalize (`bool`, *optional*, defaults to `False`):
                Whether or not to apply the Basic text normalizer to the decoded text. Applicable to multilingual
                target text.
            remove_diacritics (`bool`, *optional*, defaults to `False`):
                Whether or not to remove diacritics when applying the Basic text normalizer. Removing diacritics may
                destroy information in the decoded text, hence it should be used with caution.
            kwargs (additional keyword arguments, *optional*):
                Will be passed to the underlying model specific decode method.
        Returns:
            `str`: The decoded sentence.
        """
        filtered_ids = self._preprocess_token_ids(
            token_ids,
            skip_special_tokens=skip_special_tokens,
        )

        text = super().decode(
            filtered_ids,
            skip_special_tokens=skip_special_tokens,
            clean_up_tokenization_spaces=clean_up_tokenization_spaces,
            normalize=normalize,
            basic_normalize=basic_normalize,
            remove_diacritics=remove_diacritics,
            **kwargs,
        )
        if decode_with_timestamps:
            # legacy method to decode timestamps when not included in the tokenizer vocabulary
            text = self._decode_with_timestamps(
                filtered_ids, time_precision=time_precision, skip_special_tokens=skip_special_tokens
            )
        else:
            text = self._filter_timestamp_ids(text)

        # retrieve offsets
        if output_offsets:
            offsets = self._compute_offsets(token_ids, time_precision=time_precision)
            return {"text": text, "offsets": offsets}
        return text

    def _decode(
        self,
        token_ids: Union[int, List[int]],
        skip_special_tokens: bool = False,
        normalize: bool = False,
        basic_normalize: bool = False,
        remove_diacritics: bool = False,
        **kwargs,
    ) -> str:
        self._decode_use_source_tokenizer = kwargs.pop("use_source_tokenizer", False)
        filtered_tokens = self.convert_ids_to_tokens(token_ids, skip_special_tokens=skip_special_tokens)

        # To avoid mixing byte-level and unicode for byte-level BPT
        # we need to build string separately for added tokens and byte-level tokens
        # cf. https://github.com/huggingface/transformers/issues/1133
        sub_texts = []
        current_sub_text = []
        for token in filtered_tokens:
            if skip_special_tokens and token in self.all_special_ids:
                continue
            if token in self.added_tokens_encoder:
                if current_sub_text:
                    sub_texts.append(self.convert_tokens_to_string(current_sub_text))
                    current_sub_text = []
                sub_texts.append(token)
            else:
                current_sub_text.append(token)
        if current_sub_text:
            sub_texts.append(self.convert_tokens_to_string(current_sub_text))

        text = "".join(sub_texts)

        if normalize:
            clean_text = self.normalize(text)
            return clean_text
        elif basic_normalize:
            clean_text = self.basic_normalize(text, remove_diacritics=remove_diacritics)
            return clean_text
        else:
            return text

    def convert_tokens_to_string(self, tokens):
        """Converts a sequence of tokens (string) in a single string."""
        text = "".join(tokens)
        text = bytearray([self.byte_decoder[c] for c in text]).decode("utf-8", errors=self.errors)
        return text

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        merge_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"]
        )
        normalizer_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["normalizer_file"]
        )

        with open(vocab_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + "\n")

        index = 0
        with open(merge_file, "w", encoding="utf-8") as writer:
            writer.write("#version: 0.2\n")
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning(
                        f"Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."
                        " Please check that the tokenizer is not corrupted!"
                    )
                    index = token_index
                writer.write(" ".join(bpe_tokens) + "\n")
                index += 1

        if self.english_spelling_normalizer is not None:
            with open(normalizer_file, "w", encoding="utf-8") as f:
                f.write(
                    json.dumps(self.english_spelling_normalizer, indent=2, sort_keys=True, ensure_ascii=False) + "\n"
                )

        return vocab_file, merge_file, normalizer_file

    def prepare_for_tokenization(self, text, is_split_into_words=False, **kwargs):
        add_prefix_space = kwargs.pop("add_prefix_space", self.add_prefix_space)
        if is_split_into_words or add_prefix_space:
            text = " " + text
        return (text, kwargs)

    def get_decoder_prompt_ids(self, task=None, language=None, no_timestamps=True):
        self.set_prefix_tokens(task=task, language=language, predict_timestamps=not no_timestamps)
        # prefix tokens are of the form: <|startoftranscript|> <|lang_id|> <|task|> <|notimestamps|>
        # we don't want to force the bos token at position 1, as this is the starting token
        # when we generate, so we slice the prefix tokens to: <|lang_id|> <|task|> <|notimestamps|>
        forced_tokens = self.prefix_tokens[1:]
        forced_decoder_ids = [(rank + 1, token) for rank, token in enumerate(forced_tokens)]
        return forced_decoder_ids

    def _decode_asr(self, model_outputs, *, return_timestamps, return_language, time_precision):
        return _decode_asr(
            self,
            model_outputs,
            return_timestamps=return_timestamps,
            return_language=return_language,
            time_precision=time_precision,
        )

    def get_prompt_ids(self, text: str, return_tensors="np"):
        """Converts prompt text to IDs that can be passed to [`~WhisperForConditionalGeneration.generate`]."""
        batch_encoding = self("<|startofprev|>", " " + text.strip(), add_special_tokens=False)

        # Check for special tokens
        prompt_text_ids = batch_encoding["input_ids"][1:]
        special_token_id = next((x for x in prompt_text_ids if x >= self.all_special_ids[0]), None)
        if special_token_id is not None:
            token = self.convert_ids_to_tokens(special_token_id)
            raise ValueError(f"Encountered text in the prompt corresponding to disallowed special token: {token}.")

        batch_encoding.convert_to_tensors(tensor_type=return_tensors)
        return batch_encoding["input_ids"]

    def _strip_prompt(self, token_ids: List[int], prompt_token_id: int, decoder_start_token_id: int):
        if not isinstance(token_ids, list):
            token_ids = self._convert_to_list(token_ids)

        # handle case of empty token_ids for decoding with timestamps.
        # at this point token_ids is a list, so it is safe to use if not check.
        if not token_ids:
            return token_ids

        has_prompt = token_ids[0] == prompt_token_id
        if has_prompt:
            if decoder_start_token_id in token_ids:
                return token_ids[token_ids.index(decoder_start_token_id) :]
            else:
                return []

        return token_ids

    @staticmethod
    def _convert_to_list(token_ids):
        # convert type to ndarray if necessary
        if hasattr(token_ids, "numpy"):
            if "torch" in str(type(token_ids)):
                token_ids = token_ids.cpu().numpy()
            elif "tensorflow" in str(type(token_ids)):
                token_ids = token_ids.numpy()
        elif "jaxlib" in str(type(token_ids)):
            token_ids = token_ids.tolist()
        # now the token ids are either a numpy array, or a list of lists
        if isinstance(token_ids, np.ndarray):
            token_ids = token_ids.tolist()
        return token_ids
def _decode_asr(tokenizer, model_outputs, *, return_timestamps, return_language, time_precision):
    """
    Internal method meant to only be used by asr pipeline. Handles all the little quirks specific to whisper to handle
    the various options not allowed in other seq2seq models
    """
    # =========== Overview ============
    # - iterate over all outputs
    # - all tokens within output
    # - Each token can be
    #   - language token
    #   - special token
    #   - timestamp token
    #   - text token
    # - We accumulate the text tokens.
    # - We split on end timestamps
    # - Lots of complexity comes from stride and timestamps

    last_language = None

    def new_chunk():
        return {"language": last_language, "timestamp": [None, None], "text": ""}

    # Welcome to the state machine!
    chunks = []
    chunk = new_chunk()
    time_offset = 0.0
    timestamp_begin = tokenizer.convert_tokens_to_ids("<|notimestamps|>") + 1
    previous_tokens = []
    previous_token_timestamps = []
    skip = False
    right_stride_start = None

    all_special_ids = set(tokenizer.all_special_ids)
    prompt_token_id = tokenizer.convert_tokens_to_ids("<|startofprev|>")
    decoder_start_token_id = tokenizer.convert_tokens_to_ids("<|startoftranscript|>")
    # - iterate over all outputs
    for chunk_id, output in enumerate(model_outputs):
        # We can drop everything to Python list, it's going to make our lives easier
        token_ids = output["tokens"][0].tolist()
        # The prompt (if any) is stripped before decoding
        token_ids = tokenizer._strip_prompt(token_ids, prompt_token_id, decoder_start_token_id)
        if return_timestamps == "word":
            token_timestamps = output["token_timestamps"][0].tolist()

        # Those keep track of timestamps within strides, which need to be skipped
        # and resolved later in a single chunk
        last_timestamp = None
        first_timestamp = timestamp_begin

        if "stride" in output:
            chunk_len, stride_left, stride_right = output["stride"]
            # Offset the timings to account for the other `model_outputs`.
            time_offset -= stride_left
            right_stride_start = chunk_len - stride_right

            # We are NOT going to split on timestamps within strides, and delay
            # until we're out of BOTH strides, to avoid corner cases.
            if stride_left:
                first_timestamp = stride_left / time_precision + timestamp_begin
            if stride_right:
                for token in reversed(token_ids):
                    if token >= timestamp_begin:
                        # There can be several tokens in the right stride, but the
                        # last one is ALWAYS going to be skipped
                        if (
                            last_timestamp is not None
                            and (token - timestamp_begin) * time_precision < right_stride_start
                        ):
                            break
                        last_timestamp = token

        current_tokens = []
        current_token_timestamps = []

        # - all tokens within output
        for i, token in enumerate(token_ids):
            # 4 possible states for each token
            # - 1/ Language code
            # - 2/ all other special tokens (which we ignore)
            # - 3/ Timestamp
            # - 4/ Regular text
            if token in all_special_ids:
                # Either language code or other
                text = tokenizer.decode([token])
                # Removing outer shell <|XX|>
                text = text[2:-2]
                language = LANGUAGES.get(text, None)
                if language is not None:
                    # 1/ Indeed some language
                    if last_language and language != last_language and not return_timestamps:
                        previous_tokens.append(current_tokens)
                        resolved_tokens = _find_longest_common_sequence(previous_tokens)
                        resolved_text = tokenizer.decode(resolved_tokens)
                        chunk["text"] = resolved_text
                        chunks.append(chunk)

                        # Flush all our temporary context
                        previous_tokens = []
                        current_tokens = []
                        chunk = new_chunk()
                    chunk["language"] = language
                    last_language = language
                else:
                    # 2/ This is a regular special token, ignoring it
                    pass
            elif token >= timestamp_begin:
                # 3/ Timestamp token
                time = (token - timestamp_begin) * time_precision + time_offset
                time = round(time, 2)
                if last_timestamp and token >= last_timestamp:
                    # Whisper outputted a timestamp token, but it falls within our
                    # stride, so we're going to skip it and resolve this later.
                    # Skip is necessary because timestamp tokens always come in
                    # pairs, so we need to skip the next one too.
                    skip = True
                elif skip or (previous_tokens and token < first_timestamp):
                    skip = False
                elif chunk["timestamp"][0] is None:
                    chunk["timestamp"][0] = time
                else:
                    # This is the end of the timestamp chunk
                    if time == chunk["timestamp"][0]:
                        # This is a bug in timestamp token output where a duplicate
                        # token is read as a stop when it should be a start.
                        # Skip it so it becomes de facto a start again.
                        pass
                    else:
                        chunk["timestamp"][1] = time
                        # Handling merges
                        previous_tokens.append(current_tokens)
                        if return_timestamps == "word":
                            previous_token_timestamps.append(current_token_timestamps)
                        resolved_tokens, resolved_token_timestamps = _find_longest_common_sequence(
                            previous_tokens, previous_token_timestamps
                        )
                        resolved_text = tokenizer.decode(resolved_tokens)
                        chunk["text"] = resolved_text
                        if return_timestamps == "word":
                            chunk["words"] = _collate_word_timestamps(
                                tokenizer, resolved_tokens, resolved_token_timestamps, last_language, return_language
                            )
                        chunks.append(chunk)

                        # Flush all our temporary context
                        previous_tokens = []
                        current_tokens = []
                        previous_token_timestamps = []
                        current_token_timestamps = []
                        chunk = new_chunk()
            else:
                # 4/ Regular token: append to the current list of tokens so it can
                # be merged and decoded into text later.
                current_tokens.append(token)
                if return_timestamps == "word":
                    start_time = round(token_timestamps[i] + time_offset, 2)
                    if i + 1 < len(token_timestamps):
                        end_time = round(token_timestamps[i + 1] + time_offset, 2)
                    else:
                        end_time = None  # should never happen
                    current_token_timestamps.append((start_time, end_time))

        if "stride" in output:
            time_offset += chunk_len - stride_right

        # Leftover tokens
        if current_tokens:
            previous_tokens.append(current_tokens)
            if return_timestamps == "word":
                previous_token_timestamps.append(current_token_timestamps)
        elif not (any(p for p in previous_tokens)):
            chunk = new_chunk()
            previous_tokens = []
            current_tokens = []
            previous_token_timestamps = []
            current_token_timestamps = []

    if previous_tokens:
        if return_timestamps:
            logger.warning(
                "Whisper did not predict an ending timestamp, which can happen if audio is cut off in the middle of a"
                " word. Also make sure WhisperTimeStampLogitsProcessor was used during generation."
            )
        # Happens when we don't use timestamps
        resolved_tokens, resolved_token_timestamps = _find_longest_common_sequence(
            previous_tokens, previous_token_timestamps
        )
        resolved_text = tokenizer.decode(resolved_tokens)
        chunk["text"] = resolved_text
        if return_timestamps == "word":
            chunk["words"] = _collate_word_timestamps(
                tokenizer, resolved_tokens, resolved_token_timestamps, last_language, return_language
            )
        chunks.append(chunk)

    # Preparing and cleaning up the pipeline output
    full_text = "".join(chunk["text"] for chunk in chunks)
    if return_timestamps or return_language:
        for chunk in chunks:
            if not return_timestamps:
                chunk.pop("timestamp")
            else:
                chunk["timestamp"] = tuple(chunk["timestamp"])
            if not return_language:
                chunk.pop("language")

        if return_timestamps == "word":
            new_chunks = []
            for chunk in chunks:
                new_chunks.extend(chunk["words"])
            optional = {"chunks": new_chunks}
        else:
            optional = {"chunks": chunks}
    else:
        optional = {}
    return full_text, optional


def _find_longest_common_sequence(sequences, token_timestamp_sequences=None):
    # It would be much harder to do O(n) because of fault tolerance.
    # We actually have a really good property which is that the total sequence
    # MUST be those subsequences in order.
    # If token_timestamp_sequences is provided, those sequences are split in
    # exactly the same way.
    left_sequence = sequences[0]
    left_length = len(left_sequence)
    total_sequence = []

    if token_timestamp_sequences:
        left_token_timestamp_sequence = token_timestamp_sequences[0]
        total_token_timestamp_sequence = []

    for seq_idx, right_sequence in enumerate(sequences[1:]):
        max_ = 0.0
        max_indices = (left_length, left_length, 0, 0)
        right_length = len(right_sequence)
        for i in range(1, left_length + right_length):
            # epsilon to favor long perfect matches
            eps = i / 10000.0

            # Slightly convoluted because we don't want out of bound indices.
            # This will be necessary anyway to remove the overflowing indices later.
            left_start = max(0, left_length - i)
            left_stop = min(left_length, left_length + right_length - i)
            left = np.array(left_sequence[left_start:left_stop])

            right_start = max(0, i - left_length)
            right_stop = min(right_length, i)
            right = np.array(right_sequence[right_start:right_stop])

            # We can only match subsequences of the same size.
            if len(left) != len(right):
                raise RuntimeError(
                    "There is a bug within whisper `decode_asr` function, please report it. Dropping to prevent bad inference."
                )

            if token_timestamp_sequences:
                # Get length of longest subsequence of tokens that match and have
                # timestamps that are in order
                matches = sum(
                    1
                    for idx, elem in enumerate(left)
                    if (
                        elem == right[idx]
                        and left_token_timestamp_sequence[left_start + idx]
                        <= token_timestamp_sequences[seq_idx + 1][right_start + idx]
                    )
                )
            else:
                matches = np.sum(left == right)

            matching = matches / i + eps
            if matches > 1 and matching > max_:
                max_ = matching
                max_indices = (left_start, left_stop, right_start, right_stop)

        (left_start, left_stop, right_start, right_stop) = max_indices

        # This is a small conflict optimization since those sequences overlap in audio:
        # we give more confidence to the left sequence for the left of the overlap,
        # and to the right sequence for the right of the overlap.
        left_mid = (left_stop + left_start) // 2
        right_mid = (right_stop + right_start) // 2
        total_sequence.extend(left_sequence[:left_mid])
        left_sequence = right_sequence[right_mid:]
        left_length = len(left_sequence)

        if token_timestamp_sequences:
            total_token_timestamp_sequence.extend(left_token_timestamp_sequence[:left_mid])
            left_token_timestamp_sequence = token_timestamp_sequences[seq_idx + 1][right_mid:]

    total_sequence.extend(left_sequence)

    if token_timestamp_sequences is None:
        return total_sequence

    if len(token_timestamp_sequences) > 0:
        total_token_timestamp_sequence.extend(left_token_timestamp_sequence)
        return total_sequence, total_token_timestamp_sequence
    else:
        return total_sequence, []


def _collate_word_timestamps(tokenizer, tokens, token_timestamps, language, return_language):
    words, _, token_indices = _combine_tokens_into_words(tokenizer, tokens, language)

    optional_language_field = {"language": language} if return_language else {}

    timings = [
        {
            "text": word,
            "timestamp": (token_timestamps[indices[0]][0], token_timestamps[indices[-1]][1]),
            **optional_language_field,
        }
        for word, indices in zip(words, token_indices)
    ]
    return timings
def _combine_tokens_into_words(
    tokenizer,
    tokens: List[int],
    language: str = None,
    prepend_punctuations: str = "\"'“¡¿([{-",
    append_punctuations: str = "\"'.。,，!！?？:：”)]}、",
):
    """
    Groups tokens by word. Returns a tuple containing a list of strings with the words, and a list of `token_id`
    sequences with the tokens making up each word.
    """
    if language is None:
        language = tokenizer.language
    if language is None:
        language = "english"

    if language in {"chinese", "japanese", "thai", "lao", "myanmar", "cantonese"}:
        # These languages don't typically use spaces.
        words, word_tokens, token_indices = _split_tokens_on_unicode(tokenizer, tokens)
    else:
        words, word_tokens, token_indices = _split_tokens_on_spaces(tokenizer, tokens)

    _merge_punctuations(words, word_tokens, token_indices, prepend_punctuations, append_punctuations)
    return words, word_tokens, token_indices


def _split_tokens_on_unicode(tokenizer, tokens: List[int]):
    """Combine tokens into words by splitting at any position where the tokens are decoded as valid unicode points."""
    decoded_full = tokenizer.decode(tokens, decode_with_timestamps=True)
    replacement_char = "\ufffd"

    words = []
    word_tokens = []
    token_indices = []
    current_tokens = []
    current_indices = []
    unicode_offset = 0

    for token_idx, token in enumerate(tokens):
        current_tokens.append(token)
        current_indices.append(token_idx)
        decoded = tokenizer.decode(current_tokens, decode_with_timestamps=True)

        if (
            replacement_char not in decoded
            or decoded_full[unicode_offset + decoded.index(replacement_char)] == replacement_char
        ):
            words.append(decoded)
            word_tokens.append(current_tokens)
            token_indices.append(current_indices)
            current_tokens = []
            current_indices = []
            unicode_offset += len(decoded)

    return words, word_tokens, token_indices


def _split_tokens_on_spaces(tokenizer, tokens: List[int]):
    """Combine tokens into words by splitting at whitespace and punctuation tokens."""
    subwords, subword_tokens_list, subword_indices_list = _split_tokens_on_unicode(tokenizer, tokens)
    words = []
    word_tokens = []
    token_indices = []

    for subword, subword_tokens, subword_indices in zip(subwords, subword_tokens_list, subword_indices_list):
        special = subword_tokens[0] >= tokenizer.eos_token_id
        with_space = subword.startswith(" ")
        punctuation = subword.strip() in "!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~"
        if special or with_space or punctuation or len(words) == 0:
            words.append(subword)
            word_tokens.append(subword_tokens)
            token_indices.append(subword_indices)
        else:
            words[-1] = words[-1] + subword
            word_tokens[-1].extend(subword_tokens)
            token_indices[-1].extend(subword_indices)

    return words, word_tokens, token_indices


def _merge_punctuations(words, tokens, indices, prepended, appended):
    """Merges punctuation tokens with neighboring words."""
    # prepend punctuations
    i = len(words) - 2
    j = len(words) - 1
    while i >= 0:
        if words[i].startswith(" ") and words[i].strip() in prepended:
            words[j] = words[i] + words[j]
            tokens[j] = tokens[i] + tokens[j]
            indices[j] = indices[i] + indices[j]
            words[i] = ""
            tokens[i] = []
            indices[i] = []
        else:
            j = i
        i -= 1

    # append punctuations
    i = 0
    j = 1
    while j < len(words):
        if not words[i].endswith(" ") and words[j] in appended:
            words[i] += words[j]
            tokens[i] += tokens[j]
            indices[i] += indices[j]
            words[j] = ""
            tokens[j] = []
            indices[j] = []
        else:
            i = j
        j += 1

    # remove elements that are now empty
    words[:] = [word for word in words if word]
    tokens[:] = [token for token in tokens if token]
    indices[:] = [idx for idx in indices if idx]


__all__ = ["WhisperTokenizer"]
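
# ---------------------------------------------------------------------------
# Usage sketch (not part of the library module above). It assumes `transformers`
# is installed and that the real "openai/whisper-tiny" checkpoint can be
# downloaded; everything else it calls is the public API defined in this file.
if __name__ == "__main__":
    from transformers import WhisperTokenizer

    tokenizer = WhisperTokenizer.from_pretrained("openai/whisper-tiny", language="spanish", task="transcribe")

    # The prefix reflects the configured language/task:
    # ['<|startoftranscript|>', '<|es|>', '<|transcribe|>', '<|notimestamps|>']
    print(tokenizer.convert_ids_to_tokens(tokenizer.prefix_tokens))

    # Round-trip: __call__ adds the prefix tokens and <|endoftext|>.
    ids = tokenizer("hola mundo").input_ids
    print(tokenizer.decode(ids, skip_special_tokens=True))  # -> "hola mundo"

    # (position, token_id) pairs used to force language/task tokens during generation.
    print(tokenizer.get_decoder_prompt_ids(task="transcribe", language="french"))

    # Timestamp tokens come in pairs around each segment; decode() can either
    # render them inline or return per-segment offsets.
    timestamp_ids = tokenizer.timestamp_ids()  # ids for <|0.00|>, <|0.02|>, ...
    segment = (
        [timestamp_ids[0]]
        + tokenizer("hola mundo", add_special_tokens=False).input_ids
        + [timestamp_ids[50]]
    )
    print(tokenizer.decode(segment, decode_with_timestamps=True))  # "<|0.00|>hola mundo<|1.00|>"
    print(tokenizer.decode(segment, output_offsets=True)["offsets"])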