o
    Zh7                     @   s   d dl Z d dlmZ d dlmZmZmZmZmZ d dl	Z
ddlmZmZmZ ddlmZ ddlmZ eeZdZd	d
iZg dZeddG dd deZdgZdS )    N)copyfile)AnyDictListOptionalTuple   )
AddedTokenBatchEncodingPreTrainedTokenizer)logging)requiresu   ▁
vocab_filezsentencepiece.bpe.model)Zar_ARcs_CZde_DEen_XXZes_XXet_EEfi_FIZfr_XXgu_INhi_INit_ITZja_XXkk_KZko_KRlt_LTlv_LVZmy_MMne_NPZnl_XXro_ROru_RUsi_LKtr_TRvi_VNzh_CN)sentencepiece)backendsc                       s  e Zd ZU dZeZddgZg Zee	 e
d< g Zee	 e
d< 								
					dGdeeeef  f fddZdd Zdd Zedd ZedefddZejdeddfddZ	dHdee	 deee	  dedee	 f fddZ	dIdee	 deee	  dee	 fd d!Z	dIdee	 deee	  dee	 fd"d#Zd$ed%ee d&ee fd'd(Zd)d* Zd+edee fd,d-Zd.d/ Zd0d1 Zd2d3 Z dId4ed5ee de!e fd6d7Z"	8		9dJd:ee d%ed;eee  d&ede#f
 fd<d=Z$d>d? Z%d@dA Z&dKdBdCZ'dDeddfdEdFZ(  Z)S )LMBartTokenizeruT  
    Construct an MBART tokenizer.

    Adapted from [`RobertaTokenizer`] and [`XLNetTokenizer`]. Based on
    [SentencePiece](https://github.com/google/sentencepiece).

    The tokenization method is `<tokens> <eos> <language code>` for source language documents, and `<language code>
    <tokens> <eos>` for target language documents.

    Examples:

    ```python
    >>> from transformers import MBartTokenizer

    >>> tokenizer = MBartTokenizer.from_pretrained("facebook/mbart-large-en-ro", src_lang="en_XX", tgt_lang="ro_RO")
    >>> example_english_phrase = " UN Chief Says There Is No Military Solution in Syria"
    >>> expected_translation_romanian = "Şeful ONU declară că nu există o soluţie militară în Siria"
    >>> inputs = tokenizer(example_english_phrase, text_target=expected_translation_romanian, return_tensors="pt")
    ```Z	input_idsZattention_maskprefix_tokenssuffix_tokens<s></s><unk><pad><mask>Nsp_model_kwargsc                    s~  t |trt|dddn|}|d u ri n|_tjdi j_jt| |_ddddd_	d_
tj_fd	d
ttD _dd
 j D _tjtj j
 j	d< j	j dd
 j	 D _tj  |d ur  fdd|D  t jd|||||||d |
| jd| |
d ur|
nd_jj _|_j d S )NTF)lstrip
normalizedr         r   )r'   r*   r(   r)   c                    s"   i | ]\}}| j |  j qS  )sp_model_sizefairseq_offset).0icodeselfr1   [/var/www/auris/lib/python3.10/site-packages/transformers/models/mbart/tokenization_mbart.py
<dictcomp>j   s    z+MBartTokenizer.__init__.<locals>.<dictcomp>c                 S      i | ]\}}||qS r1   r1   r4   kvr1   r1   r9   r:   m       r+   c                 S   r;   r1   r1   r<   r1   r1   r9   r:   q   r?   c                    s   g | ]}| vr|qS r1   r1   )r4   t)_additional_special_tokensr1   r9   
<listcomp>w       z+MBartTokenizer.__init__.<locals>.<listcomp>)	bos_token	eos_token	unk_token	sep_token	cls_token	pad_token
mask_tokentokenizer_filesrc_langtgt_langadditional_special_tokensr,   r   r1   )
isinstancestrr	   r,   spmSentencePieceProcessorsp_modelLoadr   fairseq_tokens_to_idsr3   lenr2   	enumerateFAIRSEQ_LANGUAGE_CODESlang_code_to_iditemsZid_to_lang_codeupdatefairseq_ids_to_tokenslistkeysextendsuper__init__	_src_langZcur_lang_code_idrM   set_src_lang_special_tokens)r8   r   rD   rE   rG   rH   rF   rI   rJ   rK   rL   rM   r,   rN   kwargs	__class__)rA   r8   r9   ra   A   sR   	
 zMBartTokenizer.__init__c                 C   s$   | j  }d |d< | j |d< |S )NrS   sp_model_proto)__dict__copyrS   serialized_model_proto)r8   stater1   r1   r9   __getstate__   s   
zMBartTokenizer.__getstate__c                 C   s<   || _ t| dsi | _tjdi | j| _| j| j d S )Nr,   r1   )rh   hasattrr,   rQ   rR   rS   ZLoadFromSerializedProtorg   )r8   dr1   r1   r9   __setstate__   s
   
zMBartTokenizer.__setstate__c                 C   s   t | jt | j | j d S )Nr/   )rV   rS   rY   r3   r7   r1   r1   r9   
vocab_size   s   zMBartTokenizer.vocab_sizereturnc                 C   s   | j S N)rb   r7   r1   r1   r9   rL      s   zMBartTokenizer.src_langnew_src_langc                 C   s   || _ | | j  d S rr   )rb   rc   )r8   rs   r1   r1   r9   rL      s   Ftoken_ids_0token_ids_1already_has_special_tokensc                    sx   |rt  j||ddS dgt| j }dgt| j }|du r*|dgt|  | S |dgt|  dgt|  | S )a  
        Retrieve sequence ids from a token list that has no special tokens added. This method is called when adding
        special tokens using the tokenizer `prepare_for_model` method.

        Args:
            token_ids_0 (`List[int]`):
                List of IDs.
            token_ids_1 (`List[int]`, *optional*):
                Optional second list of IDs for sequence pairs.
            already_has_special_tokens (`bool`, *optional*, defaults to `False`):
                Whether or not the token list is already formatted with special tokens for the model.

        Returns:
            `List[int]`: A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token.
        T)rt   ru   rv   r/   Nr   )r`   get_special_tokens_maskrV   r%   r&   )r8   rt   ru   rv   Zprefix_onesZsuffix_onesre   r1   r9   rw      s   $z&MBartTokenizer.get_special_tokens_maskc                 C   s,   |du r| j | | j S | j | | | j S )ab  
        Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and
        adding special tokens. An MBART sequence has the following format, where `X` represents the sequence:

        - `input_ids` (for encoder) `X [eos, src_lang_code]`
        - `decoder_input_ids`: (for decoder) `X [eos, tgt_lang_code]`

        BOS is never used. Pairs of sequences are not the expected use case, but they will be handled without a
        separator.

        Args:
            token_ids_0 (`List[int]`):
                List of IDs to which the special tokens will be added.
            token_ids_1 (`List[int]`, *optional*):
                Optional second list of IDs for sequence pairs.

        Returns:
            `List[int]`: List of [input IDs](../glossary#input-ids) with the appropriate special tokens.
        N)r%   r&   )r8   rt   ru   r1   r1   r9    build_inputs_with_special_tokens   s   z/MBartTokenizer.build_inputs_with_special_tokensc                 C   sP   | j g}| jg}|du rt|| | dg S t|| | | | | dg S )a  
        Create a mask from the two sequences passed to be used in a sequence-pair classification task. mBART does not
        make use of token type ids, therefore a list of zeros is returned.

        Args:
            token_ids_0 (`List[int]`):
                List of IDs.
            token_ids_1 (`List[int]`, *optional*):
                Optional second list of IDs for sequence pairs.

        Returns:
            `List[int]`: List of zeros.

        Nr   )Zsep_token_idZcls_token_idrV   )r8   rt   ru   sepclsr1   r1   r9   $create_token_type_ids_from_sequences   s
   "z3MBartTokenizer.create_token_type_ids_from_sequencesreturn_tensorsrL   rM   c                 K   sJ   |du s|du rt d|| _| |fd|d|}| |}||d< |S )zIUsed by translation pipeline, to prepare inputs for the generate functionNzATranslation requires a `src_lang` and a `tgt_lang` for this modelT)Zadd_special_tokensr|   Zforced_bos_token_id)
ValueErrorrL   Zconvert_tokens_to_ids)r8   Z
raw_inputsr|   rL   rM   extra_kwargsZinputsZtgt_lang_idr1   r1   r9   _build_translation_inputs   s   
z(MBartTokenizer._build_translation_inputsc                    s(    fddt  jD }| j |S )Nc                    s   i | ]}  ||qS r1   )Zconvert_ids_to_tokens)r4   r5   r7   r1   r9   r:     rC   z,MBartTokenizer.get_vocab.<locals>.<dictcomp>)rangerp   r[   Zadded_tokens_encoder)r8   Zvocabr1   r7   r9   	get_vocab
  s   zMBartTokenizer.get_vocabtextc                 C   s   | j j|tdS )N)Zout_type)rS   encoderP   )r8   r   r1   r1   r9   	_tokenize  s   zMBartTokenizer._tokenizec                 C   s4   || j v r
| j | S | j|}|r|| j S | jS )z0Converts a token (str) in an id using the vocab.)rU   rS   Z	PieceToIdr3   Zunk_token_id)r8   tokenZspm_idr1   r1   r9   _convert_token_to_id  s   

z#MBartTokenizer._convert_token_to_idc                 C   s&   || j v r
| j | S | j|| j S )z=Converts an index (integer) in a token (str) using the vocab.)r\   rS   Z	IdToPiecer3   )r8   indexr1   r1   r9   _convert_id_to_token  s   

z#MBartTokenizer._convert_id_to_tokenc                 C   s   d |td }|S )zIConverts a sequence of tokens (strings for sub-words) in a single string.  )joinreplaceSPIECE_UNDERLINEstrip)r8   tokensZ
out_stringr1   r1   r9   convert_tokens_to_string!  s   z'MBartTokenizer.convert_tokens_to_stringsave_directoryfilename_prefixc                 C   s   t j|std| d d S t j||r|d ndtd  }t j| jt j|kr?t j	| jr?t
| j| |fS t j	| jsgt|d}| j }|| W d    |fS 1 sbw   Y  |fS )NzVocabulary path (z) should be a directory-r   r   wb)ospathisdirloggererrorr   VOCAB_FILES_NAMESabspathr   isfiler   openrS   rj   write)r8   r   r   Zout_vocab_filefiZcontent_spiece_modelr1   r1   r9   save_vocabulary&  s"   (

zMBartTokenizer.save_vocabularyr   r   	src_texts	tgt_textsc                    s"   || _ || _t j||fi |S rr   )rL   rM   r`   prepare_seq2seq_batch)r8   r   rL   r   rM   rd   re   r1   r9   r   7  s   z$MBartTokenizer.prepare_seq2seq_batchc                 C      |  | jS rr   )rc   rL   r7   r1   r1   r9   _switch_to_input_modeC     z$MBartTokenizer._switch_to_input_modec                 C   r   rr   )set_tgt_lang_special_tokensrM   r7   r1   r1   r9   _switch_to_target_modeF  r   z%MBartTokenizer._switch_to_target_modec                 C   $   | j | | _g | _| j| jg| _dS )z_Reset the special tokens to the source lang setting. No prefix and suffix=[eos, src_lang_code].NrY   Zcur_lang_coder%   Zeos_token_idr&   )r8   rL   r1   r1   r9   rc   I     z*MBartTokenizer.set_src_lang_special_tokenslangc                 C   r   )zcReset the special tokens to the target language setting. No prefix and suffix=[eos, tgt_lang_code].Nr   )r8   r   r1   r1   r9   r   O  r   z*MBartTokenizer.set_tgt_lang_special_tokens)r'   r(   r(   r'   r)   r*   r+   NNNNN)NFrr   )r   Nr   )rq   N)*__name__
__module____qualname____doc__r   Zvocab_files_namesZmodel_input_namesr%   r   int__annotations__r&   r   r   rP   r   ra   rl   ro   propertyrp   rL   setterboolrw   rx   r{   r   r   r   r   r   r   r   r   r
   r   r   r   rc   r   __classcell__r1   r1   re   r9   r$   %   s   
 N







	 

r$   )r   shutilr   typingr   r   r   r   r   r"   rQ   Ztokenization_utilsr	   r
   r   utilsr   Zutils.import_utilsr   Z
get_loggerr   r   r   r   rX   r$   __all__r1   r1   r1   r9   <module>   s    
  
2