"""Tokenization classes for Qwen2."""

from typing import Optional, Tuple

from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_qwen2 import Qwen2Tokenizer


logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}

MAX_MODEL_INPUT_SIZES = {"qwen/qwen-tokenizer": 32768}


class Qwen2TokenizerFast(PreTrainedTokenizerFast):
    """
    Construct a "fast" Qwen2 tokenizer (backed by HuggingFace's *tokenizers* library). Based on byte-level
    Byte-Pair-Encoding.

    As with GPT2Tokenizer, this tokenizer has been trained to treat spaces as parts of the tokens, so a word is
    encoded differently depending on whether it is at the beginning of a sentence (without a space) or not:

    ```python
    >>> from transformers import Qwen2TokenizerFast

    >>> tokenizer = Qwen2TokenizerFast.from_pretrained("Qwen/Qwen-tokenizer")
    >>> tokenizer("Hello world")["input_ids"]
    [9707, 1879]

    >>> tokenizer(" Hello world")["input_ids"]
    [21927, 1879]
    ```
    This is expected.

    This tokenizer inherits from [`PreTrainedTokenizerFast`] which contains most of the main methods. Users should
    refer to this superclass for more information regarding those methods.

    Args:
        vocab_file (`str`, *optional*):
            Path to the vocabulary file.
        merges_file (`str`, *optional*):
            Path to the merges file.
        tokenizer_file (`str`, *optional*):
            Path to a [tokenizers](https://github.com/huggingface/tokenizers) file (generally has a .json extension) that
            contains everything needed to load the tokenizer.
        unk_token (`str`, *optional*, defaults to `"<|endoftext|>"`):
            The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this
            token instead. Not applicable to this tokenizer.
        bos_token (`str`, *optional*):
            The beginning of sequence token. Not applicable for this tokenizer.
        eos_token (`str`, *optional*, defaults to `"<|endoftext|>"`):
            The end of sequence token.
        pad_token (`str`, *optional*, defaults to `"<|endoftext|>"`):
            The token used for padding, for example when batching sequences of different lengths.
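
    When a special token is passed as a plain string, it is wrapped into an `AddedToken` that is neither
    stripped nor normalized, so the defaults above can be overridden at load time (a minimal sketch,
    reusing the checkpoint from the example above):

    ```python
    >>> tokenizer = Qwen2TokenizerFast.from_pretrained("Qwen/Qwen-tokenizer", pad_token="<|endoftext|>")
    >>> tokenizer.pad_token
    '<|endoftext|>'
    ```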
    """

    vocab_files_names = VOCAB_FILES_NAMES
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = Qwen2Tokenizer

    def __init__(
        self,
        vocab_file=None,
        merges_file=None,
        tokenizer_file=None,
        unk_token="<|endoftext|>",
        bos_token=None,
        eos_token="<|endoftext|>",
        pad_token="<|endoftext|>",
        **kwargs,
    ):
        # Special tokens passed as plain strings are wrapped into AddedToken
        # instances that are neither stripped nor normalized.
        bos_token = (
            AddedToken(bos_token, lstrip=False, rstrip=False, special=True, normalized=False)
            if isinstance(bos_token, str)
            else bos_token
        )
        eos_token = (
            AddedToken(eos_token, lstrip=False, rstrip=False, special=True, normalized=False)
            if isinstance(eos_token, str)
            else eos_token
        )
        unk_token = (
            AddedToken(unk_token, lstrip=False, rstrip=False, special=True, normalized=False)
            if isinstance(unk_token, str)
            else unk_token
        )
        pad_token = (
            AddedToken(pad_token, lstrip=False, rstrip=False, special=True, normalized=False)
            if isinstance(pad_token, str)
            else pad_token
        )
        super().__init__(
            vocab_file=vocab_file,
            merges_file=merges_file,
            tokenizer_file=tokenizer_file,
            unk_token=unk_token,
            bos_token=bos_token,
            eos_token=eos_token,
            pad_token=pad_token,
            **kwargs,
        )

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        # Delegate to the underlying `tokenizers` model, which writes
        # vocab.json and merges.txt and returns the created file paths.
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)


__all__ = ["Qwen2TokenizerFast"]
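
# Usage sketch (illustrative only; this module is importable only as part of
# the `transformers` package, so run the snippet from user code). The
# checkpoint name comes from the class docstring example above:
#
#     from transformers import Qwen2TokenizerFast
#
#     tokenizer = Qwen2TokenizerFast.from_pretrained("Qwen/Qwen-tokenizer")
#     tokenizer("Hello world")["input_ids"]  # [9707, 1879]
#     tokenizer.save_vocabulary(".")         # writes vocab.json and merges.txt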