
    eTh~7                         S SK JrJrJrJrJr  S SKJrJrJ	r	J
r
Jr  S SKJr  S SKJr  S SKJr  S SKJr  S SKJr  \\\4   r " S S	5      rg
)    )DictListOptionalTupleUnion)
AddedTokenEncodeInputEncodingInputSequence	Tokenizer)Decoder)Model)
Normalizer)PreTokenizer)PostProcessorc                   B   \ rS rSrSJS\4S jjrS rS\S\4S jr	SKS	\S\
\\4   4S
 jjrS\
\\4   4S jrSKS	\S\4S jjr      SLS\\   S\\   S\\   S\\   S\\   S\\   4S jjrS r\S\\   4S j5       rSMS\S\\   S\\   4S jjrS r\S\\   4S j5       rS\\\\4      S\4S jrS\\\\4      S\4S jrS \S\4S! jr   SNS \S"\\   S#\S$\S\4
S% jjr   SOS&\\!   S#\S$\S\\   4S' jjr"SKS(\\   S)\\   S\4S* jjr#SKS+\\\      S)\\   S\4S, jjr$S-\S\\   4S. jr%S/\S\\   4S0 jr&SJS1\S2\\   4S3 jjr'SKS4\S5\4S6 jjr(SPS5\4S7 jjr) SQS8\S"\\   S$\S\4S9 jjr*\S\+4S: j5       r,\,RZ                  S;\+4S< j5       r,\S\.4S= j5       r/\/RZ                  S>\.4S? j5       r/\S\04S@ j5       r1\1RZ                  SA\04SB j5       r1\S\24SC j5       r3\3RZ                  SD\24SE j5       r3\S\44SF j5       r5\5RZ                  SG\44SH j5       r5SIr6g)RBaseTokenizer   N	tokenizerc                 2    Xl         Ub  X l        g 0 U l        g N)
_tokenizer_parameters)selfr   
parameterss      a/var/www/auris/envauris/lib/python3.13/site-packages/tokenizers/implementations/base_tokenizer.py__init__BaseTokenizer.__init__   s    #)3)?:R    c                     SR                  U R                  R                  5       SR                  S U R                  R                  5        5       5      5      $ )Nz!Tokenizer(vocabulary_size={}, {})z, c              3   H   #    U  H  u  pUS -   [        U5      -   v   M     g7f)=N)str).0kvs      r   	<genexpr>)BaseTokenizer.__repr__.<locals>.<genexpr>   s!     L3K41a#gA&3Ks    ")formatr   get_vocab_sizejoinr   itemsr   s    r   __repr__BaseTokenizer.__repr__   sF    299OO**,IIL43C3C3I3I3KLL
 	
r   is_pairreturnc                 8    U R                   R                  U5      $ )z
Return the number of special tokens that would be added for single/pair sentences.
:param is_pair: Boolean indicating if the input would be a single sentence or a pair
:return:
)r   num_special_tokens_to_add)r   r0   s     r   r3   'BaseTokenizer.num_special_tokens_to_add   s     88AAr   with_added_tokensc                 4    U R                   R                  US9$ )zReturns the vocabulary

Args:
    with_added_tokens: boolean:
        Whether to include the added tokens in the vocabulary

Returns:
    The vocabulary
r5   )r   	get_vocabr   r5   s     r   r8   BaseTokenizer.get_vocab!   s     ((;L(MMr   c                 6    U R                   R                  5       $ )zdReturns the added reverse vocabulary

Returns:
    The added vocabulary mapping ints to AddedTokens
)r   get_added_tokens_decoderr-   s    r   r<   &BaseTokenizer.get_added_tokens_decoder-   s     7799r   c                 4    U R                   R                  US9$ )zReturn the size of vocabulary, with or without added tokens.

Args:
    with_added_tokens: (`optional`) bool:
        Whether to count in added special tokens or not

Returns:
    Size of vocabulary
r7   )r   r*   r9   s     r   r*   BaseTokenizer.get_vocab_size5   s     --@Q-RRr   	directionpad_to_multiple_ofpad_idpad_type_id	pad_tokenlengthc           	      >    U R                   R                  UUUUUUS9$ )a/  Change the padding strategy

Args:
    direction: (`optional`) str:
        Can be one of: `right` or `left`

    pad_to_multiple_of: (`optional`) unsigned int:
        If specified, the padding length should always snap to the next multiple of
        the given value. For example if we were going to pad with a length of 250 but
        `pad_to_multiple_of=8` then we will pad to 256.

    pad_id: (`optional`) unsigned int:
        The indice to be used when padding

    pad_type_id: (`optional`) unsigned int:
        The type indice to be used when padding

    pad_token: (`optional`) str:
        The pad token to be used when padding

    length: (`optional`) unsigned int:
        If specified, the length at which to pad. If not specified
        we pad using the size of the longest sequence in a batch
)r@   rA   rB   rC   rD   rE   )r   enable_padding)r   r@   rA   rB   rC   rD   rE   s          r   rG   BaseTokenizer.enable_paddingA   s3    B --1# . 
 	
r   c                 6    U R                   R                  5       $ )zDisable padding)r   
no_paddingr-   s    r   rJ   BaseTokenizer.no_paddingk   s    ))++r   c                 .    U R                   R                  $ )zGet the current padding parameters

Returns:
    None if padding is disabled, a dict with the currently set parameters
    if the padding is enabled.
)r   paddingr-   s    r   rM   BaseTokenizer.paddingo   s     &&&r   
max_lengthstridestrategyc                 6    U R                   R                  XUS9$ )af  Change the truncation options

Args:
    max_length: unsigned int:
        The maximum length at which to truncate

    stride: (`optional`) unsigned int:
        The length of the previous first sequence to be included
        in the overflowing sequence

    strategy: (`optional`) str:
        Can be one of `longest_first`, `only_first` or `only_second`
)rP   rQ   )r   enable_truncation)r   rO   rP   rQ   s       r   rS   BaseTokenizer.enable_truncationy   s     00U]0^^r   c                 6    U R                   R                  5       $ )zDisable truncation)r   no_truncationr-   s    r   rV   BaseTokenizer.no_truncation   s    ,,..r   c                 .    U R                   R                  $ )zGet the current truncation parameters

Returns:
    None if truncation is disabled, a dict with the current truncation parameters if
    truncation is enabled
)r   
truncationr-   s    r   rY   BaseTokenizer.truncation   s     )))r   tokensc                 8    U R                   R                  U5      $ )a  Add the given tokens to the vocabulary

Args:
    tokens: List[Union[str, AddedToken]]:
        A list of tokens to add to the vocabulary. Each token can either be
        a string, or an instance of AddedToken

Returns:
    The number of tokens that were added to the vocabulary
)r   
add_tokens)r   r[   s     r   r]   BaseTokenizer.add_tokens   s     ))&11r   special_tokensc                 8    U R                   R                  U5      $ )a  Add the given special tokens to the vocabulary, and treat them as special tokens.

The special tokens will never be processed by the model, and will be
removed while decoding.

Args:
    tokens: List[Union[str, AddedToken]]:
        A list of special tokens to add to the vocabulary. Each token can either be
        a string, or an instance of AddedToken

Returns:
    The number of tokens that were added to the vocabulary
)r   add_special_tokens)r   r_   s     r   ra    BaseTokenizer.add_special_tokens   s     11.AAr   sequencec                 8    U R                   R                  U5      $ )z}Normalize the given sequence

Args:
    sequence: str:
        The sequence to normalize

Returns:
    The normalized string
)r   	normalize)r   rc   s     r   re   BaseTokenizer.normalize   s     ((22r   pairis_pretokenizedra   c                 V    Uc  [        S5      eU R                  R                  XX45      $ )a  Encode the given sequence and pair. This method can process raw text sequences as well
as already pre-tokenized sequences.

Args:
    sequence: InputSequence:
        The sequence we want to encode. This sequence can be either raw text or
        pre-tokenized, according to the `is_pretokenized` argument:

        - If `is_pretokenized=False`: `InputSequence` is expected to be `str`
        - If `is_pretokenized=True`: `InputSequence` is expected to be
            `Union[List[str], Tuple[str]]`

    is_pretokenized: bool:
        Whether the input is already pre-tokenized.

    add_special_tokens: bool:
        Whether to add the special tokens while encoding.

Returns:
    An Encoding
z"encode: `sequence` can't be `None`)
ValueErrorr   encode)r   rc   rg   rh   ra   s        r   rk   BaseTokenizer.encode   s-    8 ABB%%hoZZr   inputsc                 V    Uc  [        S5      eU R                  R                  XU5      $ )ai  Encode the given inputs. This method accept both raw text sequences as well as already
pre-tokenized sequences.

Args:
    inputs: List[EncodeInput]:
        A list of single sequences or pair sequences to encode. Each `EncodeInput` is
        expected to be of the following form:
            `Union[InputSequence, Tuple[InputSequence, InputSequence]]`

        Each `InputSequence` can either be raw text or pre-tokenized,
        according to the `is_pretokenized` argument:

        - If `is_pretokenized=False`: `InputSequence` is expected to be `str`
        - If `is_pretokenized=True`: `InputSequence` is expected to be
            `Union[List[str], Tuple[str]]`

    is_pretokenized: bool:
        Whether the input is already pre-tokenized.

    add_special_tokens: bool:
        Whether to add the special tokens while encoding.

Returns:
    A list of Encoding
z&encode_batch: `inputs` can't be `None`)rj   r   encode_batch)r   rm   rh   ra   s       r   ro   BaseTokenizer.encode_batch   s.    @ >EFF++FEWXXr   idsskip_special_tokensc                 P    Uc  [        S5      eU R                  R                  XS9$ )a  Decode the given list of ids to a string sequence

Args:
    ids: List[unsigned int]:
        A list of ids to be decoded

    skip_special_tokens: (`optional`) boolean:
        Whether to remove all the special tokens from the output string

Returns:
    The decoded string
z6None input is not valid. Should be a list of integers.rr   )rj   r   decode)r   rq   rr   s      r   ru   BaseTokenizer.decode  s-     ;UVV%%c%SSr   	sequencesc                 P    Uc  [        S5      eU R                  R                  XS9$ )a<  Decode the list of sequences to a list of string sequences

Args:
    sequences: List[List[unsigned int]]:
        A list of sequence of ids to be decoded

    skip_special_tokens: (`optional`) boolean:
        Whether to remove all the special tokens from the output strings

Returns:
    A list of decoded strings
z<None input is not valid. Should be list of list of integers.rt   )rj   r   decode_batch)r   rw   rr   s      r   ry   BaseTokenizer.decode_batch  s.     [\\++I+__r   tokenc                 8    U R                   R                  U5      $ )zConvert the given token to its corresponding id

Args:
    token: str:
        The token to convert

Returns:
    The corresponding id if it exists, None otherwise
)r   token_to_id)r   r{   s     r   r}   BaseTokenizer.token_to_id*  s     **511r   idc                 8    U R                   R                  U5      $ )zConvert the given token id to its corresponding string

Args:
    token: id:
        The token id to convert

Returns:
    The corresponding string if it exists, None otherwise
)r   id_to_token)r   r   s     r   r   BaseTokenizer.id_to_token6  s     **2..r   	directoryprefixc                 H    U R                   R                  R                  XS9$ )zSave the current model to the given directory

Args:
    directory: str:
        A path to the destination directory

    prefix: (Optional) str:
        An optional prefix, used to prefix each file name
)r   )r   modelsave)r   r   r   s      r   
save_modelBaseTokenizer.save_modelB  s"     $$))))CCr   pathprettyc                 8    U R                   R                  X5      $ )ztSave the current Tokenizer at the given path

Args:
    path: str:
        A path to the destination Tokenizer file
)r   r   )r   r   r   s      r   r   BaseTokenizer.saveN  s     ##D11r   c                 8    U R                   R                  U5      $ )zGet a serialized JSON version of the Tokenizer as a str

Args:
    pretty: bool:
        Whether the JSON string should be prettified

Returns:
    str
)r   to_str)r   r   s     r   r   BaseTokenizer.to_strW  s     %%f--r   encodingc                 :    U R                   R                  XU5      $ )a  Apply all the post-processing steps to the given encodings.

The various steps are:
    1. Truncate according to global params (provided to `enable_truncation`)
    2. Apply the PostProcessor
    3. Pad according to global params. (provided to `enable_padding`)

Args:
    encoding: Encoding:
        The main Encoding to post process

    pair: Optional[Encoding]:
        An optional pair Encoding

    add_special_tokens: bool:
        Whether to add special tokens

Returns:
    The resulting Encoding
)r   post_process)r   r   rg   ra   s       r   r   BaseTokenizer.post_processc  s    . ++H<NOOr   c                 .    U R                   R                  $ r   r   r   r-   s    r   r   BaseTokenizer.model|  s    $$$r   r   c                 $    XR                   l        g r   r   )r   r   s     r   r   r     s     %r   c                 .    U R                   R                  $ r   r   
normalizerr-   s    r   r   BaseTokenizer.normalizer  s    )))r   r   c                 $    XR                   l        g r   r   )r   r   s     r   r   r     s    %/"r   c                 .    U R                   R                  $ r   r   pre_tokenizerr-   s    r   r   BaseTokenizer.pre_tokenizer  s    ,,,r   r   c                 $    XR                   l        g r   r   )r   r   s     r   r   r     s    (5%r   c                 .    U R                   R                  $ r   r   post_processorr-   s    r   r   BaseTokenizer.post_processor  s    ---r   r   c                 $    XR                   l        g r   r   )r   r   s     r   r   r     s    )7&r   c                 .    U R                   R                  $ r   r   decoderr-   s    r   r   BaseTokenizer.decoder  s    &&&r   r   c                 $    XR                   l        g r   r   )r   r   s     r   r   r     s    ")r   )r   r   r   )T)rightNr   r   z[PAD]N)r   longest_first)NFT)FT)F)NT)7__name__
__module____qualname____firstlineno__r   r   r.   boolintr3   r   r#   r8   r   r<   r*   r   rG   rJ   propertydictrM   rS   rV   rY   r   r   r]   ra   re   r   r
   rk   r	   ro   ru   ry   r}   r   r   r   r   r   r   r   setterr   r   r   r   r   r   r   r   __static_attributes__ r   r   r   r      sP   H) H
B B# B
N4 
N4S> 
N:$sJ*? :
S 
S 
S $+,0 !%&#* $(
C=(
 %SM(
 	(

 c](
 C=(
 (
T, '$ ' '_C _# _V^_bVc _ / *HTN * *2eCO&<!= 2# 2BeCO6L1M BRU B 
3# 
3# 
3 )- %#'[[ }%[ 	[
 ![ 
[H !&#'	#Y[!#Y #Y !	#Y
 
h#YJT$s) T(4. TTW T$`d49o `HUYN `fi `$
2 
2# 
2
/c 
/hsm 
/
DC 
D# 
D2 2d 2
.T 
. _cP P(0(:PW[P	P2 %u % % \\&5 & & *J * * 0Z 0 0 -| - - 6< 6 6 . . . 8] 8 8 ' ' ' ^^*w * *r   r   N)typingr   r   r   r   r   
tokenizersr   r	   r
   r   r   tokenizers.decodersr   tokenizers.modelsr   tokenizers.normalizersr   tokenizers.pre_tokenizersr   tokenizers.processorsr   r   Offsetsr   r   r   r   <module>r      s;    5 5 R R ' # - 2 / S/T* T*r   