"""
Multi-Word Expression Tokenizer

A ``MWETokenizer`` takes a string which has already been divided into tokens and
retokenizes it, merging multi-word expressions into single tokens, using a lexicon
of MWEs:


    >>> from nltk.tokenize import MWETokenizer

    >>> tokenizer = MWETokenizer([('a', 'little'), ('a', 'little', 'bit'), ('a', 'lot')])
    >>> tokenizer.add_mwe(('in', 'spite', 'of'))

    >>> tokenizer.tokenize('Testing testing testing one two three'.split())
    ['Testing', 'testing', 'testing', 'one', 'two', 'three']

    >>> tokenizer.tokenize('This is a test in spite'.split())
    ['This', 'is', 'a', 'test', 'in', 'spite']

    >>> tokenizer.tokenize('In a little or a little bit or a lot in spite of'.split())
    ['In', 'a_little', 'or', 'a_little_bit', 'or', 'a_lot', 'in_spite_of']

"""

from nltk.tokenize.api import TokenizerI
from nltk.util import Trie


class MWETokenizer(TokenizerI):
    """A tokenizer that processes tokenized text and merges multi-word expressions
    into single tokens.
    NÚ_c                 C   s   |sg }t |ƒ| _|| _dS )a¥  Initialize the multi-word tokenizer with a list of expressions and a
        separator

        :type mwes: list(list(str))
        :param mwes: A sequence of multi-word expressions to be merged, where
            each MWE is a sequence of strings.
        :type separator: str
        :param separator: String that should be inserted between words in a multi-word
            expression token. (Default is '_')
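
        For instance, with the default separator:

        >>> MWETokenizer([('New', 'York')]).tokenize('New York pizza'.split())
        ['New_York', 'pizza']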

        N)r   Ú_mwesÚ
_separator)ÚselfZmwesÚ	separator© r
   ú@/var/www/auris/lib/python3.10/site-packages/nltk/tokenize/mwe.pyÚ__init__(   s   

zMWETokenizer.__init__c                 C   s   | j  |¡ dS )a–  Add a multi-word expression to the lexicon (stored as a word trie)

        We use ``util.Trie`` to represent the trie. Its form is a dict of dicts.
        The key True marks the end of a valid MWE.

        :param mwe: The multi-word expression we're adding into the word trie
        :type mwe: tuple(str) or list(str)

        :Example:

        >>> tokenizer = MWETokenizer()
        >>> tokenizer.add_mwe(('a', 'b'))
        >>> tokenizer.add_mwe(('a', 'b', 'c'))
        >>> tokenizer.add_mwe(('a', 'x'))
        >>> expected = {'a': {'x': {True: None}, 'b': {True: None, 'c': {True: None}}}}
        >>> tokenizer._mwes == expected
        True
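
        Because ``Trie`` subclasses ``dict``, prefix lookups on the lexicon
        are ordinary dict access (a small added illustration of the structure
        shown above):

        >>> 'a' in tokenizer._mwes
        True
        >>> True in tokenizer._mwes['a']['b']
        True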

        N)r   Úinsert)r   Zmwer
   r
   r   Úadd_mwe9   s   zMWETokenizer.add_mwec                 C   sö   d}t |ƒ}g }||k ry|| | jv rj|}| j}d}||k r?|| |v r?|||  }|d }tj|v r5|}||k r?|| |v s$|dkrE|}tj|v sN|dkr^| | j |||… ¡¡ |}n| || ¡ |d7 }n| || ¡ |d7 }||k s|S )a¥  

        :param text: A list containing tokenized text
        :type text: list(str)
        :return: A list of the tokenized text with multi-words merged together
        :rtype: list(str)

        :Example:

        >>> tokenizer = MWETokenizer([('hors', "d'oeuvre")], separator='+')
        >>> tokenizer.tokenize("An hors d'oeuvre tonight, sir?".split())
        ['An', "hors+d'oeuvre", 'tonight,', 'sir?']

        """
        i = 0
        n = len(text)
        result = []

        while i < n:
            if text[i] in self._mwes:
                # Possible MWE match: walk the trie as far as the input
                # allows, remembering the end of the longest complete
                # expression seen so far.
                j = i
                trie = self._mwes
                last_match = -1
                while j < n and text[j] in trie:
                    trie = trie[text[j]]
                    j = j + 1
                    if Trie.LEAF in trie:
                        last_match = j
                if last_match > -1:
                    j = last_match

                if Trie.LEAF in trie or last_match > -1:
                    # Success: merge the matched span into a single token.
                    result.append(self._separator.join(text[i:j]))
                    i = j
                else:
                    # No complete MWE here, so emit the token unchanged.
                    result.append(text[i])
                    i += 1
            else:
                result.append(text[i])
                i += 1

        return result
çzMWETokenizer.tokenize)Nr   )Ú__name__Ú
__module__Ú__qualname__Ú__doc__r   r   r   r
   r
   r
   r   r   #   s
    
r   N)r   Znltk.tokenize.apir   Z	nltk.utilr   r   r
   r
   r
   r   Ú<module>   s   
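

# A minimal usage sketch, not part of the NLTK module itself: it demonstrates
# the greedy longest-match behaviour ('New York City' wins over 'New York')
# and a custom separator, using only the class defined above.
if __name__ == "__main__":
    tok = MWETokenizer([("New", "York"), ("New", "York", "City")], separator="+")
    tok.add_mwe(("Big", "Apple"))
    print(tok.tokenize("I love New York City and the Big Apple".split()))
    # prints: ['I', 'love', 'New+York+City', 'and', 'the', 'Big+Apple']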