"""Language Model Vocabulary"""

import sys
from collections import Counter
from collections.abc import Iterable
from functools import singledispatch
from itertools import chain


@singledispatch
def _dispatched_lookup(words, vocab):
    raise TypeError(f"Unsupported type for looking up in vocabulary: {type(words)}")


@_dispatched_lookup.register(Iterable)
def _(words, vocab):
    """Look up a sequence of words in the vocabulary.

    Returns a tuple of the looked up words.

    """
    return tuple(_dispatched_lookup(w, vocab) for w in words)


@_dispatched_lookup.register(str)
def _string_lookup(word, vocab):
    """Looks up one word in the vocabulary."""
    return word if word in vocab else vocab.unk_label


class Vocabulary:
    """Stores language model vocabulary.

    Satisfies two common language modeling requirements for a vocabulary:

    - When checking membership and calculating its size, filters items
      by comparing their counts to a cutoff value.
    - Adds a special "unknown" token which unseen words are mapped to.

    >>> words = ['a', 'c', '-', 'd', 'c', 'a', 'b', 'r', 'a', 'c', 'd']
    >>> from nltk.lm import Vocabulary
    >>> vocab = Vocabulary(words, unk_cutoff=2)

    Tokens with counts greater than or equal to the cutoff value will
    be considered part of the vocabulary.

    >>> vocab['c']
    3
    >>> 'c' in vocab
    True
    >>> vocab['d']
    2
    >>> 'd' in vocab
    True

    Tokens with frequency counts less than the cutoff value will be considered not
    part of the vocabulary even though their entries in the count dictionary are
    preserved.

    >>> vocab['b']
    1
    >>> 'b' in vocab
    False
    >>> vocab['aliens']
    0
    >>> 'aliens' in vocab
    False

    Keeping the count entries for seen words allows us to change the cutoff value
    without having to recalculate the counts.

    >>> vocab2 = Vocabulary(vocab.counts, unk_cutoff=1)
    >>> "b" in vocab2
    True

    The cutoff value influences not only membership checking but also the result of
    getting the size of the vocabulary using the built-in `len`.
    Note that while the number of keys in the vocabulary's counter stays the same,
    the items in the vocabulary differ depending on the cutoff.
    We use `sorted` to demonstrate because it keeps the order consistent.

    >>> sorted(vocab2.counts)
    ['-', 'a', 'b', 'c', 'd', 'r']
    >>> sorted(vocab2)
    ['-', '<UNK>', 'a', 'b', 'c', 'd', 'r']
    >>> sorted(vocab.counts)
    ['-', 'a', 'b', 'c', 'd', 'r']
    >>> sorted(vocab)
    ['<UNK>', 'a', 'c', 'd']
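
    The size reported by `len` reflects the same cutoff; the unknown label
    counts as a member of the vocabulary:

    >>> len(vocab2)
    7
    >>> len(vocab)
    4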

    In addition to items it gets populated with, the vocabulary stores a special
    token that stands in for so-called "unknown" items. By default it's "<UNK>".

    >>> "<UNK>" in vocab
    True

    We can look up words in a vocabulary using its `lookup` method.
    "Unseen" words (with counts less than cutoff) are looked up as the unknown label.
    If given one word (a string) as an input, this method will return a string.

    >>> vocab.lookup("a")
    'a'
    >>> vocab.lookup("aliens")
    '<UNK>'

    If given a sequence, it will return a tuple of the looked up words.

    >>> vocab.lookup(["p", 'a', 'r', 'd', 'b', 'c'])
    ('<UNK>', 'a', '<UNK>', 'd', '<UNK>', 'c')

    It's possible to update the counts after the vocabulary has been created.
    In general, the interface is the same as that of `collections.Counter`.

    >>> vocab['b']
    1
    >>> vocab.update(["b", "b", "c"])
    >>> vocab['b']
    3
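
    Because the update raised the count of "b" to the cutoff, membership
    changes as well:

    >>> "b" in vocab
    True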
    """

    def __init__(self, counts=None, unk_cutoff=1, unk_label="<UNK>"):
        """Create a new Vocabulary.

        :param counts: Optional iterable or `collections.Counter` instance to
                       pre-seed the Vocabulary. In case it is iterable, counts
                       are calculated.
        :param int unk_cutoff: Words that occur less frequently than this value
                               are not considered part of the vocabulary.
        :param unk_label: Label for marking words not part of vocabulary.

        """
        self.unk_label = unk_label
        if unk_cutoff < 1:
            raise ValueError(f"Cutoff value cannot be less than 1. Got: {unk_cutoff}")
        self._cutoff = unk_cutoff

        self.counts = Counter()
        self.update(counts if counts is not None else "")

    @property
    def cutoff(self):
        """Cutoff value.

        Items with count below this value are not considered part of vocabulary.

        """
        return self._cutoff

    def update(self, *counter_args, **counter_kwargs):
        """Update vocabulary counts.

        Wraps `collections.Counter.update` method.
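
        A small example of updating counts in place (membership still
        follows the cutoff):

        >>> from nltk.lm import Vocabulary
        >>> vocab = Vocabulary(unk_cutoff=2)
        >>> vocab.update(["a", "a", "b"])
        >>> vocab["a"]
        2
        >>> "b" in vocab
        False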

        """
        self.counts.update(*counter_args, **counter_kwargs)
        self._len = sum(1 for _ in self)

    def lookup(self, words):
        """Look up one or more words in the vocabulary.

        If passed one word as a string, will return that word or `self.unk_label`.
        Otherwise will assume it was passed a sequence of words, will try to look
        each of them up and return a tuple of the looked up words.

        :param words: Word(s) to look up.
        :type words: Iterable(str) or str
        :rtype: tuple(str) or str
        :raises: TypeError for types other than strings or iterables

        >>> from nltk.lm import Vocabulary
        >>> vocab = Vocabulary(["a", "b", "c", "a", "b"], unk_cutoff=2)
        >>> vocab.lookup("a")
        'a'
        >>> vocab.lookup("aliens")
        '<UNK>'
        >>> vocab.lookup(["a", "b", "c", ["x", "b"]])
        ('a', 'b', '<UNK>', ('<UNK>', 'b'))

        """
        return _dispatched_lookup(words, self)
zVocabulary.lookupc                 C   s   || j kr| jS | j| S r   )r   r   r   r!   itemr   r   r   __getitem__   s   zVocabulary.__getitem__c                 C   s   | | | j kS )zPOnly consider items with counts GE to cutoff as being in the
        vocabulary.)r%   r)   r   r   r   __contains__   s   zVocabulary.__contains__c                    s*   t  fdd jD  jr jgS g S )zKBuilding on membership check define how to iterate over
        vocabulary.c                 3   s    | ]	}| v r|V  qd S r   r   )r   r*   r$   r   r   r      s    z&Vocabulary.__iter__.<locals>.<genexpr>)r   r   r   r$   r   r$   r   __iter__   s   zVocabulary.__iter__c                 C   r#   )z1Computing size of vocabulary reflects the cutoff.)r'   r$   r   r   r   __len__   s   zVocabulary.__len__c                 C   s$   | j |j ko| j|jko| j|jkS r   )r   r%   r   )r!   otherr   r   r   __eq__   s
   

zVocabulary.__eq__c                 C   s   d | jj| j| jt| S )Nz/<{} with cutoff={} unk_label='{}' and {} items>)format	__class____name__r%   r   lenr$   r   r   r   __str__   s   zVocabulary.__str__)Nr   r   )r3   
__module____qualname____doc__r"   propertyr%   r    r(   r+   r,   r-   r.   r0   r5   r   r   r   r   r   %   s    
Y
	r   )r8   syscollectionsr   collections.abcr   	functoolsr   	itertoolsr   r   registerr   strr   r   r   r   r   r   <module>   s   

	
