import multiprocessing as mp
import random
import warnings
from collections.abc import Mapping
from dataclasses import dataclass
from random import randint
from typing import Any, Callable, Dict, List, NewType, Optional, Tuple, Union

import numpy as np

from ..models.bert import BertTokenizer, BertTokenizerFast
from ..tokenization_utils_base import PreTrainedTokenizerBase
from ..utils import PaddingStrategy

InputDataClass = NewType("InputDataClass", Any)

"""
A DataCollator is a function that takes a list of samples from a Dataset and collates them into a batch, as a
dictionary of PyTorch/TensorFlow tensors or NumPy arrays.
"""
DataCollator = NewType("DataCollator", Callable[[List[InputDataClass]], Dict[str, Any]])


class DataCollatorMixin:
    def __call__(self, features, return_tensors=None):
        if return_tensors is None:
            return_tensors = self.return_tensors
        if return_tensors == "tf":
            return self.tf_call(features)
        elif return_tensors == "pt":
            return self.torch_call(features)
        elif return_tensors == "np":
            return self.numpy_call(features)
        else:
            raise ValueError(f"Framework '{return_tensors}' not recognized!")


def pad_without_fast_tokenizer_warning(tokenizer, *pad_args, **pad_kwargs):
    """
    Pads without triggering the warning about how using the pad function is sub-optimal when using a fast tokenizer.
    """
    # To avoid errors when using feature extractors that have no deprecation-warning state
    if not hasattr(tokenizer, "deprecation_warnings"):
        return tokenizer.pad(*pad_args, **pad_kwargs)

    # Save the state of the warning, then disable it
    warning_state = tokenizer.deprecation_warnings.get("Asking-to-pad-a-fast-tokenizer", False)
    tokenizer.deprecation_warnings["Asking-to-pad-a-fast-tokenizer"] = True

    try:
        padded = tokenizer.pad(*pad_args, **pad_kwargs)
    finally:
        # Restore the state of the warning
        tokenizer.deprecation_warnings["Asking-to-pad-a-fast-tokenizer"] = warning_state

    return padded


def default_data_collator(features: List[InputDataClass], return_tensors="pt") -> Dict[str, Any]:
    """
    Very simple data collator that simply collates batches of dict-like objects and performs special handling for
    potential keys named:

        - `label`: handles a single value (int or float) per object
        - `label_ids`: handles a list of values per object

    Does not do any additional preprocessing: property names of the input object will be used as corresponding inputs
    to the model. See glue and ner for example of how it's useful.
    """
    if return_tensors == "pt":
        return torch_default_data_collator(features)
    elif return_tensors == "tf":
        return tf_default_data_collator(features)
    elif return_tensors == "np":
        return numpy_default_data_collator(features)


@dataclass
class DefaultDataCollator(DataCollatorMixin):
    """
    Very simple data collator that simply collates batches of dict-like objects and performs special handling for
    potential keys named:

        - `label`: handles a single value (int or float) per object
        - `label_ids`: handles a list of values per object

    Does not do any additional preprocessing: property names of the input object will be used as corresponding inputs
    to the model. See glue and ner for example of how it's useful.

    This is an object (like other data collators) rather than a pure function like default_data_collator. This can be
    helpful if you need to set a return_tensors value at initialization.

    Args:
        return_tensors (`str`, *optional*, defaults to `"pt"`):
            The type of Tensor to return. Allowable values are "np", "pt" and "tf".
    """

    return_tensors: str = "pt"

    def __call__(self, features: List[Dict[str, Any]], return_tensors=None) -> Dict[str, Any]:
        if return_tensors is None:
            return_tensors = self.return_tensors
        return default_data_collator(features, return_tensors)


def torch_default_data_collator(features: List[InputDataClass]) -> Dict[str, Any]:
    import torch

    if not isinstance(features[0], Mapping):
        features = [vars(f) for f in features]
    first = features[0]
    batch = {}

    # Special handling for labels: ensure the tensor is created with the correct dtype
    if "label" in first and first["label"] is not None:
        label = first["label"].item() if isinstance(first["label"], torch.Tensor) else first["label"]
        dtype = torch.long if isinstance(label, int) else torch.float
        batch["labels"] = torch.tensor([f["label"] for f in features], dtype=dtype)
    elif "label_ids" in first and first["label_ids"] is not None:
        if isinstance(first["label_ids"], torch.Tensor):
            batch["labels"] = torch.stack([f["label_ids"] for f in features])
        else:
            dtype = torch.long if isinstance(first["label_ids"][0], int) else torch.float
            batch["labels"] = torch.tensor([f["label_ids"] for f in features], dtype=dtype)

    # Handling of all other possible keys; use the first element to figure out which values are not None
    for k, v in first.items():
        if k not in ("label", "label_ids") and v is not None and not isinstance(v, str):
            if isinstance(v, torch.Tensor):
                batch[k] = torch.stack([f[k] for f in features])
            elif isinstance(v, np.ndarray):
                batch[k] = torch.from_numpy(np.stack([f[k] for f in features]))
            else:
                batch[k] = torch.tensor([f[k] for f in features])

    return batch


def tf_default_data_collator(features: List[InputDataClass]) -> Dict[str, Any]:
    import tensorflow as tf

    if not isinstance(features[0], Mapping):
        features = [vars(f) for f in features]
    first = features[0]
    batch = {}

    if "label" in first and first["label"] is not None:
        label_col_name = "label"
    elif "label_ids" in first and first["label_ids"] is not None:
        label_col_name = "label_ids"
    elif "labels" in first and first["labels"] is not None:
        label_col_name = "labels"
    else:
        label_col_name = None

    if label_col_name is not None:
        if isinstance(first[label_col_name], tf.Tensor):
            dtype = tf.int64 if first[label_col_name].dtype.is_integer else tf.float32
        elif isinstance(first[label_col_name], (np.ndarray, np.generic)):
            dtype = tf.int64 if np.issubdtype(first[label_col_name].dtype, np.integer) else tf.float32
        elif isinstance(first[label_col_name], (tuple, list)):
            dtype = tf.int64 if isinstance(first[label_col_name][0], int) else tf.float32
        else:
            dtype = tf.int64 if isinstance(first[label_col_name], int) else tf.float32
        batch["labels"] = tf.convert_to_tensor([f[label_col_name] for f in features], dtype=dtype)

    for k, v in first.items():
        if k not in ("label", "label_ids", "labels") and v is not None and not isinstance(v, str):
            if isinstance(v, (tf.Tensor, np.ndarray)):
                batch[k] = tf.stack([f[k] for f in features])
            else:
                batch[k] = tf.convert_to_tensor([f[k] for f in features])

    return batch


def numpy_default_data_collator(features: List[InputDataClass]) -> Dict[str, Any]:
    if not isinstance(features[0], Mapping):
        features = [vars(f) for f in features]
    first = features[0]
    batch = {}

    if "label" in first and first["label"] is not None:
        label = first["label"].item() if isinstance(first["label"], np.ndarray) else first["label"]
        dtype = np.int64 if isinstance(label, int) else np.float32
        batch["labels"] = np.array([f["label"] for f in features], dtype=dtype)
    elif "label_ids" in first and first["label_ids"] is not None:
        if isinstance(first["label_ids"], np.ndarray):
            batch["labels"] = np.stack([f["label_ids"] for f in features])
        else:
            dtype = np.int64 if isinstance(first["label_ids"][0], int) else np.float32
            batch["labels"] = np.array([f["label_ids"] for f in features], dtype=dtype)

    for k, v in first.items():
        if k not in ("label", "label_ids") and v is not None and not isinstance(v, str):
            if isinstance(v, np.ndarray):
                batch[k] = np.stack([f[k] for f in features])
            else:
                batch[k] = np.array([f[k] for f in features])

    return batch


@dataclass
class DataCollatorWithPadding:
    """
    Data collator that will dynamically pad the inputs received.

    Args:
        tokenizer ([`PreTrainedTokenizer`] or [`PreTrainedTokenizerFast`]):
            The tokenizer used for encoding the data.
        padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `True`):
            Select a strategy to pad the returned sequences (according to the model's padding side and padding index)
            among:

            - `True` or `'longest'` (default): Pad to the longest sequence in the batch (or no padding if only a single
              sequence is provided).
            - `'max_length'`: Pad to a maximum length specified with the argument `max_length` or to the maximum
              acceptable input length for the model if that argument is not provided.
            - `False` or `'do_not_pad'`: No padding (i.e., can output a batch with sequences of different lengths).
        max_length (`int`, *optional*):
            Maximum length of the returned list and optionally padding length (see above).
        pad_to_multiple_of (`int`, *optional*):
            If set, will pad the sequence to a multiple of the provided value.

            This is especially useful to enable the use of Tensor Cores on NVIDIA hardware with compute capability >=
            7.0 (Volta).
        return_tensors (`str`, *optional*, defaults to `"pt"`):
            The type of Tensor to return. Allowable values are "np", "pt" and "tf".
    """

    tokenizer: PreTrainedTokenizerBase
    padding: Union[bool, str, PaddingStrategy] = True
    max_length: Optional[int] = None
    pad_to_multiple_of: Optional[int] = None
    return_tensors: str = "pt"

    def __call__(self, features: List[Dict[str, Any]]) -> Dict[str, Any]:
        batch = pad_without_fast_tokenizer_warning(
            self.tokenizer,
            features,
            padding=self.padding,
            max_length=self.max_length,
            pad_to_multiple_of=self.pad_to_multiple_of,
            return_tensors=self.return_tensors,
        )
        if "label" in batch:
            batch["labels"] = batch["label"]
            del batch["label"]
        if "label_ids" in batch:
            batch["labels"] = batch["label_ids"]
            del batch["label_ids"]
        return batch
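

# A minimal sketch of the intended use (model/tokenizer names are illustrative, and `encoded_dataset`
# is assumed to be a dataset of tokenized examples of varying lengths):
#
#   from torch.utils.data import DataLoader
#   from transformers import AutoTokenizer
#
#   tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")
#   collator = DataCollatorWithPadding(tokenizer, pad_to_multiple_of=8)
#   loader = DataLoader(encoded_dataset, batch_size=16, collate_fn=collator)
#   # each batch now has `input_ids`/`attention_mask` padded to the longest sequence in that batch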


@dataclass
class DataCollatorForTokenClassification(DataCollatorMixin):
    """
    Data collator that will dynamically pad the inputs received, as well as the labels.

    Args:
        tokenizer ([`PreTrainedTokenizer`] or [`PreTrainedTokenizerFast`]):
            The tokenizer used for encoding the data.
        padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `True`):
            Select a strategy to pad the returned sequences (according to the model's padding side and padding index)
            among:

            - `True` or `'longest'` (default): Pad to the longest sequence in the batch (or no padding if only a single
              sequence is provided).
            - `'max_length'`: Pad to a maximum length specified with the argument `max_length` or to the maximum
              acceptable input length for the model if that argument is not provided.
            - `False` or `'do_not_pad'`: No padding (i.e., can output a batch with sequences of different lengths).
        max_length (`int`, *optional*):
            Maximum length of the returned list and optionally padding length (see above).
        pad_to_multiple_of (`int`, *optional*):
            If set, will pad the sequence to a multiple of the provided value.

            This is especially useful to enable the use of Tensor Cores on NVIDIA hardware with compute capability >=
            7.0 (Volta).
        label_pad_token_id (`int`, *optional*, defaults to -100):
            The id to use when padding the labels (-100 will be automatically ignored by PyTorch loss functions).
        return_tensors (`str`, *optional*, defaults to `"pt"`):
            The type of Tensor to return. Allowable values are "np", "pt" and "tf".
    """

    tokenizer: PreTrainedTokenizerBase
    padding: Union[bool, str, PaddingStrategy] = True
    max_length: Optional[int] = None
    pad_to_multiple_of: Optional[int] = None
    label_pad_token_id: int = -100
    return_tensors: str = "pt"

    def torch_call(self, features):
        import torch

        label_name = "label" if "label" in features[0].keys() else "labels"
        labels = [feature[label_name] for feature in features] if label_name in features[0].keys() else None

        no_labels_features = [{k: v for k, v in feature.items() if k != label_name} for feature in features]

        batch = pad_without_fast_tokenizer_warning(
            self.tokenizer,
            no_labels_features,
            padding=self.padding,
            max_length=self.max_length,
            pad_to_multiple_of=self.pad_to_multiple_of,
            return_tensors="pt",
        )

        if labels is None:
            return batch

        sequence_length = batch["input_ids"].shape[1]
        padding_side = self.tokenizer.padding_side

        def to_list(tensor_or_iterable):
            if isinstance(tensor_or_iterable, torch.Tensor):
                return tensor_or_iterable.tolist()
            return list(tensor_or_iterable)

        if padding_side == "right":
            batch[label_name] = [
                to_list(label) + [self.label_pad_token_id] * (sequence_length - len(label)) for label in labels
            ]
        else:
            batch[label_name] = [
                [self.label_pad_token_id] * (sequence_length - len(label)) + to_list(label) for label in labels
            ]

        batch[label_name] = torch.tensor(batch[label_name], dtype=torch.int64)
        return batch

    def tf_call(self, features):
        import tensorflow as tf

        label_name = "label" if "label" in features[0].keys() else "labels"
        labels = [feature[label_name] for feature in features] if label_name in features[0].keys() else None
        batch = pad_without_fast_tokenizer_warning(
            self.tokenizer,
            features,
            padding=self.padding,
            max_length=self.max_length,
            pad_to_multiple_of=self.pad_to_multiple_of,
            # Conversion to tensors will fail if we have labels as they are not of the same length yet.
            return_tensors="tf" if labels is None else None,
        )

        if labels is None:
            return batch

        sequence_length = tf.convert_to_tensor(batch["input_ids"]).shape[1]
        padding_side = self.tokenizer.padding_side
        if padding_side == "right":
            batch["labels"] = [
                list(label) + [self.label_pad_token_id] * (sequence_length - len(label)) for label in labels
            ]
        else:
            batch["labels"] = [
                [self.label_pad_token_id] * (sequence_length - len(label)) + list(label) for label in labels
            ]
        batch = {k: tf.convert_to_tensor(v, dtype=tf.int64) for k, v in batch.items()}
        return batch

    def numpy_call(self, features):
        label_name = "label" if "label" in features[0].keys() else "labels"
        labels = [feature[label_name] for feature in features] if label_name in features[0].keys() else None
        batch = pad_without_fast_tokenizer_warning(
            self.tokenizer,
            features,
            padding=self.padding,
            max_length=self.max_length,
            pad_to_multiple_of=self.pad_to_multiple_of,
            # Conversion to tensors will fail if we have labels as they are not of the same length yet.
            return_tensors="np" if labels is None else None,
        )

        if labels is None:
            return batch

        sequence_length = np.array(batch["input_ids"]).shape[1]
        padding_side = self.tokenizer.padding_side
        if padding_side == "right":
            batch["labels"] = [
                list(label) + [self.label_pad_token_id] * (sequence_length - len(label)) for label in labels
            ]
        else:
            batch["labels"] = [
                [self.label_pad_token_id] * (sequence_length - len(label)) + list(label) for label in labels
            ]
        batch = {k: np.array(v, dtype=np.int64) for k, v in batch.items()}
        return batch
du rRtd|jj d	td
d | D }|durm|| dkrm|| d | }| d t| |g|j}t| D ]!\}}|jdkr|||d|jd f< q~||||jd  df< q~|S )_Collate `examples` into a batch, using the information in `tokenizer` for padding if necessary.r   Nc                       g | ]
} j | jd qS r   )rR   rP   r;   erz   r    r!   r=         z(_torch_collate_batch.<locals>.<listcomp>c                 3   s    | ]
}| d  kV  qdS r   Nsizer;   xlength_of_firstr    r!   	<genexpr>  s    z'_torch_collate_batch.<locals>.<genexpr>)dimCYou are attempting to pad samples but the tokenizer you are using () does not have a pad token.c                 s   s    | ]}| d V  qdS r   r   r   r    r    r!   r     s    rx   r|   )rK   rL   r`   r_   r   rU   r   allrM   rS   	pad_tokenr   	__class__r$   maxZnew_fullr~   pad_token_id	enumerater   r   examplesr+   rf   are_tensors_same_lengthre   resultiexampler    )r   rK   r!   _torch_collate_batch  s.   

r   c           	         sL  dd l 	 t| d ttfrfdd| D } t| d  t fdd| D }|r;|d u s4 | dkr;j| ddS |jd u rJtd|j	j
 dtd	d | D }|d ure|| dkre|| d
 | }g }| d }tj|dftjd}| D ]$}|jdkr|t| |d< n|t| |d< |j|||jd qzj|ddS )Nr   c                    r   r   r   r   r   r    r!   r=     r   z%_tf_collate_batch.<locals>.<listcomp>c                 3       | ]	}t | kV  qd S r#   r~   r   r   r    r!   r         z$_tf_collate_batch.<locals>.<genexpr>Zaxisr   r   c                 s       | ]}t |V  qd S r#   r   r   r    r    r!   r         rx   r   rB   r|   )r   rx   )r   r   )Zconstant_values)rZ   rL   r`   r_   r~   r   rS   r   r   r   r$   r   rankr   zerosint32r   appendr)   r   )	r   r+   rf   r   re   r   r   Zpaddingsr   r    )r   r   r!   _tf_collate_batch  s4   

r   c                    s,  t | d ttfrdd | D } t| d  t fdd| D }|r4|du s- | dkr4tj| ddS |jdu rCtd|j	j
 d	td
d | D }|dur^|| dkr^|| d | }tjt| |f|j| d jd}t| D ]!\}}|jdkr|||d|jd f< qr||||jd  df< qr|S )r   r   c                 S   s   g | ]
}t j|t jd qS r   r   r   r    r    r!   r=     r   z(_numpy_collate_batch.<locals>.<listcomp>c                 3   r   r#   r   r   r   r    r!   r      r   z'_numpy_collate_batch.<locals>.<genexpr>Nr   r   r   c                 s   r   r#   r   r   r    r    r!   r     r   rx   )r   Z
fill_valuerC   r|   )rL   r`   r_   r~   r   r   rS   r   r   r   r$   r   fullr   rC   r   r   r   r   r    r   r!   _numpy_collate_batch  s*   
 
r   c                   @   s~   e Zd ZU dZeed< dZeee	e
f ed< dZee ed< dZee ed< dZe	ed	< d
eee	ef  fddZdd ZdS )DataCollatorForMultipleChoicea  


@dataclass
class DataCollatorForMultipleChoice(DataCollatorMixin):
    """
    Data collator that dynamically pads a batch of nested examples for multiple choice, so that all choices
    of all examples have the same length.

    Args:
        tokenizer ([`PreTrainedTokenizer`] or [`PreTrainedTokenizerFast`]):
            The tokenizer used for encoding the data.
        padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `True`):
            Select a strategy to pad the returned sequences according to the model's padding side and padding index
            among:

            - `True` or `'longest'` (default): Pad to the longest sequence in the batch (or no padding if only a
              single sequence is provided).
            - `'max_length'`: Pad to a maximum length specified with the argument `max_length` or to the maximum
              acceptable input length for the model if that argument is not provided.
            - `False` or `'do_not_pad'`: No padding (i.e., can output a batch with sequences of different lengths).
        max_length (`int`, *optional*):
            Maximum length of the returned list and optionally padding length (see above).
        pad_to_multiple_of (`int`, *optional*):
            Pad the sequence to a multiple of the provided value.

            This is especially useful to enable the use of Tensor Cores on NVIDIA hardware with compute capability >=
            7.0 (Volta).
        return_tensors (`str`, *optional*, defaults to `"pt"`):
            The type of Tensor to return. Allowable values are "np", "pt" and "tf".
    """

    tokenizer: PreTrainedTokenizerBase
    padding: Union[bool, str, PaddingStrategy] = True
    max_length: Optional[int] = None
    pad_to_multiple_of: Optional[int] = None
    return_tensors: str = "pt"

    def torch_call(self, features: List[Dict[str, Any]]) -> Dict[str, Any]:
        import torch

        label_name = "label" if "label" in features[0] else "labels"
        labels = [feature.pop(label_name) for feature in features]
        batch_size = len(features)
        num_choices = len(features[0]["input_ids"])
        # Flatten (batch, choices) into a single list of per-choice encodings before padding
        flat_examples = sum(
            ([{k: v[i] for k, v in feature.items()} for i in range(num_choices)] for feature in features), start=[]
        )

        batch = self.tokenizer.pad(
            flat_examples,
            padding=self.padding,
            max_length=self.max_length,
            pad_to_multiple_of=self.pad_to_multiple_of,
            return_tensors="pt",
        )

        batch = {k: v.view(batch_size, num_choices, -1) for k, v in batch.items()}
        batch["labels"] = torch.tensor(labels, dtype=torch.int64)
        return batch

    def tf_call(self, features: List[Dict[str, Any]]) -> Dict[str, Any]:
        import tensorflow as tf

        label_name = "label" if "label" in features[0] else "labels"
        labels = [feature.pop(label_name) for feature in features]
        batch_size = len(features)
        num_choices = len(features[0]["input_ids"])
        flattened_features = [
            [{k: v[i] for k, v in feature.items()} for i in range(num_choices)] for feature in features
        ]
        flattened_features = sum(flattened_features, [])

        batch = self.tokenizer.pad(
            flattened_features,
            padding=self.padding,
            max_length=self.max_length,
            pad_to_multiple_of=self.pad_to_multiple_of,
            return_tensors="tf",
        )

        batch = {k: tf.reshape(v, (batch_size, num_choices, -1)) for k, v in batch.items()}
        batch["labels"] = tf.convert_to_tensor(labels, dtype=tf.int64)
        return batch
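

# Shape sketch (hypothetical sizes): with 4 choices per example and a batch of 8, each feature value
# arrives as a nested list of per-choice encodings; after flattening and padding, every tensor is
# reshaped back to (batch_size, num_choices, seq_len):
#
#   collator = DataCollatorForMultipleChoice(tokenizer)
#   batch = collator(features)          # batch["input_ids"].shape == (8, 4, seq_len)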


@dataclass
class DataCollatorForSeq2Seq:
    """
    Data collator that will dynamically pad the inputs received, as well as the labels.

    Args:
        tokenizer ([`PreTrainedTokenizer`] or [`PreTrainedTokenizerFast`]):
            The tokenizer used for encoding the data.
        model ([`PreTrainedModel`], *optional*):
            The model that is being trained. If set and has the *prepare_decoder_input_ids_from_labels*, use it to
            prepare the *decoder_input_ids*

            This is useful when using *label_smoothing* to avoid calculating loss twice.
        padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `True`):
            Select a strategy to pad the returned sequences (according to the model's padding side and padding index)
            among:

            - `True` or `'longest'` (default): Pad to the longest sequence in the batch (or no padding if only a single
              sequence is provided).
            - `'max_length'`: Pad to a maximum length specified with the argument `max_length` or to the maximum
              acceptable input length for the model if that argument is not provided.
            - `False` or `'do_not_pad'`: No padding (i.e., can output a batch with sequences of different lengths).
        max_length (`int`, *optional*):
            Maximum length of the returned list and optionally padding length (see above).
        pad_to_multiple_of (`int`, *optional*):
            If set, will pad the sequence to a multiple of the provided value.

            This is especially useful to enable the use of Tensor Cores on NVIDIA hardware with compute capability >=
            7.0 (Volta).
        label_pad_token_id (`int`, *optional*, defaults to -100):
            The id to use when padding the labels (-100 will be automatically ignored by PyTorch loss functions).
        return_tensors (`str`, *optional*, defaults to `"pt"`):
            The type of Tensor to return. Allowable values are "np", "pt" and "tf".
    """

    tokenizer: PreTrainedTokenizerBase
    model: Optional[Any] = None
    padding: Union[bool, str, PaddingStrategy] = True
    max_length: Optional[int] = None
    pad_to_multiple_of: Optional[int] = None
    label_pad_token_id: int = -100
    return_tensors: str = "pt"

    def __call__(self, features, return_tensors=None):
        if return_tensors is None:
            return_tensors = self.return_tensors

        label_name = "label" if "label" in features[0].keys() else "labels"
        labels = [feature[label_name] for feature in features] if label_name in features[0].keys() else None
        if labels is not None and all(label is None for label in labels):
            labels = None
        non_labels_features = [{k: v for k, v in feature.items() if k != label_name} for feature in features]

        batch = pad_without_fast_tokenizer_warning(
            self.tokenizer,
            non_labels_features,
            padding=self.padding,
            max_length=self.max_length,
            pad_to_multiple_of=self.pad_to_multiple_of,
            return_tensors=return_tensors,
        )

        # We have to pad the labels manually as we cannot rely on `tokenizer.pad` and we need them to be of the same
        # length to return tensors.
        no_padding = self.padding is False or self.padding == PaddingStrategy.DO_NOT_PAD
        if labels is not None:
            if no_padding:
                if isinstance(features[0][label_name], list):
                    batch["labels"] = list(labels)
                else:
                    batch["labels"] = [np.concatenate([label, []]) for label in labels]
            else:
                max_padding = self.padding == PaddingStrategy.MAX_LENGTH and self.max_length is not None
                max_label_length = max(len(l) for l in labels) if not max_padding else self.max_length
                if self.pad_to_multiple_of is not None:
                    max_label_length = (
                        (max_label_length + self.pad_to_multiple_of - 1)
                        // self.pad_to_multiple_of
                        * self.pad_to_multiple_of
                    )

                padding_side = self.tokenizer.padding_side
                if isinstance(features[0][label_name], list):
                    batch["labels"] = [
                        label + [self.label_pad_token_id] * (max_label_length - len(label))
                        if padding_side == "right"
                        else [self.label_pad_token_id] * (max_label_length - len(label)) + label
                        for label in labels
                    ]
                else:
                    batch["labels"] = [
                        np.concatenate(
                            [label, np.array([self.label_pad_token_id] * (max_label_length - len(label)), dtype=np.int64)]
                        )
                        if padding_side == "right"
                        else np.concatenate(
                            [np.array([self.label_pad_token_id] * (max_label_length - len(label)), dtype=np.int64), label]
                        )
                        for label in labels
                    ]

        # Reintroduce side effects via the tokenizer that return the respective datatype for `return_tensors`
        if batch.get("labels", None) is not None:
            if return_tensors == "pt":
                import torch

                batch["labels"] = torch.tensor(batch["labels"], dtype=torch.int64)
            elif return_tensors == "tf":
                import tensorflow as tf

                batch["labels"] = tf.constant(batch["labels"], dtype=tf.int64)
            else:
                batch["labels"] = np.array(batch["labels"], dtype=np.int64)
        else:
            batch["labels"] = None

        # Prepare decoder_input_ids
        if (
            labels is not None
            and self.model is not None
            and hasattr(self.model, "prepare_decoder_input_ids_from_labels")
        ):
            decoder_input_ids = self.model.prepare_decoder_input_ids_from_labels(labels=batch["labels"])
            batch["decoder_input_ids"] = decoder_input_ids

        return batch
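

# A minimal sketch for seq2seq training (model/tokenizer names illustrative): passing the model lets
# the collator also build `decoder_input_ids` by shifting the padded labels.
#
#   collator = DataCollatorForSeq2Seq(tokenizer, model=model, label_pad_token_id=-100)
#   batch = collator(features)   # input_ids, attention_mask, labels, decoder_input_ids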


@dataclass
class DataCollatorForLanguageModeling(DataCollatorMixin):
    """
    Data collator used for language modeling. Inputs are dynamically padded to the maximum length of a batch if they
    are not all of the same length.

    Args:
        tokenizer ([`PreTrainedTokenizer`] or [`PreTrainedTokenizerFast`]):
            The tokenizer used for encoding the data.
        mlm (`bool`, *optional*, defaults to `True`):
            Whether or not to use masked language modeling. If set to `False`, the labels are the same as the inputs
            with the padding tokens ignored (by setting them to -100). Otherwise, the labels are -100 for non-masked
            tokens and the value to predict for the masked token.
        mlm_probability (`float`, *optional*, defaults to 0.15):
            The probability with which to (randomly) mask tokens in the input, when `mlm` is set to `True`.
        mask_replace_prob (`float`, *optional*, defaults to 0.8):
            The probability with which masked tokens are replaced by the tokenizer's mask token (e.g., `[MASK]`).
            Defaults to 0.8, meaning 80% of the masked tokens will be replaced with `[MASK]`.
            Only works when `mlm` is set to `True`.
        random_replace_prob (`float`, *optional*, defaults to 0.1):
            The probability with which masked tokens are replaced by random tokens from the tokenizer's vocabulary.
            Defaults to 0.1, meaning 10% of the masked tokens will be replaced with random tokens. The remaining
            masked tokens (1 - mask_replace_prob - random_replace_prob) are left unchanged.
            Only works when `mlm` is set to `True`.
        pad_to_multiple_of (`int`, *optional*):
            If set, will pad the sequence to a multiple of the provided value.
        return_tensors (`str`):
            The type of Tensor to return. Allowable values are "np", "pt" and "tf".
        seed (`int`, *optional*):
            The seed to use for the random number generator for masking. If not provided, the global RNG will be used.

    <Tip>

    For best performance, this data collator should be used with a dataset having items that are dictionaries or
    BatchEncoding, with the `"special_tokens_mask"` key, as returned by a [`PreTrainedTokenizer`] or a
    [`PreTrainedTokenizerFast`] with the argument `return_special_tokens_mask=True`.

    <Example Options and Expectations>

    1. Default Behavior:
        - `mask_replace_prob=0.8`, `random_replace_prob=0.1`.
        - Expect 80% of masked tokens replaced with `[MASK]`, 10% replaced with random tokens, and 10% left unchanged.

    2. All masked tokens replaced by `[MASK]`:
        - `mask_replace_prob=1.0`, `random_replace_prob=0.0`.
        - Expect all masked tokens to be replaced with `[MASK]`. No tokens are left unchanged or replaced with random tokens.

    3. No `[MASK]` replacement, only random tokens:
        - `mask_replace_prob=0.0`, `random_replace_prob=1.0`.
        - Expect all masked tokens to be replaced with random tokens. No `[MASK]` replacements or unchanged tokens.

    4. Balanced replacement:
        - `mask_replace_prob=0.5`, `random_replace_prob=0.4`.
        - Expect 50% of masked tokens replaced with `[MASK]`, 40% replaced with random tokens, and 10% left unchanged.

    Note:
        The sum of `mask_replace_prob` and `random_replace_prob` must not exceed 1. If their sum is less than 1, the
        remaining proportion will consist of masked tokens left unchanged.

    </Tip>
    """

    tokenizer: PreTrainedTokenizerBase
    mlm: bool = True
    mlm_probability: float = 0.15
    mask_replace_prob: float = 0.8
    random_replace_prob: float = 0.1
    pad_to_multiple_of: Optional[int] = None
    tf_experimental_compile: bool = False
    return_tensors: str = "pt"
    seed: Optional[int] = None

    def __post_init__(self):
        if self.mlm and self.tokenizer.mask_token is None:
            raise ValueError(
                "This tokenizer does not have a mask token which is necessary for masked language modeling. You "
                "should pass `mlm=False` to train on causal language modeling instead."
            )
        if self.mlm_probability < 0 or self.mlm_probability > 1:
            raise ValueError("mlm_probability should be between 0 and 1.")
        if self.mask_replace_prob + self.random_replace_prob > 1:
            raise ValueError("The sum of mask_replace_prob and random_replace_prob should not exceed 1")
        if self.mask_replace_prob < 0 or self.mask_replace_prob > 1:
            raise ValueError("mask_replace_prob should be between 0 and 1.")
        if self.random_replace_prob < 0 or self.random_replace_prob > 1:
            raise ValueError("random_replace_prob should be between 0 and 1.")

        self.mlm_probability = float(self.mlm_probability)
        self.mask_replace_prob = float(self.mask_replace_prob)
        self.random_replace_prob = float(self.random_replace_prob)

        if self.tf_experimental_compile:
            import tensorflow as tf

            self.tf_mask_tokens = tf.function(self.tf_mask_tokens, jit_compile=True)

        self.generator = None

    def get_generator(self, seed):
        if self.return_tensors == "pt":
            import torch

            return torch.Generator().manual_seed(seed)
        elif self.return_tensors == "tf":
            import tensorflow as tf

            return tf.random.Generator.from_seed(seed)
        else:
            return np.random.default_rng(seed)

    def create_rng(self):
        if mp.current_process().name == "MainProcess":
            # In the main process, seed the generator directly
            self.generator = self.get_generator(self.seed)
        else:
            import torch

            worker_info = torch.utils.data.get_worker_info()
            if worker_info is None:
                error_string = (
                    "Worker process information is not available for seeding the generator. This may be because"
                    " you are using multiprocessing without using a PyTorch DataLoader. The `seed` parameter can"
                    " only be used when using multiprocessing with a PyTorch DataLoader. Please either use a"
                    " single process or use a PyTorch DataLoader with multiple workers."
                )
                raise ValueError(error_string)
            # Use the worker id to seed each worker differently but deterministically
            self.generator = self.get_generator(self.seed + worker_info.id)

    @staticmethod
    def tf_bernoulli(shape, probability, generator=None):
        import tensorflow as tf

        prob_matrix = tf.fill(shape, probability)
        if generator:
            return tf.cast(generator.uniform(shape, 0, 1) < prob_matrix, tf.bool)
        return tf.cast(tf.random.uniform(shape, 0, 1) < prob_matrix, tf.bool)

    def tf_mask_tokens(
        self, inputs: Any, vocab_size, mask_token_id, special_tokens_mask: Optional[Any] = None
    ) -> Tuple[Any, Any]:
        """
        Prepare masked tokens inputs/labels for masked language modeling: 80% MASK, 10% random, 10% original.
        """
        import tensorflow as tf

        mask_token_id = tf.cast(mask_token_id, inputs.dtype)
        input_shape = tf.shape(inputs)
        # We sample a few tokens in each sequence for MLM training (with probability `self.mlm_probability`)
        masked_indices = self.tf_bernoulli(input_shape, self.mlm_probability, self.generator) & ~special_tokens_mask
        # Replace unmasked indices with -100 in the labels since we only compute loss on masked tokens
        labels = tf.where(masked_indices, inputs, -100)

        # mask_replace_prob% of the time, we replace masked input tokens with tokenizer.mask_token ([MASK])
        indices_replaced = self.tf_bernoulli(input_shape, self.mask_replace_prob, self.generator) & masked_indices
        inputs = tf.where(indices_replaced, mask_token_id, inputs)

        if self.mask_replace_prob == 1 or self.random_replace_prob == 0:
            return inputs, labels

        remaining_prob = 1 - self.mask_replace_prob
        # Scale random_replace_prob to the remaining probability mass
        random_replace_prob_scaled = self.random_replace_prob / remaining_prob

        # random_replace_prob% of the time, we replace masked input tokens with a random word
        indices_random = (
            self.tf_bernoulli(input_shape, random_replace_prob_scaled, self.generator)
            & masked_indices
            & ~indices_replaced
        )
        if self.generator:
            random_words = self.generator.uniform(input_shape, maxval=vocab_size, dtype=inputs.dtype)
        else:
            random_words = tf.random.uniform(input_shape, maxval=vocab_size, dtype=inputs.dtype)
        inputs = tf.where(indices_random, random_words, inputs)

        # The rest of the time we keep the masked input tokens unchanged
        return inputs, labels

    def tf_call(self, examples: List[Union[List[int], Any, Dict[str, Any]]]) -> Dict[str, Any]:
        import tensorflow as tf

        if self.seed and self.generator is None:
            self.create_rng()

        # Handle dict or lists with proper padding and conversion to tensor.
        if isinstance(examples[0], Mapping):
            batch = pad_without_fast_tokenizer_warning(
                self.tokenizer, examples, return_tensors="tf", pad_to_multiple_of=self.pad_to_multiple_of
            )
        else:
            batch = {
                "input_ids": _tf_collate_batch(examples, self.tokenizer, pad_to_multiple_of=self.pad_to_multiple_of)
            }

        # If special token mask has been preprocessed, pop it from the dict.
        special_tokens_mask = batch.pop("special_tokens_mask", None)
        if self.mlm:
            if special_tokens_mask is None:
                special_tokens_mask = [
                    self.tokenizer.get_special_tokens_mask(val, already_has_special_tokens=True)
                    for val in batch["input_ids"].numpy().tolist()
                ]
                # Cannot directly create as bool
                special_tokens_mask = tf.cast(tf.convert_to_tensor(special_tokens_mask, dtype=tf.int64), tf.bool)
            else:
                special_tokens_mask = tf.cast(special_tokens_mask, tf.bool)
            batch["input_ids"], batch["labels"] = self.tf_mask_tokens(
                tf.cast(batch["input_ids"], tf.int64),
                special_tokens_mask=special_tokens_mask,
                mask_token_id=self.tokenizer.mask_token_id,
                vocab_size=len(self.tokenizer),
            )
        else:
            labels = batch["input_ids"]
            if self.tokenizer.pad_token_id is not None:
                # Replace self.tokenizer.pad_token_id with -100
                labels = tf.where(labels == self.tokenizer.pad_token_id, -100, labels)
            else:
                labels = tf.identity(labels)  # Makes a copy, just in case
            batch["labels"] = labels
        return batch

    def torch_call(self, examples: List[Union[List[int], Any, Dict[str, Any]]]) -> Dict[str, Any]:
        if self.seed and self.generator is None:
            self.create_rng()

        # Handle dict or lists with proper padding and conversion to tensor.
        if isinstance(examples[0], Mapping):
            batch = pad_without_fast_tokenizer_warning(
                self.tokenizer, examples, return_tensors="pt", pad_to_multiple_of=self.pad_to_multiple_of
            )
        else:
            batch = {
                "input_ids": _torch_collate_batch(examples, self.tokenizer, pad_to_multiple_of=self.pad_to_multiple_of)
            }

        # If special token mask has been preprocessed, pop it from the dict.
        special_tokens_mask = batch.pop("special_tokens_mask", None)
        if self.mlm:
            batch["input_ids"], batch["labels"] = self.torch_mask_tokens(
                batch["input_ids"], special_tokens_mask=special_tokens_mask
            )
        else:
            labels = batch["input_ids"].clone()
            if self.tokenizer.pad_token_id is not None:
                labels[labels == self.tokenizer.pad_token_id] = -100
            batch["labels"] = labels
        return batch

    def torch_mask_tokens(self, inputs: Any, special_tokens_mask: Optional[Any] = None) -> Tuple[Any, Any]:
        """
        Prepare masked tokens inputs/labels for masked language modeling: 80% MASK, 10% random, 10% original.
        """
        import torch

        labels = inputs.clone()
        # We sample a few tokens in each sequence for MLM training (with probability `self.mlm_probability`)
        probability_matrix = torch.full(labels.shape, self.mlm_probability)
        if special_tokens_mask is None:
            special_tokens_mask = [
                self.tokenizer.get_special_tokens_mask(val, already_has_special_tokens=True) for val in labels.tolist()
            ]
            special_tokens_mask = torch.tensor(special_tokens_mask, dtype=torch.bool)
        else:
            special_tokens_mask = special_tokens_mask.bool()

        probability_matrix.masked_fill_(special_tokens_mask, value=0.0)
        masked_indices = torch.bernoulli(probability_matrix, generator=self.generator).bool()
        labels[~masked_indices] = -100  # We only compute loss on masked tokens

        # mask_replace_prob% of the time, we replace masked input tokens with tokenizer.mask_token ([MASK])
        indices_replaced = (
            torch.bernoulli(torch.full(labels.shape, self.mask_replace_prob), generator=self.generator).bool()
            & masked_indices
        )
        inputs[indices_replaced] = self.tokenizer.convert_tokens_to_ids(self.tokenizer.mask_token)

        if self.mask_replace_prob == 1 or self.random_replace_prob == 0:
            return inputs, labels

        remaining_prob = 1 - self.mask_replace_prob
        # Scale random_replace_prob to the remaining probability mass; for example, with
        # mask_replace_prob=0.8 and random_replace_prob=0.1, the scaled value is 0.1 / 0.2 = 0.5
        random_replace_prob_scaled = self.random_replace_prob / remaining_prob

        # random_replace_prob% of the time, we replace masked input tokens with a random word
        indices_random = (
            torch.bernoulli(torch.full(labels.shape, random_replace_prob_scaled), generator=self.generator).bool()
            & masked_indices
            & ~indices_replaced
        )
        random_words = torch.randint(len(self.tokenizer), labels.shape, dtype=torch.long, generator=self.generator)
        inputs[indices_random] = random_words[indices_random]

        # The rest of the time we keep the masked input tokens unchanged
        return inputs, labels

    def numpy_call(self, examples: List[Union[List[int], Any, Dict[str, Any]]]) -> Dict[str, Any]:
        if self.seed and self.generator is None:
            self.create_rng()

        # Handle dict or lists with proper padding and conversion to tensor.
        if isinstance(examples[0], Mapping):
            batch = pad_without_fast_tokenizer_warning(
                self.tokenizer, examples, return_tensors="np", pad_to_multiple_of=self.pad_to_multiple_of
            )
        else:
            batch = {
                "input_ids": _numpy_collate_batch(examples, self.tokenizer, pad_to_multiple_of=self.pad_to_multiple_of)
            }

        # If special token mask has been preprocessed, pop it from the dict.
        special_tokens_mask = batch.pop("special_tokens_mask", None)
        if self.mlm:
            batch["input_ids"], batch["labels"] = self.numpy_mask_tokens(
                batch["input_ids"], special_tokens_mask=special_tokens_mask
            )
        else:
            labels = np.copy(batch["input_ids"])
            if self.tokenizer.pad_token_id is not None:
                labels[labels == self.tokenizer.pad_token_id] = -100
            batch["labels"] = labels
        return batch

    def numpy_mask_tokens(self, inputs: Any, special_tokens_mask: Optional[Any] = None) -> Tuple[Any, Any]:
        """
        Prepare masked tokens inputs/labels for masked language modeling: 80% MASK, 10% random, 10% original.
        """
        labels = np.copy(inputs)
        # We sample a few tokens in each sequence for MLM training (with probability `self.mlm_probability`)
        probability_matrix = np.full(labels.shape, self.mlm_probability)
        if special_tokens_mask is None:
            special_tokens_mask = [
                self.tokenizer.get_special_tokens_mask(val, already_has_special_tokens=True) for val in labels.tolist()
            ]
            special_tokens_mask = np.array(special_tokens_mask, dtype=bool)
        else:
            special_tokens_mask = special_tokens_mask.astype(bool)

        probability_matrix[special_tokens_mask] = 0
        if self.generator:
            masked_indices = self.generator.binomial(1, probability_matrix, size=probability_matrix.shape).astype(bool)
        else:
            masked_indices = np.random.binomial(1, probability_matrix, size=probability_matrix.shape).astype(bool)
        labels[~masked_indices] = -100  # We only compute loss on masked tokens

        # mask_replace_prob% of the time, we replace masked input tokens with tokenizer.mask_token ([MASK])
        if self.generator:
            indices_replaced = (
                self.generator.binomial(1, self.mask_replace_prob, size=labels.shape).astype(bool) & masked_indices
            )
        else:
            indices_replaced = (
                np.random.binomial(1, self.mask_replace_prob, size=labels.shape).astype(bool) & masked_indices
            )
        inputs[indices_replaced] = self.tokenizer.mask_token_id

        if self.mask_replace_prob == 1 or self.random_replace_prob == 0:
            return inputs, labels

        remaining_prob = 1 - self.mask_replace_prob
        random_replace_prob_scaled = self.random_replace_prob / remaining_prob

        # random_replace_prob% of the time, we replace masked input tokens with a random word
        if self.generator:
            indices_random = (
                self.generator.binomial(1, random_replace_prob_scaled, size=labels.shape).astype(bool)
                & masked_indices
                & ~indices_replaced
            )
            random_words = self.generator.integers(
                low=0, high=len(self.tokenizer), size=np.count_nonzero(indices_random), dtype=np.int64
            )
        else:
            indices_random = (
                np.random.binomial(1, random_replace_prob_scaled, size=labels.shape).astype(bool)
                & masked_indices
                & ~indices_replaced
            )
            random_words = np.random.randint(
                low=0, high=len(self.tokenizer), size=np.count_nonzero(indices_random), dtype=np.int64
            )
        inputs[indices_random] = random_words

        # The rest of the time we keep the masked input tokens unchanged
        return inputs, labels


@dataclass
class DataCollatorForWholeWordMask(DataCollatorForLanguageModeling):
    """
    Data collator used for language modeling that masks entire words.

    - collates batches of tensors, honoring their tokenizer's pad_token
    - preprocesses batches for masked language modeling

    <Tip>

    This collator relies on details of the implementation of subword tokenization by [`BertTokenizer`], specifically
    that subword tokens are prefixed with *##*. For tokenizers that do not adhere to this scheme, this collator will
    produce an output that is roughly equivalent to [`.DataCollatorForLanguageModeling`].

    </Tip>
    """

    def torch_call(self, examples: List[Union[List[int], Any, Dict[str, Any]]]) -> Dict[str, Any]:
        if self.seed and self.generator is None:
            self.create_rng()

        if isinstance(examples[0], Mapping):
            input_ids = [e["input_ids"] for e in examples]
        else:
            input_ids = examples
            examples = [{"input_ids": e} for e in examples]

        batch_input = _torch_collate_batch(input_ids, self.tokenizer, pad_to_multiple_of=self.pad_to_multiple_of)

        mask_labels = []
        for e in examples:
            ref_tokens = []
            for id in tolist(e["input_ids"]):
                token = self.tokenizer._convert_id_to_token(id)
                ref_tokens.append(token)

            # For Chinese tokens, we need extra info to mark sub-words, e.g. [喜,欢] -> [喜,##欢]
            if "chinese_ref" in e:
                ref_pos = tolist(e["chinese_ref"])
                len_seq = len(e["input_ids"])
                for i in range(len_seq):
                    if i in ref_pos:
                        ref_tokens[i] = "##" + ref_tokens[i]
            mask_labels.append(self._whole_word_mask(ref_tokens))
        batch_mask = _torch_collate_batch(mask_labels, self.tokenizer, pad_to_multiple_of=self.pad_to_multiple_of)
        inputs, labels = self.torch_mask_tokens(batch_input, batch_mask)
        return {"input_ids": inputs, "labels": labels}

    def tf_call(self, examples: List[Union[List[int], Any, Dict[str, Any]]]) -> Dict[str, Any]:
        import tensorflow as tf

        if self.seed and self.generator is None:
            self.create_rng()

        if isinstance(examples[0], Mapping):
            input_ids = [e["input_ids"] for e in examples]
        else:
            input_ids = examples
            examples = [{"input_ids": e} for e in examples]

        batch_input = _tf_collate_batch(input_ids, self.tokenizer, pad_to_multiple_of=self.pad_to_multiple_of)

        mask_labels = []
        for e in examples:
            ref_tokens = []
            for id in tolist(e["input_ids"]):
                token = self.tokenizer._convert_id_to_token(id)
                ref_tokens.append(token)

            if "chinese_ref" in e:
                ref_pos = tolist(e["chinese_ref"])
                len_seq = len(e["input_ids"])
                for i in range(len_seq):
                    if i in ref_pos:
                        ref_tokens[i] = "##" + ref_tokens[i]
            mask_labels.append(self._whole_word_mask(ref_tokens))
        batch_mask = _tf_collate_batch(mask_labels, self.tokenizer, pad_to_multiple_of=self.pad_to_multiple_of)
        inputs, labels = self.tf_mask_tokens(tf.cast(batch_input, tf.int64), batch_mask)
        return {"input_ids": inputs, "labels": labels}

    def numpy_call(self, examples: List[Union[List[int], Any, Dict[str, Any]]]) -> Dict[str, Any]:
        if self.seed and self.generator is None:
            self.create_rng()

        if isinstance(examples[0], Mapping):
            input_ids = [e["input_ids"] for e in examples]
        else:
            input_ids = examples
            examples = [{"input_ids": e} for e in examples]

        batch_input = _numpy_collate_batch(input_ids, self.tokenizer, pad_to_multiple_of=self.pad_to_multiple_of)

        mask_labels = []
        for e in examples:
            ref_tokens = []
            for id in tolist(e["input_ids"]):
                token = self.tokenizer._convert_id_to_token(id)
                ref_tokens.append(token)

            if "chinese_ref" in e:
                ref_pos = tolist(e["chinese_ref"])
                len_seq = len(e["input_ids"])
                for i in range(len_seq):
                    if i in ref_pos:
                        ref_tokens[i] = "##" + ref_tokens[i]
            mask_labels.append(self._whole_word_mask(ref_tokens))
        batch_mask = _numpy_collate_batch(mask_labels, self.tokenizer, pad_to_multiple_of=self.pad_to_multiple_of)
        inputs, labels = self.numpy_mask_tokens(batch_input, batch_mask)
        return {"input_ids": inputs, "labels": labels}

    def _shuffle(self, cand_indexes):
        if self.generator is None:
            random.shuffle(cand_indexes)
            return cand_indexes

        if self.return_tensors == "pt":
            import torch

            indices = torch.randperm(len(cand_indexes), generator=self.generator)
            return [cand_indexes[i] for i in indices]
        elif self.return_tensors == "tf":
            import tensorflow as tf

            seed = self.generator.make_seeds(count=1)[:, 0]
            indices = tf.random.experimental.stateless_shuffle(tf.range(len(cand_indexes)), seed=seed)
            return [cand_indexes[i] for i in indices.numpy().tolist()]
        elif self.return_tensors == "np":
            self.generator.shuffle(cand_indexes)
            return cand_indexes

    def _whole_word_mask(self, input_tokens: List[str], max_predictions=512):
        """
        Get 0/1 labels for masked tokens with whole word mask proxy
        """
        if not isinstance(self.tokenizer, (BertTokenizer, BertTokenizerFast)):
            warnings.warn(
                "DataCollatorForWholeWordMask is only suitable for BertTokenizer-like tokenizers. "
                "Please refer to the documentation for more information."
            )

        cand_indexes = []
        for i, token in enumerate(input_tokens):
            if token == "[CLS]" or token == "[SEP]":
                continue

            if len(cand_indexes) >= 1 and token.startswith("##"):
                cand_indexes[-1].append(i)
            else:
                cand_indexes.append([i])

        cand_indexes = self._shuffle(cand_indexes)
        num_to_predict = min(max_predictions, max(1, int(round(len(input_tokens) * self.mlm_probability))))
        masked_lms = []
        covered_indexes = set()
        for index_set in cand_indexes:
            if len(masked_lms) >= num_to_predict:
                break
            # If adding a whole-word mask would exceed the maximum number of predictions, skip this candidate.
            if len(masked_lms) + len(index_set) > num_to_predict:
                continue
            for index in index_set:
                covered_indexes.add(index)
                masked_lms.append(index)

        if len(covered_indexes) != len(masked_lms):
            raise ValueError("Length of covered_indexes is not equal to length of masked_lms.")
        mask_labels = [1 if i in covered_indexes else 0 for i in range(len(input_tokens))]
        return mask_labels

    def torch_mask_tokens(self, inputs: Any, mask_labels: Any) -> Tuple[Any, Any]:
        """
        Prepare masked tokens inputs/labels for masked language modeling: 80% MASK, 10% random, 10% original. Set
        'mask_labels' means we use whole word mask (wwm); we directly mask idxs according to its ref.
        """
        import torch

        if self.tokenizer.mask_token is None:
            raise ValueError(
                "This tokenizer does not have a mask token which is necessary for masked language modeling. Remove"
                " the --mlm flag if you want to use this tokenizer."
            )
        labels = inputs.clone()
        # We sample a few tokens in each sequence for masked-LM training (probability defaults to 0.15 in BERT/RoBERTa)
        probability_matrix = mask_labels

        special_tokens_mask = [
            self.tokenizer.get_special_tokens_mask(val, already_has_special_tokens=True) for val in labels.tolist()
        ]
        probability_matrix.masked_fill_(torch.tensor(special_tokens_mask, dtype=torch.bool), value=0.0)
        if self.tokenizer._pad_token is not None:
            padding_mask = labels.eq(self.tokenizer.pad_token_id)
            probability_matrix.masked_fill_(padding_mask, value=0.0)

        masked_indices = probability_matrix.bool()
        labels[~masked_indices] = -100  # We only compute loss on masked tokens

        # mask_replace_prob% of the time, we replace masked input tokens with tokenizer.mask_token ([MASK])
        indices_replaced = (
            torch.bernoulli(torch.full(labels.shape, self.mask_replace_prob), generator=self.generator).bool()
            & masked_indices
        )
        inputs[indices_replaced] = self.tokenizer.convert_tokens_to_ids(self.tokenizer.mask_token)

        if self.mask_replace_prob == 1 or self.random_replace_prob == 0:
            return inputs, labels

        remaining_prob = 1 - self.mask_replace_prob
        random_replace_prob_scaled = self.random_replace_prob / remaining_prob

        # random_replace_prob% of the time, we replace masked input tokens with a random word
        indices_random = (
            torch.bernoulli(torch.full(labels.shape, random_replace_prob_scaled), generator=self.generator).bool()
            & masked_indices
            & ~indices_replaced
        )
        random_words = torch.randint(len(self.tokenizer), labels.shape, dtype=torch.long, generator=self.generator)
        inputs[indices_random] = random_words[indices_random]

        # The rest of the time we keep the masked input tokens unchanged
        return inputs, labels

    def tf_mask_tokens(self, inputs: Any, mask_labels: Any) -> Tuple[Any, Any]:
        """
        Prepare masked tokens inputs/labels for masked language modeling: 80% MASK, 10% random, 10% original. Set
        'mask_labels' means we use whole word mask (wwm); we directly mask idxs according to its ref.
        """
        import tensorflow as tf

        input_shape = tf.shape(inputs)
        if self.tokenizer.mask_token is None:
            raise ValueError(
                "This tokenizer does not have a mask token which is necessary for masked language modeling. Remove"
                " the --mlm flag if you want to use this tokenizer."
            )
        labels = tf.identity(inputs)
        masked_indices = tf.cast(mask_labels, tf.bool)

        special_tokens_mask = [
            self.tokenizer.get_special_tokens_mask(val, already_has_special_tokens=True) for val in labels
        ]
        masked_indices = masked_indices & ~tf.cast(special_tokens_mask, dtype=tf.bool)
        if self.tokenizer._pad_token is not None:
            padding_mask = inputs == self.tokenizer.pad_token_id
            masked_indices = masked_indices & ~padding_mask

        labels = tf.where(masked_indices, inputs, -100)  # We only compute loss on masked tokens

        indices_replaced = self.tf_bernoulli(input_shape, self.mask_replace_prob, self.generator) & masked_indices
        inputs = tf.where(indices_replaced, self.tokenizer.mask_token_id, inputs)

        if self.mask_replace_prob == 1 or self.random_replace_prob == 0:
            return inputs, labels

        remaining_prob = 1 - self.mask_replace_prob
        random_replace_prob_scaled = self.random_replace_prob / remaining_prob

        indices_random = (
            self.tf_bernoulli(input_shape, random_replace_prob_scaled, self.generator)
            & masked_indices
            & ~indices_replaced
        )
        if self.generator:
            random_words = self.generator.uniform(input_shape, maxval=len(self.tokenizer), dtype=tf.int64)
        else:
            random_words = tf.random.uniform(input_shape, maxval=len(self.tokenizer), dtype=tf.int64)
        inputs = tf.where(indices_random, random_words, inputs)

        # The rest of the time we keep the masked input tokens unchanged
        return inputs, labels

    def numpy_mask_tokens(self, inputs: Any, mask_labels: Any) -> Tuple[Any, Any]:
        """
        Prepare masked tokens inputs/labels for masked language modeling: 80% MASK, 10% random, 10% original. Set
        'mask_labels' means we use whole word mask (wwm); we directly mask idxs according to its ref.
        """
        if self.tokenizer.mask_token is None:
            raise ValueError(
                "This tokenizer does not have a mask token which is necessary for masked language modeling. Remove"
                " the --mlm flag if you want to use this tokenizer."
            )
        labels = np.copy(inputs)
        masked_indices = mask_labels.astype(bool)

        special_tokens_mask = [
            self.tokenizer.get_special_tokens_mask(val, already_has_special_tokens=True) for val in labels.tolist()
        ]
        masked_indices[np.array(special_tokens_mask, dtype=bool)] = 0
        if self.tokenizer._pad_token is not None:
            padding_mask = labels == self.tokenizer.pad_token_id
            masked_indices[padding_mask] = 0

        labels[~masked_indices] = -100  # We only compute loss on masked tokens

        if self.generator:
            indices_replaced = (
                self.generator.binomial(1, self.mask_replace_prob, size=labels.shape).astype(bool) & masked_indices
            )
        else:
            indices_replaced = (
                np.random.binomial(1, self.mask_replace_prob, size=labels.shape).astype(bool) & masked_indices
            )
        inputs[indices_replaced] = self.tokenizer.convert_tokens_to_ids(self.tokenizer.mask_token)

        if self.mask_replace_prob == 1 or self.random_replace_prob == 0:
            return inputs, labels

        remaining_prob = 1 - self.mask_replace_prob
        random_replace_prob_scaled = self.random_replace_prob / remaining_prob

        if self.generator:
            indices_random = (
                self.generator.binomial(1, random_replace_prob_scaled, size=labels.shape).astype(bool)
                & masked_indices
                & ~indices_replaced
            )
            random_words = self.generator.integers(
                low=0, high=len(self.tokenizer), size=np.count_nonzero(indices_random), dtype=np.int64
            )
        else:
            indices_random = (
                np.random.binomial(1, random_replace_prob_scaled, size=labels.shape).astype(bool)
                & masked_indices
                & ~indices_replaced
            )
            random_words = np.random.randint(
                low=0, high=len(self.tokenizer), size=np.count_nonzero(indices_random), dtype=np.int64
            )
        inputs[indices_random] = random_words

        # The rest of the time we keep the masked input tokens unchanged
        return inputs, labels


def tolist(x):
    if isinstance(x, list):
        return x
    elif hasattr(x, "numpy"):  # Checks for TF tensors without needing the import
        x = x.numpy()
    return x.tolist()


@dataclass
class DataCollatorForSOP(DataCollatorForLanguageModeling):
    """
    Data collator used for sentence order prediction task.

    - collates batches of tensors, honoring their tokenizer's pad_token
    - preprocesses batches for both masked language modeling and sentence order prediction
    """

    def __init__(self, *args, **kwargs):
        warnings.warn(
            "DataCollatorForSOP is deprecated and will be removed in a future version, you can now use "
            "DataCollatorForLanguageModeling instead.",
            FutureWarning,
        )

    def __call__(self, examples: List[Dict[str, Any]]) -> Dict[str, Any]:
        import torch
        from torch.nn.utils.rnn import pad_sequence

        input_ids = [example["input_ids"] for example in examples]
        input_ids = _torch_collate_batch(input_ids, self.tokenizer)
        input_ids, labels, attention_mask = self.mask_tokens(input_ids)

        token_type_ids = [example["token_type_ids"] for example in examples]
        # size of segment_ids varies because of randomness; pad with zeros at the end as in the original implementation
        token_type_ids = pad_sequence(token_type_ids, batch_first=True, padding_value=self.tokenizer.pad_token_id)

        sop_label_list = [example["sentence_order_label"] for example in examples]
        sentence_order_label = torch.stack(sop_label_list)

        return {
            "input_ids": input_ids,
            "labels": labels,
            "attention_mask": attention_mask,
            "token_type_ids": token_type_ids,
            "sentence_order_label": sentence_order_label,
        }

    def mask_tokens(self, inputs: Any) -> Tuple[Any, Any, Any]:
        """
        Prepare masked tokens inputs/labels/attention_mask for masked language modeling: 80% MASK, 10% random, 10%
        original. N-gram not applied yet.
        """
        import torch

        if self.tokenizer.mask_token is None:
            raise ValueError(
                "This tokenizer does not have a mask token which is necessary for masked language modeling. Remove"
                " the --mlm flag if you want to use this tokenizer."
            )

        labels = inputs.clone()
        # We sample a few tokens in each sequence for masked-LM training (with probability self.mlm_probability)
        probability_matrix = torch.full(labels.shape, self.mlm_probability)
        special_tokens_mask = [
            self.tokenizer.get_special_tokens_mask(val, already_has_special_tokens=True) for val in labels.tolist()
        ]
        probability_matrix.masked_fill_(torch.tensor(special_tokens_mask, dtype=torch.bool), value=0.0)
        if self.tokenizer._pad_token is not None:
            padding_mask = labels.eq(self.tokenizer.pad_token_id)
            probability_matrix.masked_fill_(padding_mask, value=0.0)
        masked_indices = torch.bernoulli(probability_matrix).bool()
        # The probability is `1` (masked); however, in the ALBERT model attention mask `0` means masked, so revert the value
        attention_mask = (~masked_indices).float()
        if self.tokenizer._pad_token is not None:
            attention_padding_mask = labels.eq(self.tokenizer.pad_token_id)
            attention_mask.masked_fill_(attention_padding_mask, value=1.0)
        labels[~masked_indices] = -100  # We only compute loss on masked tokens; -100 is the default for CE

        # 80% of the time, we replace masked input tokens with tokenizer.mask_token ([MASK])
        indices_replaced = torch.bernoulli(torch.full(labels.shape, 0.8)).bool() & masked_indices
        inputs[indices_replaced] = self.tokenizer.convert_tokens_to_ids(self.tokenizer.mask_token)

        # 10% of the time, we replace masked input tokens with a random word
        indices_random = torch.bernoulli(torch.full(labels.shape, 0.5)).bool() & masked_indices & ~indices_replaced
        random_words = torch.randint(len(self.tokenizer), labels.shape, dtype=torch.long)
        inputs[indices_random] = random_words[indices_random]

        # The rest of the time (10% of the time) we keep the masked input tokens unchanged
        return inputs, labels, attention_mask


@dataclass
class DataCollatorForPermutationLanguageModeling(DataCollatorMixin):
    """
    Data collator used for permutation language modeling.

    - collates batches of tensors, honoring their tokenizer's pad_token
    - preprocesses batches for permutation language modeling with procedures specific to XLNet
    """

    tokenizer: PreTrainedTokenizerBase
    plm_probability: float = 1 / 6
    max_span_length: int = 5  # maximum length of a span of masked tokens
    return_tensors: str = "pt"

    def torch_call(self, examples: List[Union[List[int], Any, Dict[str, Any]]]) -> Dict[str, Any]:
        if isinstance(examples[0], Mapping):
            examples = [e["input_ids"] for e in examples]
        batch = _torch_collate_batch(examples, self.tokenizer)
        inputs, perm_mask, target_mapping, labels = self.torch_mask_tokens(batch)
        return {"input_ids": inputs, "perm_mask": perm_mask, "target_mapping": target_mapping, "labels": labels}

    def tf_call(self, examples: List[Union[List[int], Any, Dict[str, Any]]]) -> Dict[str, Any]:
        if isinstance(examples[0], Mapping):
            examples = [e["input_ids"] for e in examples]
        batch = _tf_collate_batch(examples, self.tokenizer)
        inputs, perm_mask, target_mapping, labels = self.tf_mask_tokens(batch)
        return {"input_ids": inputs, "perm_mask": perm_mask, "target_mapping": target_mapping, "labels": labels}

    def numpy_call(self, examples: List[Union[List[int], Any, Dict[str, Any]]]) -> Dict[str, Any]:
        if isinstance(examples[0], Mapping):
            examples = [e["input_ids"] for e in examples]
        batch = _numpy_collate_batch(examples, self.tokenizer)
        inputs, perm_mask, target_mapping, labels = self.numpy_mask_tokens(batch)
        return {"input_ids": inputs, "perm_mask": perm_mask, "target_mapping": target_mapping, "labels": labels}

    def torch_mask_tokens(self, inputs: Any) -> Tuple[Any, Any, Any, Any]:
        """
        The masked tokens to be predicted for a particular sequence are determined by the following algorithm:

            0. Start from the beginning of the sequence by setting `cur_len = 0` (number of tokens processed so far).
            1. Sample a `span_length` from the interval `[1, max_span_length]` (length of span of tokens to be masked)
            2. Reserve a context of length `context_length = span_length / plm_probability` to surround span to be
               masked
            3. Sample a starting point `start_index` from the interval `[cur_len, cur_len + context_length -
               span_length]` and mask tokens `start_index:start_index + span_length`
            4. Set `cur_len = cur_len + context_length`. If `cur_len < max_len` (i.e. there are tokens remaining in the
               sequence to be processed), repeat from Step 1.
        """
        import torch

        if self.tokenizer.mask_token is None:
            raise ValueError(
                "This tokenizer does not have a mask token which is necessary for permutation language modeling."
                " Please add a mask token if you want to use this tokenizer."
            )

        if inputs.size(1) % 2 != 0:
            raise ValueError(
                "This collator requires that sequence lengths be even to create a leakage-free perm_mask. Please see"
                " relevant comments in source code for details."
            )

        labels = inputs.clone()
        # Creating the mask and target_mapping tensors
        masked_indices = torch.full(labels.shape, 0, dtype=torch.bool)
        target_mapping = torch.zeros((labels.size(0), labels.size(1), labels.size(1)), dtype=torch.float32)

        for i in range(labels.size(0)):
            # Start from the beginning of the sequence by setting `cur_len = 0` (number of tokens processed so far).
            cur_len = 0
            max_len = labels.size(1)

            while cur_len < max_len:
                # Sample a `span_length` from the interval `[1, max_span_length]`
                span_length = torch.randint(1, self.max_span_length + 1, (1,)).item()
                # Reserve a context of length `context_length = span_length / plm_probability` to surround the span
                context_length = int(span_length / self.plm_probability)
                # Sample a starting point and mask tokens `start_index:start_index + span_length`
                start_index = cur_len + torch.randint(context_length - span_length + 1, (1,)).item()
                masked_indices[i, start_index : start_index + span_length] = 1
                # Set `cur_len = cur_len + context_length`
                cur_len += context_length

            # Since we're replacing non-masked tokens with -100 in the labels tensor instead of skipping them
            # altogether, the i-th predict corresponds to the i-th token.
            target_mapping[i] = torch.eye(labels.size(1))

        special_tokens_mask = torch.tensor(
            [self.tokenizer.get_special_tokens_mask(val, already_has_special_tokens=True) for val in labels.tolist()],
            dtype=torch.bool,
        )
        masked_indices.masked_fill_(special_tokens_mask, value=0.0)
        if self.tokenizer._pad_token is not None:
            padding_mask = labels.eq(self.tokenizer.pad_token_id)
            masked_indices.masked_fill_(padding_mask, value=0.0)
        else:
            padding_mask = torch.zeros_like(masked_indices)  # guard for tokenizers without a pad token

        # Mask indicating non-functional tokens, where functional tokens are [SEP], [CLS], padding, etc.
        non_func_mask = ~(padding_mask | special_tokens_mask)

        inputs[masked_indices] = self.tokenizer.mask_token_id
        labels[~masked_indices] = -100  # We only compute loss on masked tokens

        perm_mask = torch.zeros((labels.size(0), labels.size(1), labels.size(1)), dtype=torch.float32)

        for i in range(labels.size(0)):
            # Generate permutation indices, i.e. sample a random factorisation order for the sequence. This will
            # determine which tokens a given token can attend to (encoded in `perm_mask`).
            # Note: the length of the permuted token sequence must be less than or equal to the reused sequence
            # length (see documentation for `mems`), otherwise information may leak through reuse. Here we assume
            # that the reused length is half of the sequence length and the permutation length equals the reused
            # length. This requires the sequence length to be even.

            # Create a linear factorisation order
            perm_index = torch.arange(labels.size(1))
            # Split this into two halves, assuming that half the sequence is reused each time
            perm_index = perm_index.reshape((-1, labels.size(1) // 2)).transpose(0, 1)
            # Permute the two halves such that they do not cross over
            perm_index = perm_index[torch.randperm(labels.size(1) // 2)]
            # Flatten this out into the desired permuted factorisation order
            perm_index = torch.flatten(perm_index.transpose(0, 1))
            # Set the permutation indices of non-masked (non-functional) tokens to the smallest index (-1) so that:
            # (1) they can be seen by all other positions
            # (2) they cannot see masked positions, so there won't be information leakage
            perm_index.masked_fill_(~masked_indices[i] & non_func_mask[i], -1)
            # The logic for whether the i-th token can attend to the j-th token based on the factorisation order:
            # 0 (can attend): perm_index[i] > perm_index[j] or j is neither masked nor a functional token
            # 1 (cannot attend): perm_index[i] <= perm_index[j] and j is either masked or a functional token
            perm_mask[i] = (
                perm_index.reshape((labels.size(1), 1)) <= perm_index.reshape((1, labels.size(1)))
            ) & masked_indices[i]

        return inputs.long(), perm_mask, target_mapping, labels.long()

    def tf_mask_tokens(self, inputs: Any) -> Tuple[Any, Any, Any, Any]:
        """
        The masked tokens to be predicted for a particular sequence are determined by the algorithm described in
        `torch_mask_tokens` above.
        """
        import tensorflow as tf

        if self.tokenizer.mask_token is None:
            raise ValueError(
                "This tokenizer does not have a mask token which is necessary for permutation language modeling."
                " Please add a mask token if you want to use this tokenizer."
            )

        if tf.shape(inputs)[1] % 2 != 0:
            raise ValueError(
                "This collator requires that sequence lengths be even to create a leakage-free perm_mask. Please see"
                " relevant comments in source code for details."
            )

        labels = tf.identity(inputs)
        # Creating the mask and target_mapping tensors
        masked_indices = np.full(labels.shape.as_list(), 0, dtype=bool)
        labels_shape = tf.shape(labels)
        target_mapping = np.zeros((labels_shape[0], labels_shape[1], labels_shape[1]), dtype=np.float32)

        for i in range(len(labels)):
            cur_len = 0
            max_len = tf.shape(labels)[1]

            while cur_len < max_len:
                # `randint` is inclusive on both ends, so this samples from `[1, max_span_length]`
                span_length = randint(1, self.max_span_length)
                context_length = int(span_length / self.plm_probability)
                start_index = cur_len + randint(0, context_length - span_length)
                masked_indices[i, start_index : start_index + span_length] = 1
                cur_len += context_length

            target_mapping[i] = np.eye(labels_shape[1])
        masked_indices = tf.cast(tf.convert_to_tensor(masked_indices), tf.bool)
        target_mapping = tf.convert_to_tensor(target_mapping)
        special_tokens_mask = tf.convert_to_tensor(
            [
                self.tokenizer.get_special_tokens_mask(val, already_has_special_tokens=True)
                for val in labels.numpy().tolist()
            ]
        )
        special_tokens_mask = tf.cast(special_tokens_mask, dtype=tf.bool)
        masked_indices = masked_indices & ~special_tokens_mask
        if self.tokenizer._pad_token is not None:
            padding_mask = labels == self.tokenizer.pad_token_id
            masked_indices = masked_indices & ~padding_mask
        else:
            padding_mask = tf.zeros_like(masked_indices)  # guard for tokenizers without a pad token

        # Mask indicating non-functional tokens, where functional tokens are [SEP], [CLS], padding, etc.
        non_func_mask = ~(padding_mask | special_tokens_mask)

        inputs = tf.where(masked_indices, self.tokenizer.mask_token_id, inputs)
        labels = tf.where(masked_indices, labels, -100)  # We only compute loss on masked tokens

        perm_mask = []

        for i in range(len(labels)):
            # Generate permutation indices, i.e. sample a random factorisation order for the sequence (see the
            # comments in `torch_mask_tokens` for the leakage-avoidance logic).
            perm_index = tf.range(labels_shape[1])
            perm_index = tf.transpose(tf.reshape(perm_index, (-1, labels_shape[1] // 2)))
            perm_index = tf.random.shuffle(perm_index)
            perm_index = tf.reshape(tf.transpose(perm_index), (-1,))
            # Set the permutation indices of non-masked (non-functional) tokens to the smallest index (-1)
            perm_index = tf.where(~masked_indices[i] & non_func_mask[i], -1, perm_index)
            perm_mask.append(
                (tf.reshape(perm_index, (labels_shape[1], 1)) <= tf.reshape(perm_index, (1, labels_shape[1])))
                & masked_indices[i]
            )
        perm_mask = tf.stack(perm_mask, axis=0)

        return (
            tf.cast(inputs, tf.int64),
            tf.cast(perm_mask, tf.float32),
            target_mapping,
            tf.cast(labels, tf.int64),
        )

    def numpy_mask_tokens(self, inputs: Any) -> Tuple[Any, Any, Any, Any]:
        """
        The masked tokens to be predicted for a particular sequence are determined by the algorithm described in
        `torch_mask_tokens` above.
        """
        if self.tokenizer.mask_token is None:
            raise ValueError(
                "This tokenizer does not have a mask token which is necessary for permutation language modeling."
                " Please add a mask token if you want to use this tokenizer."
            )

        if inputs.shape[1] % 2 != 0:
            raise ValueError(
                "This collator requires that sequence lengths be even to create a leakage-free perm_mask. Please see"
                " relevant comments in source code for details."
            )

        labels = np.copy(inputs)
        # Creating the mask and target_mapping tensors
        masked_indices = np.full(labels.shape, 0, dtype=bool)
        target_mapping = np.zeros((labels.shape[0], labels.shape[1], labels.shape[1]), dtype=np.float32)

        for i in range(labels.shape[0]):
            cur_len = 0
            max_len = labels.shape[1]

            while cur_len < max_len:
                # `randint` is inclusive on both ends, so this samples from `[1, max_span_length]`
                span_length = randint(1, self.max_span_length)
                context_length = int(span_length / self.plm_probability)
                start_index = cur_len + randint(0, context_length - span_length)
                masked_indices[i, start_index : start_index + span_length] = 1
                cur_len += context_length

            target_mapping[i] = np.eye(labels.shape[1])

        special_tokens_mask = np.array(
            [self.tokenizer.get_special_tokens_mask(val, already_has_special_tokens=True) for val in labels.tolist()],
            dtype=bool,
        )
        masked_indices[special_tokens_mask] = 0
        if self.tokenizer._pad_token is not None:
            padding_mask = labels == self.tokenizer.pad_token_id
            masked_indices[padding_mask] = 0
        else:
            padding_mask = np.zeros_like(masked_indices)  # guard for tokenizers without a pad token

        # Mask indicating non-functional tokens, where functional tokens are [SEP], [CLS], padding, etc.
        non_func_mask = ~(padding_mask | special_tokens_mask)

        inputs[masked_indices] = self.tokenizer.mask_token_id
        labels[~masked_indices] = -100  # We only compute loss on masked tokens

        perm_mask = np.zeros((labels.shape[0], labels.shape[1], labels.shape[1]), dtype=np.float32)

        for i in range(labels.shape[0]):
            # Generate permutation indices, i.e. sample a random factorisation order for the sequence (see the
            # comments in `torch_mask_tokens` for the leakage-avoidance logic).
            perm_index = np.arange(labels.shape[1])
            perm_index = perm_index.reshape((-1, labels.shape[1] // 2)).T
            np.random.shuffle(perm_index)
            perm_index = perm_index.T.flatten()
            # Set the permutation indices of non-masked (non-functional) tokens to the smallest index (-1)
            perm_index[~masked_indices[i] & non_func_mask[i]] = -1
            perm_mask[i] = (
                perm_index.reshape((labels.shape[1], 1)) <= perm_index.reshape((1, labels.shape[1]))
            ) & masked_indices[i]

        return inputs.astype(np.int64), perm_mask, target_mapping, labels.astype(np.int64)
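

# Shape sketch for the XLNet-style collator (hypothetical batch size B, even seq_len T):
#
#   collator = DataCollatorForPermutationLanguageModeling(tokenizer, plm_probability=1 / 6, max_span_length=5)
#   batch = collator(examples)
#   batch["perm_mask"].shape       # (B, T, T): 1 where token i may NOT attend to token j
#   batch["target_mapping"].shape  # (B, T, T): identity, one row per predicted position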


class DataCollatorWithFlattening(DefaultDataCollator):
    """
    Data collator used for the padding-free approach. Does the following:

    - concatenates the entire mini batch into a single long sequence of shape [1, total_tokens]
    - uses `separator_id` to separate sequences within the concatenated `labels`, default value is -100
    - no padding will be added, returns `input_ids`, `labels` and `position_ids` by default
    - optionally returns the kwargs contained in FlashAttentionKwargs
    - optionally returns seq_idx indicating which sequence each token belongs to

    <Tip warning={true}>

    Using `DataCollatorWithFlattening` will flatten the entire mini batch into a single long sequence.
    Make sure your attention computation is able to handle it!

    </Tip>
    """

    def __init__(
        self,
        *args,
        return_position_ids=True,
        separator_id=-100,
        return_flash_attn_kwargs=False,
        return_seq_idx=False,
        **kwargs,
    ):
        super().__init__(*args, **kwargs)
        self.return_position_ids = return_position_ids
        self.separator_id = separator_id
        self.return_flash_attn_kwargs = return_flash_attn_kwargs
        self.return_seq_idx = return_seq_idx
        self._int_64_keys = {"input_ids", "position_ids", "labels"}
        self._batch_dim_keys = {"input_ids", "position_ids", "labels", "seq_idx"}
        self._py_int_keys = {"max_length_q", "max_length_k"}

    def __call__(self, features, return_tensors=None, separator_id=None):
        if return_tensors is None:
            return_tensors = self.return_tensors
        if separator_id is None:
            separator_id = self.separator_id
        is_labels_provided = "labels" in features[0]
        batch = {"input_ids": [], "labels": []}
        if self.return_position_ids:
            batch.update({"position_ids": []})
        if self.return_seq_idx:
            batch.update({"seq_idx": []})
        if self.return_flash_attn_kwargs:
            cu_seq_lens = [0]
            max_length = 0
        for seq_idx, sample in enumerate(features):
            input_ids = sample["input_ids"]
            batch["input_ids"] += input_ids
            if is_labels_provided:
                batch["labels"] += [separator_id] + sample["labels"][1:]
            else:
                batch["labels"] += [separator_id] + input_ids[1:]
            if self.return_position_ids:
                batch["position_ids"] += list(range(len(input_ids)))
            if self.return_seq_idx:
                batch["seq_idx"] += [seq_idx for _ in range(len(input_ids))]
            if self.return_flash_attn_kwargs:
                cu_seq_lens.append(cu_seq_lens[-1] + len(input_ids))
                max_length = max(max_length, len(input_ids))

        if self.return_flash_attn_kwargs:
            batch["cu_seq_lens_q"] = batch["cu_seq_lens_k"] = cu_seq_lens
            batch["max_length_q"] = batch["max_length_k"] = max_length

        if return_tensors == "pt":
            import torch

            data_cls = torch.tensor
            dtype_64 = torch.int64
            dtype_32 = torch.int32
        elif return_tensors == "np":
            data_cls = np.array
            dtype_64 = np.int64
            dtype_32 = np.int32
        else:
            raise ValueError(f'return_tensors must be one of ("pt", "np"), {return_tensors=} not supported')

        for k, v in batch.items():
            if k in self._batch_dim_keys:
                v = [v]
            # Flash attention max_length_{q,k} must remain Python ints
            if k not in self._py_int_keys:
                batch[k] = data_cls(v, dtype=dtype_64 if k in self._int_64_keys else dtype_32)

        return batch