"""PyTorch MarianMTModel model, ported from the Marian C++ repo."""

import copy
import math
from typing import List, Optional, Tuple, Union

import numpy as np
import torch
import torch.utils.checkpoint
from torch import nn
from torch.nn import CrossEntropyLoss

from ...activations import ACT2FN
from ...cache_utils import Cache, EncoderDecoderCache
from ...generation import GenerationMixin
from ...modeling_attn_mask_utils import AttentionMaskConverter, _prepare_4d_attention_mask
from ...modeling_outputs import (
    BaseModelOutput,
    BaseModelOutputWithPastAndCrossAttentions,
    CausalLMOutputWithCrossAttentions,
    Seq2SeqLMOutput,
    Seq2SeqModelOutput,
)
from ...modeling_utils import PreTrainedModel
from ...utils import auto_docstring, is_torch_flex_attn_available, is_torchdynamo_compiling, logging
from .configuration_marian import MarianConfig


if is_torch_flex_attn_available():
    from torch.nn.attention.flex_attention import BlockMask

    from ...integrations.flex_attention import make_flex_block_causal_mask


logger = logging.get_logger(__name__)


def shift_tokens_right(input_ids: torch.Tensor, pad_token_id: int, decoder_start_token_id: int):
    """
    Shift input ids one token to the right.
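
    For illustration (a hypothetical toy call, not part of the original docstring): with `pad_token_id=0`
    and `decoder_start_token_id=2`, the row `[5, 6, 7]` becomes `[2, 5, 6]`, and any `-100` values in a
    labels tensor would be replaced by `pad_token_id`.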
    """
    shifted_input_ids = input_ids.new_zeros(input_ids.shape)
    shifted_input_ids[:, 1:] = input_ids[:, :-1].clone()
    shifted_input_ids[:, 0] = decoder_start_token_id

    if pad_token_id is None:
        raise ValueError("self.model.config.pad_token_id has to be defined.")
    # replace possible -100 values in labels by `pad_token_id`
    shifted_input_ids.masked_fill_(shifted_input_ids == -100, pad_token_id)

    return shifted_input_ids


class MarianSinusoidalPositionalEmbedding(nn.Embedding):
    """This module produces sinusoidal positional embeddings of any length."""

    def __init__(self, num_positions: int, embedding_dim: int, padding_idx: Optional[int] = None) -> None:
        super().__init__(num_positions, embedding_dim)

    def _init_weight(self):
        """
        Identical to the XLM create_sinusoidal_embeddings except features are not interleaved. The cos features are in
        the 2nd half of the vector. [dim // 2:]
        """
        n_pos, dim = self.weight.shape
        position_enc = np.array(
            [[pos / np.power(10000, 2 * (j // 2) / dim) for j in range(dim)] for pos in range(n_pos)]
        )
        out = torch.empty(n_pos, dim, dtype=self.weight.dtype, requires_grad=False)
        sentinel = dim // 2 if dim % 2 == 0 else (dim // 2) + 1
        out[:, 0:sentinel] = torch.FloatTensor(np.sin(position_enc[:, 0::2]))
        out[:, sentinel:] = torch.FloatTensor(np.cos(position_enc[:, 1::2]))
        self.weight = nn.Parameter(out, requires_grad=False)

    @torch.no_grad()
    def forward(
        self,
        input_ids_shape: torch.Size,
        past_key_values_length: int = 0,
        position_ids: Optional[torch.Tensor] = None,
    ) -> torch.Tensor:
        """`input_ids_shape` is expected to be [bsz x seqlen]."""
        if position_ids is None:
            bsz, seq_len = input_ids_shape[:2]
            position_ids = torch.arange(
                past_key_values_length, past_key_values_length + seq_len, dtype=torch.long, device=self.weight.device
            )
        return super().forward(position_ids)
ededee dee f fddZ							dde
jdee
j dee dee
j dee
j dedee
j dee
jee
j eee
j  f fddZ  ZS )MarianAttentionz=Multi-headed attention from 'Attention Is All You Need' paper        FTN	embed_dim	num_headsdropout
is_decoderbias	is_causalconfig	layer_idxc	           	         s   t    || _|| _|| _|| | _|| _| j| | jkr*td| j d| d| jd | _|| _	|| _
|| _|d u rK| j	rKtd| jj d tj|||d| _tj|||d| _tj|||d| _tj|||d| _d S )Nz;embed_dim must be divisible by num_heads (got `embed_dim`: z and `num_heads`: z).g      zInstantiating a decoder z without passing `layer_idx` is not recommended and will lead to errors during the forward call, if caching is used. Please make sure to provide a `layer_idx` when creating this class.r`   )r-   r.   r\   r]   r^   head_dimrb   r#   scalingr_   ra   rc   loggerwarning_oncer1   rR   r   Lineark_projv_projq_projout_proj)	r/   r\   r]   r^   r_   r`   ra   rb   rc   r0   r$   r%   r.   s   s0   


zMarianAttention.__init__hidden_stateskey_value_statespast_key_valueattention_masklayer_head_maskoutput_attentionscache_positionr+   c                 C   sv  |du}|  \}	}
}| ||	d| j| jdd}|| j }|dur=t|tr;|j	
| j}|r7|j}n|j}n|}|rA|n|}|rX|durX|rX|j| j }|j| j }nE| |}| |}||	d| j| jdd}||	d| j| jdd}|dur|s|nd}|||| jd|i\}}|rd|j	| j< |	| j d| jf}|j| }|j| }|j| }| d}t||dd}|  |	| j |
|fkrtd|	| j |
|f d|   |dur|ddddddd|jd	 f }||	| j|
|| }||	| j |
|}tjj|dd
}|durN|  | jfkr3td| jf d|   |dddd||	| j|
| }||	| j |
|}|re||	| j|
|}||	| j |
|}nd}tjj|| j| jd}t||}|  |	| j |
| jfkrtd|	| j |
| jf d|   ||	| j|
| j}|dd}||	|
| j}| |}|||fS )z#Input shape: Batch x Time x ChannelNr    r   r2   rt   Tz$Attention weights should be of size z	, but is r;   z/Head mask for a single layer should be of size ptrainingz `attn_output` should be of size )sizerl   viewr]   re   Z	transposerf   
isinstancer   
is_updatedgetrc   Zcross_attention_cacheself_attention_cacheZ	key_cacheZvalue_cacherj   rk   updatereshaper@   Zbmmr#   r!   r   
functionalZsoftmaxr^   rx   r\   rm   )r/   rn   ro   rp   rq   rr   rs   rt   Zis_cross_attentionrP   tgt_len_Zquery_statesr|   Zcurr_past_key_valueZcurrent_statesZ
key_statesZvalue_statesZ
proj_shapeZsrc_lenattn_weightsZattn_weights_reshapedZ
attn_probsZattn_outputr$   r$   r%   rO      s   "








&
"

zMarianAttention.forward)r[   FTFNN)NNNNFN)rR   rS   rT   rU   rV   floatboolr   r   r.   r@   rX   r
   r   rO   rY   r$   r$   r0   r%   rZ   p   s`    	*	rZ   c                       sh   e Zd Zddedee f fddZ	ddejdejd	ejd
ee	 de
ejeej f f
ddZ  ZS )MarianEncoderLayerNrb   rc   c                    s   t    |j| _t|j | j|j|j||d| _t	
| j| _|j| _t|j | _|j| _t	| j|j| _t	|j| j| _t	
| j| _d S )N)r\   r]   r^   rb   rc   )r-   r.   d_modelr\   MARIAN_ATTENTION_CLASSES_attn_implementationZencoder_attention_headsattention_dropout	self_attnr   	LayerNormself_attn_layer_normr^   r	   activation_functionactivation_fnactivation_dropoutri   Zencoder_ffn_dimfc1fc2final_layer_normr/   rb   rc   r0   r$   r%   r.     s    
zMarianEncoderLayer.__init__Frn   rq   rr   rs   r+   c           
      C   s  |}| j ||||d\}}}tjj|| j| jd}|| }| |}|}| | |}tjj|| j| jd}| 	|}tjj|| j| jd}|| }| 
|}|jtjkrvt| sdt| rvt|jjd }tj|| |d}|f}	|r|	|f7 }	|	S )a  
        Args:
            hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)`
            attention_mask (`torch.FloatTensor`): attention mask of size
                `(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values.
            layer_head_mask (`torch.FloatTensor`): mask for attention heads in a given layer of size
                `(encoder_attention_heads,)`.
            output_attentions (`bool`, *optional*):
                Whether or not to return the attentions tensors of all attention layers. See `attentions` under
                returned tensors for more detail.
        """
        residual = hidden_states
        hidden_states, attn_weights, _ = self.self_attn(
            hidden_states=hidden_states,
            attention_mask=attention_mask,
            layer_head_mask=layer_head_mask,
            output_attentions=output_attentions,
        )
        hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
        hidden_states = residual + hidden_states
        hidden_states = self.self_attn_layer_norm(hidden_states)

        residual = hidden_states
        hidden_states = self.activation_fn(self.fc1(hidden_states))
        hidden_states = nn.functional.dropout(hidden_states, p=self.activation_dropout, training=self.training)
        hidden_states = self.fc2(hidden_states)
        hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
        hidden_states = residual + hidden_states
        hidden_states = self.final_layer_norm(hidden_states)

        if hidden_states.dtype == torch.float16 and (
            torch.isinf(hidden_states).any() or torch.isnan(hidden_states).any()
        ):
            clamp_value = torch.finfo(hidden_states.dtype).max - 1000
            hidden_states = torch.clamp(hidden_states, min=-clamp_value, max=clamp_value)

        outputs = (hidden_states,)

        if output_attentions:
            outputs += (attn_weights,)

        return outputs


MARIAN_ATTENTION_CLASSES = {"eager": MarianAttention}


class MarianDecoderLayer(nn.Module):
    def __init__(self, config: MarianConfig, layer_idx: Optional[int] = None):
        super().__init__()
        self.embed_dim = config.d_model

        self.self_attn = MARIAN_ATTENTION_CLASSES[config._attn_implementation](
            embed_dim=self.embed_dim,
            num_heads=config.decoder_attention_heads,
            dropout=config.attention_dropout,
            is_decoder=True,
            is_causal=True,
            config=config,
            layer_idx=layer_idx,
        )
        self.dropout = config.dropout
        self.activation_fn = ACT2FN[config.activation_function]
        self.activation_dropout = config.activation_dropout

        self.self_attn_layer_norm = nn.LayerNorm(self.embed_dim)
        self.encoder_attn = MARIAN_ATTENTION_CLASSES[config._attn_implementation](
            self.embed_dim,
            config.decoder_attention_heads,
            dropout=config.attention_dropout,
            is_decoder=True,
            config=config,
            layer_idx=layer_idx,
        )
        self.encoder_attn_layer_norm = nn.LayerNorm(self.embed_dim)
        self.fc1 = nn.Linear(self.embed_dim, config.decoder_ffn_dim)
        self.fc2 = nn.Linear(config.decoder_ffn_dim, self.embed_dim)
        self.final_layer_norm = nn.LayerNorm(self.embed_dim)

    def forward(
        self,
        hidden_states: torch.Tensor,
        attention_mask: Optional[torch.Tensor] = None,
        encoder_hidden_states: Optional[torch.Tensor] = None,
        encoder_attention_mask: Optional[torch.Tensor] = None,
        layer_head_mask: Optional[torch.Tensor] = None,
        cross_attn_layer_head_mask: Optional[torch.Tensor] = None,
        past_key_value: Optional[Cache] = None,
        output_attentions: Optional[bool] = False,
        use_cache: Optional[bool] = True,
        cache_position: Optional[torch.Tensor] = None,
    ) -> Tuple[torch.FloatTensor, Optional[Tuple[torch.FloatTensor, torch.FloatTensor]]]:
        """
        Args:
            hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)`
            attention_mask (`torch.FloatTensor`): attention mask of size
                `(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values.
            encoder_hidden_states (`torch.FloatTensor`):
                cross attention input to the layer of shape `(batch, seq_len, embed_dim)`
            encoder_attention_mask (`torch.FloatTensor`): encoder attention mask of size
                `(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values.
            layer_head_mask (`torch.FloatTensor`): mask for attention heads in a given layer of size
                `(encoder_attention_heads,)`.
            cross_attn_layer_head_mask (`torch.FloatTensor`): mask for cross-attention heads in a given layer of
                size `(decoder_attention_heads,)`.
            past_key_value (`Tuple(torch.FloatTensor)`): cached past key and value projection states
            output_attentions (`bool`, *optional*):
                Whether or not to return the attentions tensors of all attention layers. See `attentions` under
                returned tensors for more detail.
            cache_position (`torch.LongTensor` of shape `(sequence_length)`, *optional*):
                Indices depicting the position of the input sequence tokens in the sequence. It is used to update the
                cache in the correct position and to infer the complete sequence length.
        """
        residual = hidden_states

        # Self Attention
        hidden_states, self_attn_weights, _ = self.self_attn(
            hidden_states=hidden_states,
            past_key_value=past_key_value,
            attention_mask=attention_mask,
            layer_head_mask=layer_head_mask,
            output_attentions=output_attentions,
            cache_position=cache_position,
        )
        hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
        hidden_states = residual + hidden_states
        hidden_states = self.self_attn_layer_norm(hidden_states)

        # Cross-Attention Block
        cross_attn_weights = None
        if encoder_hidden_states is not None:
            residual = hidden_states

            hidden_states, cross_attn_weights, _ = self.encoder_attn(
                hidden_states=hidden_states,
                key_value_states=encoder_hidden_states,
                attention_mask=encoder_attention_mask,
                layer_head_mask=cross_attn_layer_head_mask,
                past_key_value=past_key_value,
                output_attentions=output_attentions,
            )
            hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
            hidden_states = residual + hidden_states
            hidden_states = self.encoder_attn_layer_norm(hidden_states)

        # Fully Connected
        residual = hidden_states
        hidden_states = self.activation_fn(self.fc1(hidden_states))
        hidden_states = nn.functional.dropout(hidden_states, p=self.activation_dropout, training=self.training)
        hidden_states = self.fc2(hidden_states)
        hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
        hidden_states = residual + hidden_states
        hidden_states = self.final_layer_norm(hidden_states)

        outputs = (hidden_states,)

        if output_attentions:
            outputs += (self_attn_weights, cross_attn_weights)

        if use_cache:
            outputs += (past_key_value,)

        return outputs


@auto_docstring
class MarianPreTrainedModel(PreTrainedModel):
    config_class = MarianConfig
    base_model_prefix = "model"
    supports_gradient_checkpointing = True
    _supports_cache_class = True
    _supports_static_cache = True

    def _init_weights(self, module: Union[nn.Linear, nn.Embedding, MarianSinusoidalPositionalEmbedding]):
        std = self.config.init_std
        if isinstance(module, nn.Linear):
            module.weight.data.normal_(mean=0.0, std=std)
            if module.bias is not None:
                module.bias.data.zero_()
        elif isinstance(module, MarianSinusoidalPositionalEmbedding):
            module._init_weight()
        elif isinstance(module, nn.Embedding):
            module.weight.data.normal_(mean=0.0, std=std)
            if module.padding_idx is not None:
                module.weight.data[module.padding_idx].zero_()
        elif isinstance(module, nn.LayerNorm):
            module.weight.data.fill_(1.0)
            module.bias.data.zero_()

    def _update_causal_mask(
        self,
        attention_mask: Union[torch.Tensor, "BlockMask"],
        input_tensor: torch.Tensor,
        cache_position: torch.Tensor,
        past_key_values: Cache,
        output_attentions: bool = False,
    ):
        if self.config._attn_implementation == "flash_attention_2":
            if attention_mask is not None and (attention_mask == 0.0).any():
                return attention_mask
            return None
        if self.config._attn_implementation == "flex_attention":
            if isinstance(attention_mask, torch.Tensor):
                attention_mask = make_flex_block_causal_mask(attention_mask)
            return attention_mask

        past_seen_tokens = past_key_values.get_seq_length() if past_key_values is not None else 0
        using_compilable_cache = past_key_values.is_compileable if past_key_values is not None else False

        # For SDPA, rely on its `is_causal` argument instead of an explicit mask whenever possible
        if self.config._attn_implementation == "sdpa" and not using_compilable_cache and not output_attentions:
            if AttentionMaskConverter._ignore_causal_mask_sdpa(
                attention_mask,
                inputs_embeds=input_tensor,
                past_key_values_length=past_seen_tokens,
                is_training=self.training,
            ):
                return None

        dtype = input_tensor.dtype
        sequence_length = input_tensor.shape[1]
        if using_compilable_cache:
            target_length = past_key_values.get_max_cache_shape()
        else:
            target_length = (
                attention_mask.shape[-1]
                if isinstance(attention_mask, torch.Tensor)
                else past_seen_tokens + sequence_length + 1
            )

        # In case the provided `attention_mask` is 2D, we generate a causal 4D mask here
        causal_mask = self._prepare_4d_causal_attention_mask_with_cache_position(
            attention_mask,
            sequence_length=sequence_length,
            target_length=target_length,
            dtype=dtype,
            cache_position=cache_position,
            batch_size=input_tensor.shape[0],
        )

        if (
            self.config._attn_implementation == "sdpa"
            and attention_mask is not None
            and attention_mask.device.type in ["cuda", "xpu", "npu"]
            and not output_attentions
        ):
            # Attend to all tokens in fully masked rows, as required by the SDPA memory-efficient attention path
            min_dtype = torch.finfo(dtype).min
            causal_mask = AttentionMaskConverter._unmask_unattended(causal_mask, min_dtype)

        return causal_mask

    @staticmethod
    def _prepare_4d_causal_attention_mask_with_cache_position(
        attention_mask: torch.Tensor,
        sequence_length: int,
        target_length: int,
        dtype: torch.dtype,
        cache_position: torch.Tensor,
        batch_size: int,
        **kwargs,
    ):
        """
        Creates a causal 4D mask of shape `(batch_size, 1, query_length, key_value_length)` from a 2D mask of shape
        `(batch_size, key_value_length)`, or if the input `attention_mask` is already 4D, do nothing.

        Args:
            attention_mask (`torch.Tensor`):
                A 2D attention mask of shape `(batch_size, key_value_length)` or a 4D attention mask of shape
                `(batch_size, 1, query_length, key_value_length)`.
            sequence_length (`int`):
                The sequence length being processed.
            target_length (`int`):
                The target length: when generating with static cache, the mask should be as long as the static cache,
                to account for the 0 padding, the part of the cache that is not filled yet.
            dtype (`torch.dtype`):
                The dtype to use for the 4D attention mask.
            cache_position (`torch.Tensor`):
                Indices depicting the position of the input sequence tokens in the sequence.
            batch_size (`torch.Tensor`):
                Batch size.
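
        As a rough illustration (not part of the original docstring): with `sequence_length=2`,
        `target_length=4` and `cache_position=[0, 1]`, row `i` of the returned mask keeps key positions
        `<= i` at `0` and fills the remaining positions with the minimum value of `dtype`, before any
        padding from `attention_mask` is applied.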
        """
        if attention_mask is not None and attention_mask.dim() == 4:
            # In this case we assume that the mask comes already in inverted form and requires no inversion or slicing.
            causal_mask = attention_mask
        else:
            min_dtype = torch.finfo(dtype).min
            causal_mask = torch.full(
                (sequence_length, target_length), fill_value=min_dtype, dtype=dtype, device=cache_position.device
            )
            if sequence_length != 1:
                causal_mask = torch.triu(causal_mask, diagonal=1)
            causal_mask *= torch.arange(target_length, device=cache_position.device) > cache_position.reshape(-1, 1)
            causal_mask = causal_mask[None, None, :, :].expand(batch_size, 1, -1, -1)
            if attention_mask is not None:
                causal_mask = causal_mask.clone()  # copy to contiguous memory for in-place edit
                mask_length = attention_mask.shape[-1]
                padding_mask = causal_mask[:, :, :, :mask_length] + attention_mask[:, None, None, :].to(
                    causal_mask.device
                )
                padding_mask = padding_mask == 0
                causal_mask[:, :, :, :mask_length] = causal_mask[:, :, :, :mask_length].masked_fill(
                    padding_mask, min_dtype
                )

        return causal_mask

    @property
    def dummy_inputs(self):
        pad_token = self.config.pad_token_id
        input_ids = torch.tensor([[0, 6, 10, 4, 2], [0, 8, 12, 2, pad_token]], device=self.device)
        dummy_inputs = {
            "attention_mask": input_ids.ne(pad_token),
            "input_ids": input_ids,
            "decoder_input_ids": input_ids,
        }
        return dummy_inputs


class MarianEncoder(MarianPreTrainedModel):
    """
    Transformer encoder consisting of *config.encoder_layers* self attention layers. Each layer is a
    [`MarianEncoderLayer`].

    Args:
        config: MarianConfig
        embed_tokens (nn.Embedding): output embedding
    """

    def __init__(self, config: MarianConfig, embed_tokens: Optional[nn.Embedding] = None):
        super().__init__(config)

        self.dropout = config.dropout
        self.layerdrop = config.encoder_layerdrop

        embed_dim = config.d_model
        self.padding_idx = config.pad_token_id
        self.max_source_positions = config.max_position_embeddings
        self.embed_scale = math.sqrt(embed_dim) if config.scale_embedding else 1.0

        if embed_tokens is not None:
            self.embed_tokens = embed_tokens
        else:
            self.embed_tokens = nn.Embedding(config.vocab_size, embed_dim, self.padding_idx)

        self.embed_positions = MarianSinusoidalPositionalEmbedding(config.max_position_embeddings, embed_dim)
        self.layers = nn.ModuleList([MarianEncoderLayer(config) for _ in range(config.encoder_layers)])
        self.gradient_checkpointing = False
        # Initialize weights and apply final processing
        self.post_init()

    def get_input_embeddings(self):
        return self.embed_tokens

    def set_input_embeddings(self, value):
        self.embed_tokens = value

    def forward(
        self,
        input_ids: Optional[torch.LongTensor] = None,
        attention_mask: Optional[torch.Tensor] = None,
        head_mask: Optional[torch.Tensor] = None,
        inputs_embeds: Optional[torch.FloatTensor] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[Tuple[torch.Tensor], BaseModelOutput]:
        r"""
        Args:
            input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
                Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you
                provide it.

                Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
                [`PreTrainedTokenizer.__call__`] for details.

                [What are input IDs?](../glossary#input-ids)
            attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
                Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:

                - 1 for tokens that are **not masked**,
                - 0 for tokens that are **masked**.

                [What are attention masks?](../glossary#attention-mask)
            head_mask (`torch.Tensor` of shape `(encoder_layers, encoder_attention_heads)`, *optional*):
                Mask to nullify selected heads of the attention modules. Mask values selected in `[0, 1]`:

                - 1 indicates the head is **not masked**,
                - 0 indicates the head is **masked**.

            inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
                Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation.
                This is useful if you want more control over how to convert `input_ids` indices into associated vectors
                than the model's internal embedding lookup matrix.
            output_attentions (`bool`, *optional*):
                Whether or not to return the attentions tensors of all attention layers. See `attentions` under
                returned tensors for more detail.
            output_hidden_states (`bool`, *optional*):
                Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors
                for more detail.
            return_dict (`bool`, *optional*):
                Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
        NzDYou cannot specify both input_ids and inputs_embeds at the same timer    z5You have to specify either input_ids or inputs_embedsrv   r$   r   z&The head_mask should be specified for  layers, but it is for .FT)NN)rr   rs   r   c                 s       | ]	}|d ur|V  qd S r,   r$   r5   vr$   r$   r%   	<genexpr>  s    z(MarianEncoder.forward.<locals>.<genexpr>last_hidden_statern   
attentions)rb   rs   r   use_return_dictr#   Z%warn_if_padding_and_no_attention_maskry   rz   r   r   r   r   r   r^   rx   r   r<   lenr   	enumerater@   randr   r   _gradient_checkpointing_func__call__tupler   )r/   r   rq   r   r   rs   r   r   Zinput_shapeZ	embed_posrn   Zencoder_statesZall_attentionsidxZencoder_layerZto_dropdropout_probabilitylayer_outputsr$   r$   r%   rO     sz   .




zMarianEncoder.forwardr,   )NNNNNNN)rR   rS   rT   rU   r   r   r   r   r.   r   r   r@   
LongTensorrX   rB   r   r   r   r   rO   rY   r$   r$   r0   r%   r   p  s:    		r   c                       s   e Zd ZdZddedeej f fddZdd Z	d	d
 Z
													ddeej deej deej deej deej deej deeeej   deej dee dee dee dee deej deeej ef fddZ  ZS )MarianDecoderz
    Transformer decoder consisting of *config.decoder_layers* layers. Each layer is a [`MarianDecoderLayer`]

    Args:
        config: MarianConfig
        embed_tokens (nn.Embedding): output embedding
    """

    def __init__(self, config: MarianConfig, embed_tokens: Optional[nn.Embedding] = None):
        super().__init__(config)
        self.dropout = config.dropout
        self.layerdrop = config.decoder_layerdrop
        self.padding_idx = config.pad_token_id
        self.max_target_positions = config.max_position_embeddings
        self.embed_scale = math.sqrt(config.d_model) if config.scale_embedding else 1.0

        if embed_tokens is not None:
            self.embed_tokens = embed_tokens
        else:
            self.embed_tokens = nn.Embedding(config.decoder_vocab_size, config.d_model, self.padding_idx)

        self.embed_positions = MarianSinusoidalPositionalEmbedding(config.max_position_embeddings, config.d_model)
        self.layers = nn.ModuleList([MarianDecoderLayer(config, layer_idx=i) for i in range(config.decoder_layers)])
        self.gradient_checkpointing = False
        # Initialize weights and apply final processing
        self.post_init()

    def get_input_embeddings(self):
        return self.embed_tokens

    def set_input_embeddings(self, value):
        self.embed_tokens = value

    def forward(
        self,
        input_ids: Optional[torch.LongTensor] = None,
        attention_mask: Optional[torch.Tensor] = None,
        encoder_hidden_states: Optional[torch.FloatTensor] = None,
        encoder_attention_mask: Optional[torch.LongTensor] = None,
        head_mask: Optional[torch.Tensor] = None,
        cross_attn_head_mask: Optional[torch.Tensor] = None,
        past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None,
        inputs_embeds: Optional[torch.FloatTensor] = None,
        use_cache: Optional[bool] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
        cache_position: Optional[torch.LongTensor] = None,
    ) -> Union[Tuple[torch.Tensor], BaseModelOutputWithPastAndCrossAttentions]:
        r"""
        Args:
            input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
                Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you
                provide it.

                Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
                [`PreTrainedTokenizer.__call__`] for details.

                [What are input IDs?](../glossary#input-ids)
            attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
                Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:

                - 1 for tokens that are **not masked**,
                - 0 for tokens that are **masked**.

                [What are attention masks?](../glossary#attention-mask)
            encoder_hidden_states (`torch.FloatTensor` of shape `(batch_size, encoder_sequence_length, hidden_size)`, *optional*):
                Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention
                of the decoder.
            encoder_attention_mask (`torch.LongTensor` of shape `(batch_size, encoder_sequence_length)`, *optional*):
                Mask to avoid performing cross-attention on padding tokens indices of encoder input_ids. Mask values
                selected in `[0, 1]`:

                - 1 for tokens that are **not masked**,
                - 0 for tokens that are **masked**.

                [What are attention masks?](../glossary#attention-mask)
            head_mask (`torch.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*):
                Mask to nullify selected heads of the attention modules. Mask values selected in `[0, 1]`:

                - 1 indicates the head is **not masked**,
                - 0 indicates the head is **masked**.

            cross_attn_head_mask (`torch.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*):
                Mask to nullify selected heads of the cross-attention modules in the decoder to avoid performing
                cross-attention on hidden heads. Mask values selected in `[0, 1]`:

                - 1 indicates the head is **not masked**,
                - 0 indicates the head is **masked**.

            past_key_values (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`):
                Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of
                shape `(batch_size, num_heads, sequence_length, embed_size_per_head)`) and 2 additional tensors of
                shape `(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)`.

                Contains pre-computed hidden-states (key and values in the self-attention blocks and in the
                cross-attention blocks) that can be used (see `past_key_values` input) to speed up sequential decoding.

                If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those
                that don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of
                all `decoder_input_ids` of shape `(batch_size, sequence_length)`.
            inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
                Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation.
                This is useful if you want more control over how to convert `input_ids` indices into associated vectors
                than the model's internal embedding lookup matrix.
            output_attentions (`bool`, *optional*):
                Whether or not to return the attentions tensors of all attention layers. See `attentions` under
                returned tensors for more detail.
            output_hidden_states (`bool`, *optional*):
                Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors
                for more detail.
            return_dict (`bool`, *optional*):
                Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
            cache_position (`torch.LongTensor` of shape `(sequence_length)`, *optional*):
                Indices depicting the position of the input sequence tokens in the sequence. It is used to update the
                cache in the correct position and to infer the complete sequence length.
        """
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        use_cache = use_cache if use_cache is not None else self.config.use_cache
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        if self.gradient_checkpointing and self.training and use_cache:
            logger.warning_once(
                "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`..."
            )
            use_cache = False

        if (input_ids is None) ^ (inputs_embeds is not None):
            raise ValueError("You cannot specify both decoder_input_ids and decoder_inputs_embeds at the same time")

        if input_ids is not None:
            input_ids = input_ids.view(-1, input_ids.shape[-1])

        if inputs_embeds is None:
            inputs_embeds = self.embed_tokens(input_ids)

        inputs_embeds = inputs_embeds * self.embed_scale

        # initialize `past_key_values`
        return_legacy_cache = False
        if use_cache and not isinstance(past_key_values, Cache):
            return_legacy_cache = True
            logger.warning_once(
                "Passing a tuple of `past_key_values` is deprecated and will be removed in Transformers v4.58.0. "
                "You should pass an instance of `EncoderDecoderCache` instead, e.g. "
                "`past_key_values=EncoderDecoderCache.from_legacy_cache(past_key_values)`."
            )
            past_key_values = EncoderDecoderCache.from_legacy_cache(past_key_values)

        batch_size, seq_length = inputs_embeds.size()[:-1]
        past_key_values_length = past_key_values.get_seq_length() if past_key_values is not None else 0
        if cache_position is None:
            cache_position = torch.arange(
                past_key_values_length, past_key_values_length + seq_length, device=inputs_embeds.device
            )

        if attention_mask is None and not is_torchdynamo_compiling():
            # required mask seq length can be calculated via length of past cache
            mask_seq_length = past_key_values_length + seq_length
            attention_mask = torch.ones(batch_size, mask_seq_length, device=inputs_embeds.device)

        self_attn_cache = (
            past_key_values.self_attention_cache
            if isinstance(past_key_values, EncoderDecoderCache)
            else past_key_values
        )

        attention_mask = self._update_causal_mask(
            attention_mask,
            inputs_embeds,
            cache_position,
            self_attn_cache,
            output_attentions,
        )

        # expand encoder attention mask from [bsz, seq_len] to [bsz, 1, tgt_seq_len, src_seq_len]
        if encoder_hidden_states is not None and encoder_attention_mask is not None:
            encoder_attention_mask = _prepare_4d_attention_mask(
                encoder_attention_mask, inputs_embeds.dtype, tgt_len=seq_length
            )

        # embed positions
        position_ids = cache_position.unsqueeze(0)
        positions = self.embed_positions(inputs_embeds.shape[:2], past_key_values_length, position_ids=position_ids)

        hidden_states = inputs_embeds + positions
        hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)

        # decoder layers
        all_hidden_states = () if output_hidden_states else None
        all_self_attns = () if output_attentions else None
        all_cross_attentions = () if (output_attentions and encoder_hidden_states is not None) else None
        next_decoder_cache = None

        # check if head_mask/cross_attn_head_mask has a correct number of layers specified if desired
        for attn_mask, mask_name in zip([head_mask, cross_attn_head_mask], ["head_mask", "cross_attn_head_mask"]):
            if attn_mask is not None:
                assert attn_mask.size()[0] == len(self.layers), (
                    f"The `{mask_name}` should be specified for {len(self.layers)} layers, but it is for"
                    f" {attn_mask.size()[0]}."
                )

        for idx, decoder_layer in enumerate(self.layers):
            # add LayerDrop (see https://arxiv.org/abs/1909.11556 for description)
            if output_hidden_states:
                all_hidden_states += (hidden_states,)
            if self.training:
                dropout_probability = torch.rand([])
                if dropout_probability < self.layerdrop:
                    continue

            if self.gradient_checkpointing and self.training:
                layer_outputs = self._gradient_checkpointing_func(
                    decoder_layer.__call__,
                    hidden_states,
                    attention_mask,
                    encoder_hidden_states,
                    encoder_attention_mask,
                    head_mask[idx] if head_mask is not None else None,
                    cross_attn_head_mask[idx] if cross_attn_head_mask is not None else None,
                    None,
                    output_attentions,
                    use_cache,
                    cache_position,
                )
            else:
                layer_outputs = decoder_layer(
                    hidden_states,
                    attention_mask=attention_mask,
                    encoder_hidden_states=encoder_hidden_states,
                    encoder_attention_mask=encoder_attention_mask,
                    layer_head_mask=(head_mask[idx] if head_mask is not None else None),
                    cross_attn_layer_head_mask=(
                        cross_attn_head_mask[idx] if cross_attn_head_mask is not None else None
                    ),
                    past_key_value=past_key_values,
                    output_attentions=output_attentions,
                    use_cache=use_cache,
                    cache_position=cache_position,
                )
            hidden_states = layer_outputs[0]

            if use_cache:
                next_decoder_cache = layer_outputs[3 if output_attentions else 1]

            if output_attentions:
                all_self_attns += (layer_outputs[1],)

                if encoder_hidden_states is not None:
                    all_cross_attentions += (layer_outputs[2],)

        # add hidden states from the last decoder layer
        if output_hidden_states:
            all_hidden_states += (hidden_states,)

        next_cache = next_decoder_cache if use_cache else None
        if return_legacy_cache:
            next_cache = next_cache.to_legacy_cache()

        if not return_dict:
            return tuple(
                v
                for v in [hidden_states, next_cache, all_hidden_states, all_self_attns, all_cross_attentions]
                if v is not None
            )
        return BaseModelOutputWithPastAndCrossAttentions(
            last_hidden_state=hidden_states,
            past_key_values=next_cache,
            hidden_states=all_hidden_states,
            attentions=all_self_attns,
            cross_attentions=all_cross_attentions,
        )


@auto_docstring
class MarianModel(MarianPreTrainedModel):
    _tied_weights_keys = ["encoder.embed_tokens.weight", "decoder.embed_tokens.weight"]

    def __init__(self, config: MarianConfig):
        super().__init__(config)

        padding_idx, vocab_size = config.pad_token_id, config.vocab_size

        # We always use self.shared for token embeddings to ensure compatibility with all marian models
        self.shared = nn.Embedding(vocab_size, config.d_model, padding_idx)
        if self.config.share_encoder_decoder_embeddings:
            encoder_embed_tokens = decoder_embed_tokens = self.shared
        else:
            # Since the embeddings are not shared, deepcopy the embeddings here for encoder
            # and decoder to make sure they are not tied.
            encoder_embed_tokens = copy.deepcopy(self.shared)
            decoder_embed_tokens = copy.deepcopy(self.shared)
            self.shared = None

        self.encoder = MarianEncoder(config, encoder_embed_tokens)
        self.decoder = MarianDecoder(config, decoder_embed_tokens)

        # Initialize weights and apply final processing
        self.post_init()

    def get_input_embeddings(self):
        # This will return shared embeddings if they are shared else specific to encoder.
        return self.get_encoder().get_input_embeddings()

    def set_input_embeddings(self, value):
        if self.config.share_encoder_decoder_embeddings:
            self.shared = value
            self.encoder.embed_tokens = self.shared
            self.decoder.embed_tokens = self.shared
        else:  # if not shared only set encoder embeddings
            self.encoder.embed_tokens = value

    def get_decoder_input_embeddings(self):
        if self.config.share_encoder_decoder_embeddings:
            raise ValueError(
                "`get_decoder_input_embeddings` should not be called if `config.share_encoder_decoder_embeddings` "
                "is `True`. Please use `get_input_embeddings` instead."
            )
        return self.get_decoder().get_input_embeddings()

    def set_decoder_input_embeddings(self, value):
        if self.config.share_encoder_decoder_embeddings:
            raise ValueError(
                "`config.share_encoder_decoder_embeddings` is set to `True` meaning the decoder input embeddings "
                "are shared with the encoder. In order to set the decoder input embeddings, you should simply set "
                "the encoder input embeddings by calling `set_input_embeddings` with the appropriate embeddings."
            )
        self.decoder.embed_tokens = value

    def get_encoder(self):
        return self.encoder

    def get_decoder(self):
        return self.decoder

    def resize_decoder_token_embeddings(self, new_num_tokens: int) -> nn.Embedding:
        if self.config.share_encoder_decoder_embeddings:
            raise ValueError(
                "`resize_decoder_token_embeddings` should not be called if `config.share_encoder_decoder_embeddings` "
                "is `True`. Please use `resize_token_embeddings` instead."
            )

        old_embeddings = self.get_decoder_input_embeddings()
        new_embeddings = self._get_resized_embeddings(old_embeddings, new_num_tokens)
        self.set_decoder_input_embeddings(new_embeddings)

        model_embeds = self.get_decoder_input_embeddings()

        if new_num_tokens is None:
            return model_embeds

        # Update base model and current model config
        self.config.decoder_vocab_size = new_num_tokens

        # Tie weights again if needed
        self.tie_weights()

        return model_embeds

    def forward(
        self,
        input_ids: Optional[torch.LongTensor] = None,
        attention_mask: Optional[torch.Tensor] = None,
        decoder_input_ids: Optional[torch.LongTensor] = None,
        decoder_attention_mask: Optional[torch.Tensor] = None,
        head_mask: Optional[torch.Tensor] = None,
        decoder_head_mask: Optional[torch.Tensor] = None,
        cross_attn_head_mask: Optional[torch.Tensor] = None,
        encoder_outputs: Optional[Union[Tuple[torch.Tensor], BaseModelOutput]] = None,
        past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None,
        inputs_embeds: Optional[torch.Tensor] = None,
        decoder_inputs_embeds: Optional[torch.FloatTensor] = None,
        use_cache: Optional[bool] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
        cache_position: Optional[torch.Tensor] = None,
    ) -> Union[Tuple[torch.Tensor], Seq2SeqModelOutput]:
        r"""
        decoder_input_ids (`torch.LongTensor` of shape `(batch_size, target_sequence_length)`, *optional*):
            Indices of decoder input sequence tokens in the vocabulary.

            Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
            [`PreTrainedTokenizer.__call__`] for details.

            [What are decoder input IDs?](../glossary#decoder-input-ids)

            Marian uses the `pad_token_id` as the starting token for `decoder_input_ids` generation. If
            `past_key_values` is used, optionally only the last `decoder_input_ids` have to be input (see
            `past_key_values`).
        decoder_attention_mask (`torch.LongTensor` of shape `(batch_size, target_sequence_length)`, *optional*):
            Default behavior: generate a tensor that ignores pad tokens in `decoder_input_ids`. Causal mask will also
            be used by default.
        cross_attn_head_mask (`torch.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*):
            Mask to nullify selected heads of the cross-attention modules in the decoder. Mask values selected in `[0,
            1]`:

            - 1 indicates the head is **not masked**,
            - 0 indicates the head is **masked**.

        Example:

        ```python
        >>> from transformers import AutoTokenizer, MarianModel

        >>> tokenizer = AutoTokenizer.from_pretrained("Helsinki-NLP/opus-mt-en-de")
        >>> model = MarianModel.from_pretrained("Helsinki-NLP/opus-mt-en-de")

        >>> inputs = tokenizer("Studies have been shown that owning a dog is good for you", return_tensors="pt")
        >>> decoder_inputs = tokenizer(
        ...     "<pad> Studien haben gezeigt dass es hilfreich ist einen Hund zu besitzen",
        ...     return_tensors="pt",
        ...     add_special_tokens=False,
        ... )
        >>> outputs = model(input_ids=inputs.input_ids, decoder_input_ids=decoder_inputs.input_ids)

        >>> last_hidden_states = outputs.last_hidden_state
        >>> list(last_hidden_states.shape)
        [1, 26, 512]
        ```"""
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        use_cache = use_cache if use_cache is not None else self.config.use_cache
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        if encoder_outputs is None:
            encoder_outputs = self.encoder(
                input_ids=input_ids,
                attention_mask=attention_mask,
                head_mask=head_mask,
                inputs_embeds=inputs_embeds,
                output_attentions=output_attentions,
                output_hidden_states=output_hidden_states,
                return_dict=return_dict,
            )
        # If the user passed a tuple for encoder_outputs, we wrap it in a BaseModelOutput when return_dict=True
        elif return_dict and not isinstance(encoder_outputs, BaseModelOutput):
            encoder_outputs = BaseModelOutput(
                last_hidden_state=encoder_outputs[0],
                hidden_states=encoder_outputs[1] if len(encoder_outputs) > 1 else None,
                attentions=encoder_outputs[2] if len(encoder_outputs) > 2 else None,
            )

        # decoder outputs consists of (dec_features, past_key_value, dec_hidden, dec_attn)
        decoder_outputs = self.decoder(
            input_ids=decoder_input_ids,
            attention_mask=decoder_attention_mask,
            encoder_hidden_states=encoder_outputs[0],
            encoder_attention_mask=attention_mask,
            head_mask=decoder_head_mask,
            cross_attn_head_mask=cross_attn_head_mask,
            past_key_values=past_key_values,
            inputs_embeds=decoder_inputs_embeds,
            use_cache=use_cache,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
            cache_position=cache_position,
        )

        if not return_dict:
            return decoder_outputs + encoder_outputs

        return Seq2SeqModelOutput(
            last_hidden_state=decoder_outputs.last_hidden_state,
            past_key_values=decoder_outputs.past_key_values,
            decoder_hidden_states=decoder_outputs.hidden_states,
            decoder_attentions=decoder_outputs.attentions,
            cross_attentions=decoder_outputs.cross_attentions,
            encoder_last_hidden_state=encoder_outputs.last_hidden_state,
            encoder_hidden_states=encoder_outputs.hidden_states,
            encoder_attentions=encoder_outputs.attentions,
        )


@auto_docstring(
    custom_intro="""
    The Marian Model with a language modeling head. Can be used for summarization.
    """
)
class MarianMTModel(MarianPreTrainedModel, GenerationMixin):
    base_model_prefix = "model"
    _keys_to_ignore_on_load_missing = [
        "final_logits_bias",
        "encoder.embed_positions.weight",
        "decoder.embed_positions.weight",
    ]
    _keys_to_ignore_on_save = ["model.encoder.embed_positions.weight", "model.decoder.embed_positions.weight"]
    _tied_weights_keys = ["model.encoder.embed_tokens.weight", "model.decoder.embed_tokens.weight", "lm_head.weight"]

    def __init__(self, config: MarianConfig):
        super().__init__(config)
        self.model = MarianModel(config)

        target_vocab_size = config.vocab_size if config.share_encoder_decoder_embeddings else config.decoder_vocab_size
        self.register_buffer("final_logits_bias", torch.zeros((1, target_vocab_size)))
        self.lm_head = nn.Linear(config.d_model, target_vocab_size, bias=False)

        # Initialize weights and apply final processing
        self.post_init()

    def get_encoder(self):
        return self.model.get_encoder()

    def get_decoder(self):
        return self.model.get_decoder()

    def resize_token_embeddings(
        self, new_num_tokens: int, pad_to_multiple_of: Optional[int] = None, mean_resizing: bool = True
    ) -> nn.Embedding:
        new_embeddings = super().resize_token_embeddings(new_num_tokens, pad_to_multiple_of, mean_resizing)
        if self.config.share_encoder_decoder_embeddings:
            self._resize_final_logits_bias(new_embeddings.weight.shape[0])
        return new_embeddings

    def _resize_token_embeddings(self, new_num_tokens, pad_to_multiple_of=None, *args):
        old_embeddings = self.get_input_embeddings()
        new_embeddings = self._get_resized_embeddings(old_embeddings, new_num_tokens, pad_to_multiple_of)
        self.set_input_embeddings(new_embeddings)

        new_num_tokens = new_embeddings.weight.shape[0]
        # update config.decoder_vocab_size if embeddings are tied
        if self.config.share_encoder_decoder_embeddings:
            self.config.decoder_vocab_size = new_num_tokens

        # if word embeddings are not tied, make sure that lm head is resized as well
        if (
            self.config.share_encoder_decoder_embeddings
            and self.get_output_embeddings() is not None
            and not self.config.tie_word_embeddings
        ):
            old_lm_head = self.get_output_embeddings()
            new_lm_head = self._get_resized_lm_head(old_lm_head, new_num_tokens)
            self.set_output_embeddings(new_lm_head)

        return self.get_input_embeddings()

    def resize_decoder_token_embeddings(self, new_num_tokens):
        if self.config.share_encoder_decoder_embeddings:
            raise ValueError(
                "`resize_decoder_token_embeddings` should not be called if `config.share_encoder_decoder_embeddings` "
                "is `True`. Please use `resize_token_embeddings` instead."
            )

        old_embeddings = self.model.get_decoder_input_embeddings()
        new_embeddings = self._get_resized_embeddings(old_embeddings, new_num_tokens)
        self.model.set_decoder_input_embeddings(new_embeddings)

        # if word embeddings are not tied, make sure that lm head is resized as well
        if self.get_output_embeddings() is not None and not self.config.tie_word_embeddings:
            old_lm_head = self.get_output_embeddings()
            new_lm_head = self._get_resized_lm_head(old_lm_head, new_num_tokens)
            self.set_output_embeddings(new_lm_head)

        model_embeds = self.model.get_decoder_input_embeddings()

        if new_num_tokens is None:
            return model_embeds

        # Update base model and current model config
        self.config.decoder_vocab_size = new_num_tokens

        # Tie weights again if needed
        self.tie_weights()

        self._resize_final_logits_bias(new_num_tokens)

        return model_embeds

    def _resize_final_logits_bias(self, new_num_tokens: int) -> None:
        old_num_tokens = self.final_logits_bias.shape[-1]
        if new_num_tokens <= old_num_tokens:
            new_bias = self.final_logits_bias[:, :new_num_tokens]
        else:
            extra_bias = torch.zeros((1, new_num_tokens - old_num_tokens), device=self.final_logits_bias.device)
            new_bias = torch.cat([self.final_logits_bias, extra_bias], dim=1)
        self.register_buffer("final_logits_bias", new_bias)

    def get_output_embeddings(self):
        return self.lm_head

    def set_output_embeddings(self, new_embeddings: nn.Embedding):
        self.lm_head = new_embeddings

    def tie_weights(self):
        """
        Tie the weights between the input embeddings and the output embeddings.

        If the `torchscript` flag is set in the configuration, can't handle parameter sharing so we are cloning the
        weights instead.
        """
        output_embeddings = self.get_output_embeddings()
        if output_embeddings is not None and getattr(self.config, "tie_word_embeddings", True):
            # if embeddings are shared this will return shared embeddings otherwise decoder embed_tokens
            word_embeddings = self.get_decoder().get_input_embeddings()
            self._tie_or_clone_weights(output_embeddings, word_embeddings)

        if getattr(self.config, "is_encoder_decoder", False) and getattr(self.config, "tie_encoder_decoder", False):
            if hasattr(self, self.base_model_prefix):
                self = getattr(self, self.base_model_prefix)
            tied_weights = self._tie_encoder_decoder_weights(
                self.encoder, self.decoder, self.base_model_prefix, "encoder"
            )
            self._dynamic_tied_weights_keys = tied_weights

        for module in self.modules():
            if hasattr(module, "_tie_weights"):
                module._tie_weights()

    def forward(
        self,
        input_ids: Optional[torch.LongTensor] = None,
        attention_mask: Optional[torch.Tensor] = None,
        decoder_input_ids: Optional[torch.LongTensor] = None,
        decoder_attention_mask: Optional[torch.Tensor] = None,
        head_mask: Optional[torch.Tensor] = None,
        decoder_head_mask: Optional[torch.Tensor] = None,
        cross_attn_head_mask: Optional[torch.Tensor] = None,
        encoder_outputs: Optional[Union[Tuple[torch.Tensor], BaseModelOutput]] = None,
        past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None,
        inputs_embeds: Optional[torch.FloatTensor] = None,
        decoder_inputs_embeds: Optional[torch.FloatTensor] = None,
        labels: Optional[torch.LongTensor] = None,
        use_cache: Optional[bool] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
        cache_position: Optional[torch.Tensor] = None,
    ) -> Union[Tuple[torch.Tensor], Seq2SeqLMOutput]:
        r"""
        decoder_input_ids (`torch.LongTensor` of shape `(batch_size, target_sequence_length)`, *optional*):
            Indices of decoder input sequence tokens in the vocabulary.

            Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
            [`PreTrainedTokenizer.__call__`] for details.

            [What are decoder input IDs?](../glossary#decoder-input-ids)

            Marian uses the `pad_token_id` as the starting token for `decoder_input_ids` generation. If
            `past_key_values` is used, optionally only the last `decoder_input_ids` have to be input (see
            `past_key_values`).
        decoder_attention_mask (`torch.LongTensor` of shape `(batch_size, target_sequence_length)`, *optional*):
            Default behavior: generate a tensor that ignores pad tokens in `decoder_input_ids`. Causal mask will also
            be used by default.
        cross_attn_head_mask (`torch.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*):
            Mask to nullify selected heads of the cross-attention modules in the decoder. Mask values selected in `[0,
            1]`:

            - 1 indicates the head is **not masked**,
            - 0 indicates the head is **masked**.
        labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
            Labels for computing the masked language modeling loss. Indices should either be in `[0, ...,
            config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored
            (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.

        Example:

        ```python
        >>> from transformers import AutoTokenizer, MarianMTModel

        >>> src = "fr"  # source language
        >>> trg = "en"  # target language

        >>> model_name = f"Helsinki-NLP/opus-mt-{src}-{trg}"
        >>> model = MarianMTModel.from_pretrained(model_name)
        >>> tokenizer = AutoTokenizer.from_pretrained(model_name)

        >>> sample_text = "où est l'arrêt de bus ?"
        >>> batch = tokenizer([sample_text], return_tensors="pt")

        >>> generated_ids = model.generate(**batch)
        >>> tokenizer.batch_decode(generated_ids, skip_special_tokens=True)[0]
        "Where's the bus stop?"
        ```
        NzJThe `use_cache` argument is changed to `False` since `labels` is provided.F)rq   r   r  r  r   r  r  r   r   r  r   rs   r   r   rt   r   r    r   )	losslogitsr   r  r  r  r  r   r   )rb   r   rg   warningr&   r   r   r   r'  r#  r   rz   r   r   r   r  r  r  r  r   r   )r/   r   rq   r   r  r   r  r  r  r   r   r  r=  r   rs   r   r   rt   r   Z	lm_logitsZmasked_lm_lossloss_fctoutputr$   r$   r%   rO     s^   C
zMarianMTModel.forwardc                 C   s   t || jj| jjS r,   )r&   rb   r   r   )r/   r=  r$   r$   r%   %prepare_decoder_input_ids_from_labels  s   z3MarianMTModel.prepare_decoder_input_ids_from_labelsc                    sB   d}| D ]}|t  fdd|d d D |dd   f7 }q|S )Nr$   c                 3   $    | ]}| d  |jV  qdS rQ   Zindex_selectr   rL   r5   Z
past_statebeam_idxr$   r%   r        " z/MarianMTModel._reorder_cache.<locals>.<genexpr>r2   r   r   rH  Zreordered_pastZ
layer_pastr$   rG  r%   _reorder_cache  s   
zMarianMTModel._reorder_cache)NTr,   )NNNNNNNNNNNNNNNNN)%rR   rS   rT   r   Z_keys_to_ignore_on_load_missingZ_keys_to_ignore_on_saver!  r   r.   r  r  rV   r   r   r   r   r+  r4  r  r,  r-  r0  r  r   r@   r   rX   r   r   r   rB   r   rO   rC  r   rL  rY   r$   r$   r0   r%   r"    s    	 		
vr"  c                       s(   e Zd ZdZ fddZdd Z  ZS )MarianDecoderWrapperz
    This wrapper class is a helper class to correctly load pretrained checkpoints when the causal language model is
    used in combination with the [`EncoderDecoderModel`] framework.
    c                    s   t  | t|| _d S r,   )r-   r.   r   r
  r/   rb   r0   r$   r%   r.     s   zMarianDecoderWrapper.__init__c                 O   s   | j |i |S r,   r  )r/   r1  r   r$   r$   r%   rO     s   zMarianDecoderWrapper.forward)rR   rS   rT   rU   r.   rO   rY   r$   r$   r0   r%   rM    s    rM  c                "       s  e Zd ZdgZ fddZdd Zdd Zdd	 Zd
d Zdd Z	dd Z
e														d$deej deej deej deej deej deej deeej  deej deej dee dee dee dee deej deeef fd d!Zed"d# Z  ZS )%MarianForCausalLMr$  c                    sN   t |}d|_d|_t | t|| _tj	|j
|jdd| _|   d S )NTFrd   )r  r  r_   r8  r-   r.   rM  r   r   ri   Zhidden_sizer   r'  r   rN  r0   r$   r%   r.   &  s   

zMarianForCausalLM.__init__c                 C   s
   | j jjS r,   r   r
  r   r   r$   r$   r%   r   2  r   z&MarianForCausalLM.get_input_embeddingsc                 C   s   || j j_d S r,   rP  r   r$   r$   r%   r   5  s   z&MarianForCausalLM.set_input_embeddingsc                 C   r   r,   r6  r   r$   r$   r%   r-  8  r   z'MarianForCausalLM.get_output_embeddingsc                 C   r   r,   r6  r7  r$   r$   r%   r0  ;  r   z'MarianForCausalLM.set_output_embeddingsc                 C   s   || j _d S r,   r   r
  )r/   r
  r$   r$   r%   set_decoder>  s   zMarianForCausalLM.set_decoderc                 C   s   | j jS r,   rQ  r   r$   r$   r%   r  A  s   zMarianForCausalLM.get_decoderNr   rq   r   r   r   r  r   r   r=  r   rs   r   r   rt   r+   c                 C   s   |dur|n| j j}|dur|n| j j}|dur|n| j j}| jj|||||||||
||||d}| |d }d}|	durU|	|j}	t	 }||
d| j j|	
d}|sk|f|dd  }|duri|f| S |S t|||j|j|j|jdS )a  
        cross_attn_head_mask (`torch.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*):
            Mask to nullify selected heads of the cross-attention modules. Mask values selected in `[0, 1]`:

            - 1 indicates the head is **not masked**,
            - 0 indicates the head is **masked**.
        labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
            Labels for computing the masked language modeling loss. Indices should either be in `[0, ...,
            config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored
            (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.

        Example:

        ```python
        >>> from transformers import AutoTokenizer, MarianForCausalLM

        >>> tokenizer = AutoTokenizer.from_pretrained("Helsinki-NLP/opus-mt-fr-en")
        >>> model = MarianForCausalLM.from_pretrained("Helsinki-NLP/opus-mt-fr-en", add_cross_attention=False)
        >>> assert model.config.is_decoder, f"{model.__class__} has to be configured as a decoder."
        >>> inputs = tokenizer("Hello, my dog is cute", return_tensors="pt")
        >>> outputs = model(**inputs)

        >>> logits = outputs.logits
        >>> expected_shape = [1, inputs.input_ids.shape[-1], model.config.vocab_size]
        >>> list(logits.shape) == expected_shape
        True
        ```"""
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        # decoder outputs consists of (dec_features, layer_state, dec_hidden, dec_attn)
        outputs = self.model.decoder(
            input_ids=input_ids,
            attention_mask=attention_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            head_mask=head_mask,
            cross_attn_head_mask=cross_attn_head_mask,
            past_key_values=past_key_values,
            inputs_embeds=inputs_embeds,
            use_cache=use_cache,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
            cache_position=cache_position,
        )

        logits = self.lm_head(outputs[0])

        loss = None
        if labels is not None:
            labels = labels.to(logits.device)
            loss_fct = CrossEntropyLoss()
            loss = loss_fct(logits.view(-1, self.config.vocab_size), labels.view(-1))

        if not return_dict:
            output = (logits,) + outputs[1:]
            return (loss,) + output if loss is not None else output

        return CausalLMOutputWithCrossAttentions(
            loss=loss,
            logits=logits,
            past_key_values=outputs.past_key_values,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
            cross_attentions=outputs.cross_attentions,
        )

    @staticmethod
    def _reorder_cache(past_key_values, beam_idx):
        reordered_past = ()
        for layer_past in past_key_values:
            reordered_past += (
                tuple(past_state.index_select(0, beam_idx.to(past_state.device)) for past_state in layer_past),
            )
        return reordered_past


__all__ = ["MarianForCausalLM", "MarianModel", "MarianMTModel", "MarianPreTrainedModel"]