from typing import Callable, Optional, Tuple

import flax.linen as nn
import jax
import jax.numpy as jnp
import numpy as np
from flax.core.frozen_dict import FrozenDict, freeze, unfreeze
from flax.linen import combine_masks, make_causal_mask
from flax.linen import partitioning as nn_partitioning
from flax.linen.attention import dot_product_attention_weights
from flax.traverse_util import flatten_dict, unflatten_dict
from jax import lax

from ...modeling_flax_outputs import (
    FlaxBaseModelOutputWithPastAndCrossAttentions,
    FlaxBaseModelOutputWithPooling,
    FlaxBaseModelOutputWithPoolingAndCrossAttentions,
    FlaxCausalLMOutputWithCrossAttentions,
    FlaxMaskedLMOutput,
    FlaxMultipleChoiceModelOutput,
    FlaxQuestionAnsweringModelOutput,
    FlaxSequenceClassifierOutput,
    FlaxTokenClassifierOutput,
)
from ...modeling_flax_utils import (
    ACT2FN,
    FlaxPreTrainedModel,
    append_call_sample_docstring,
    overwrite_call_docstring,
)
from ...utils import add_start_docstrings, add_start_docstrings_to_model_forward, logging
from .configuration_roberta import RobertaConfig


logger = logging.get_logger(__name__)

_CHECKPOINT_FOR_DOC = "FacebookAI/roberta-base"
_CONFIG_FOR_DOC = "RobertaConfig"

remat = nn_partitioning.remat


def create_position_ids_from_input_ids(input_ids, padding_idx):
    """
    Replace non-padding symbols with their position numbers. Position numbers begin at padding_idx+1. Padding symbols
    are ignored. This is modified from fairseq's `utils.make_positions`.

    Args:
        input_ids: jnp.ndarray
        padding_idx: int

    Returns: jnp.ndarray
    """
    # the mask is 1 for real tokens and 0 for padding tokens
    mask = (input_ids != padding_idx).astype("i4")

    if mask.ndim > 2:
        mask = mask.reshape((-1, mask.shape[-1]))
        incremental_indices = jnp.cumsum(mask, axis=1).astype("i4") * mask
        incremental_indices = incremental_indices.reshape(input_ids.shape)
    else:
        incremental_indices = (jnp.cumsum(mask, axis=1)).astype("i4") * mask

    return incremental_indices.astype("i4") + padding_idx
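
# A minimal numeric sketch of the function above (illustrative values, not part of the
# original module). With RoBERTa's padding_idx = 1, real tokens are numbered from
# padding_idx + 1 onward, while padding positions collapse back to padding_idx:
#
#     input_ids = jnp.array([[0, 31414, 232, 2, 1, 1]])
#     create_position_ids_from_input_ids(input_ids, padding_idx=1)
#     # -> [[2, 3, 4, 5, 1, 1]]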


ROBERTA_START_DOCSTRING = r"""

    This model inherits from [`FlaxPreTrainedModel`]. Check the superclass documentation for the generic methods the
    library implements for all its models (such as downloading, saving and converting weights from PyTorch models)

    This model is also a
    [flax.linen.Module](https://flax.readthedocs.io/en/latest/api_reference/flax.linen/module.html) subclass. Use it as
    a regular Flax linen Module and refer to the Flax documentation for all matters related to general usage and
    behavior.

    Finally, this model supports inherent JAX features such as:

    - [Just-In-Time (JIT) compilation](https://jax.readthedocs.io/en/latest/jax.html#just-in-time-compilation-jit)
    - [Automatic Differentiation](https://jax.readthedocs.io/en/latest/jax.html#automatic-differentiation)
    - [Vectorization](https://jax.readthedocs.io/en/latest/jax.html#vectorization-vmap)
    - [Parallelization](https://jax.readthedocs.io/en/latest/jax.html#parallelization-pmap)

    Parameters:
        config ([`RobertaConfig`]): Model configuration class with all the parameters of the
            model. Initializing with a config file does not load the weights associated with the model, only the
            configuration. Check out the [`~FlaxPreTrainedModel.from_pretrained`] method to load the model weights.
"""

ROBERTA_INPUTS_DOCSTRING = r"""
    Args:
        input_ids (`numpy.ndarray` of shape `({0})`):
            Indices of input sequence tokens in the vocabulary.

            Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
            [`PreTrainedTokenizer.__call__`] for details.

            [What are input IDs?](../glossary#input-ids)
        attention_mask (`numpy.ndarray` of shape `({0})`, *optional*):
            Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:

            - 1 for tokens that are **not masked**,
            - 0 for tokens that are **masked**.

            [What are attention masks?](../glossary#attention-mask)
        token_type_ids (`numpy.ndarray` of shape `({0})`, *optional*):
            Segment token indices to indicate first and second portions of the inputs. Indices are selected in `[0,
            1]`:

            - 0 corresponds to a *sentence A* token,
            - 1 corresponds to a *sentence B* token.

            [What are token type IDs?](../glossary#token-type-ids)
        position_ids (`numpy.ndarray` of shape `({0})`, *optional*):
            Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,
            config.max_position_embeddings - 1]`.
        head_mask (`numpy.ndarray` of shape `({0})`, *optional*):
            Mask to nullify selected heads of the attention modules. Mask values selected in `[0, 1]`:

            - 1 indicates the head is **not masked**,
            - 0 indicates the head is **masked**.

        return_dict (`bool`, *optional*):
            Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
"""


class FlaxRobertaEmbeddings(nn.Module):
    """Construct the embeddings from word, position and token_type embeddings."""

    config: RobertaConfig
    dtype: jnp.dtype = jnp.float32  # the dtype of the computation

    def setup(self):
        self.word_embeddings = nn.Embed(
            self.config.vocab_size,
            self.config.hidden_size,
            embedding_init=jax.nn.initializers.normal(stddev=self.config.initializer_range),
            dtype=self.dtype,
        )
        self.position_embeddings = nn.Embed(
            self.config.max_position_embeddings,
            self.config.hidden_size,
            embedding_init=jax.nn.initializers.normal(stddev=self.config.initializer_range),
            dtype=self.dtype,
        )
        self.token_type_embeddings = nn.Embed(
            self.config.type_vocab_size,
            self.config.hidden_size,
            embedding_init=jax.nn.initializers.normal(stddev=self.config.initializer_range),
            dtype=self.dtype,
        )
        self.LayerNorm = nn.LayerNorm(epsilon=self.config.layer_norm_eps, dtype=self.dtype)
        self.dropout = nn.Dropout(rate=self.config.hidden_dropout_prob)

    def __call__(self, input_ids, token_type_ids, position_ids, attention_mask, deterministic: bool = True):
        # Embed
        inputs_embeds = self.word_embeddings(input_ids.astype("i4"))
        position_embeds = self.position_embeddings(position_ids.astype("i4"))
        token_type_embeddings = self.token_type_embeddings(token_type_ids.astype("i4"))

        # Sum all embeddings
        hidden_states = inputs_embeds + token_type_embeddings + position_embeds

        # Layer Norm
        hidden_states = self.LayerNorm(hidden_states)
        hidden_states = self.dropout(hidden_states, deterministic=deterministic)
        return hidden_states


class FlaxRobertaSelfAttention(nn.Module):
    config: RobertaConfig
    causal: bool = False
    dtype: jnp.dtype = jnp.float32  # the dtype of the computation

    def setup(self):
        self.head_dim = self.config.hidden_size // self.config.num_attention_heads
        if self.config.hidden_size % self.config.num_attention_heads != 0:
            raise ValueError(
                f"`config.hidden_size`: {self.config.hidden_size} has to be a multiple of "
                f"`config.num_attention_heads`: {self.config.num_attention_heads}"
            )

        self.query = nn.Dense(
            self.config.hidden_size,
            dtype=self.dtype,
            kernel_init=jax.nn.initializers.normal(self.config.initializer_range),
        )
        self.key = nn.Dense(
            self.config.hidden_size,
            dtype=self.dtype,
            kernel_init=jax.nn.initializers.normal(self.config.initializer_range),
        )
        self.value = nn.Dense(
            self.config.hidden_size,
            dtype=self.dtype,
            kernel_init=jax.nn.initializers.normal(self.config.initializer_range),
        )

        if self.causal:
            self.causal_mask = make_causal_mask(
                jnp.ones((1, self.config.max_position_embeddings), dtype="bool"), dtype="bool"
            )

    def _split_heads(self, hidden_states):
        return hidden_states.reshape(hidden_states.shape[:2] + (self.config.num_attention_heads, self.head_dim))

    def _merge_heads(self, hidden_states):
        return hidden_states.reshape(hidden_states.shape[:2] + (self.config.hidden_size,))

    @nn.compact
    def _concatenate_to_cache(self, key, value, query, attention_mask):
        """
        This function takes projected key, value states from a single input token and concatenates the states to cached
        states from previous steps. This function is slightly adapted from the official Flax repository:
        https://github.com/google/flax/blob/491ce18759622506588784b4fca0e4bf05f8c8cd/flax/linen/attention.py#L252
        """
        # detect if we're initializing by absence of existing cache data.
        is_initialized = self.has_variable("cache", "cached_key")
        cached_key = self.variable("cache", "cached_key", jnp.zeros, key.shape, key.dtype)
        cached_value = self.variable("cache", "cached_value", jnp.zeros, value.shape, value.dtype)
        cache_index = self.variable("cache", "cache_index", lambda: jnp.array(0, dtype=jnp.int32))

        if is_initialized:
            *batch_dims, max_length, num_heads, depth_per_head = cached_key.value.shape
            # update key, value caches with our new 1d spatial slices
            cur_index = cache_index.value
            indices = (0,) * len(batch_dims) + (cur_index, 0, 0)
            key = lax.dynamic_update_slice(cached_key.value, key, indices)
            value = lax.dynamic_update_slice(cached_value.value, value, indices)
            cached_key.value = key
            cached_value.value = value
            num_updated_cache_vectors = query.shape[1]
            cache_index.value = cache_index.value + num_updated_cache_vectors
            # causal mask for cached decoder self-attention: our single query position should only attend to
            # those key positions that have already been generated and cached, not the remaining zero elements.
            pad_mask = jnp.broadcast_to(
                jnp.arange(max_length) < cur_index + num_updated_cache_vectors,
                tuple(batch_dims) + (1, num_updated_cache_vectors, max_length),
            )
            attention_mask = combine_masks(pad_mask, attention_mask)
        return key, value, attention_mask

    def __call__(
        self,
        hidden_states,
        attention_mask,
        layer_head_mask,
        key_value_states: Optional[jnp.ndarray] = None,
        init_cache: bool = False,
        deterministic=True,
        output_attentions: bool = False,
    ):
        # if key_value_states are provided this layer is used as a cross-attention layer for the decoder
        is_cross_attention = key_value_states is not None
        batch_size = hidden_states.shape[0]

        # get query proj
        query_states = self.query(hidden_states)
        # get key, value proj
        if is_cross_attention:
            # cross attention: keys/values come from the encoder states
            key_states = self.key(key_value_states)
            value_states = self.value(key_value_states)
        else:
            # self attention
            key_states = self.key(hidden_states)
            value_states = self.value(hidden_states)

        query_states = self._split_heads(query_states)
        key_states = self._split_heads(key_states)
        value_states = self._split_heads(value_states)

        # handle cache, prepare causal attention mask
        if self.causal:
            query_length, key_length = query_states.shape[1], key_states.shape[1]
            if self.has_variable("cache", "cached_key"):
                mask_shift = self.variables["cache"]["cache_index"]
                max_decoder_length = self.variables["cache"]["cached_key"].shape[1]
                causal_mask = lax.dynamic_slice(
                    self.causal_mask, (0, 0, mask_shift, 0), (1, 1, query_length, max_decoder_length)
                )
            else:
                causal_mask = self.causal_mask[:, :, :query_length, :key_length]
            causal_mask = jnp.broadcast_to(causal_mask, (batch_size,) + causal_mask.shape[1:])

        # combine masks if needed
        if attention_mask is not None and self.causal:
            attention_mask = jnp.broadcast_to(jnp.expand_dims(attention_mask, axis=(-3, -2)), causal_mask.shape)
            attention_mask = combine_masks(attention_mask, causal_mask)
        elif self.causal:
            attention_mask = causal_mask
        elif attention_mask is not None:
            attention_mask = jnp.expand_dims(attention_mask, axis=(-3, -2))

        # During fast autoregressive decoding, we feed one position at a time,
        # and cache the keys and values step by step.
        if self.causal and (self.has_variable("cache", "cached_key") or init_cache):
            key_states, value_states, attention_mask = self._concatenate_to_cache(
                key_states, value_states, query_states, attention_mask
            )

        # Convert the boolean attention mask to an attention bias.
        if attention_mask is not None:
            # attention mask in the form of attention bias
            attention_bias = lax.select(
                attention_mask > 0,
                jnp.full(attention_mask.shape, 0.0).astype(self.dtype),
                jnp.full(attention_mask.shape, jnp.finfo(self.dtype).min).astype(self.dtype),
            )
        else:
            attention_bias = None

        dropout_rng = None
        if not deterministic and self.config.attention_probs_dropout_prob > 0.0:
            dropout_rng = self.make_rng("dropout")

        attn_weights = dot_product_attention_weights(
            query_states,
            key_states,
            bias=attention_bias,
            dropout_rng=dropout_rng,
            dropout_rate=self.config.attention_probs_dropout_prob,
            broadcast_dropout=True,
            deterministic=deterministic,
            dtype=self.dtype,
            precision=None,
        )

        # Mask heads if we want to
        if layer_head_mask is not None:
            attn_weights = jnp.einsum("...hqk,h->...hqk", attn_weights, layer_head_mask)

        attn_output = jnp.einsum("...hqk,...khd->...qhd", attn_weights, value_states)
        attn_output = attn_output.reshape(attn_output.shape[:2] + (-1,))

        outputs = (attn_output, attn_weights) if output_attentions else (attn_output,)
        return outputs


class FlaxRobertaSelfOutput(nn.Module):
    config: RobertaConfig
    dtype: jnp.dtype = jnp.float32  # the dtype of the computation

    def setup(self):
        self.dense = nn.Dense(
            self.config.hidden_size,
            kernel_init=jax.nn.initializers.normal(self.config.initializer_range),
            dtype=self.dtype,
        )
        self.LayerNorm = nn.LayerNorm(epsilon=self.config.layer_norm_eps, dtype=self.dtype)
        self.dropout = nn.Dropout(rate=self.config.hidden_dropout_prob)

    def __call__(self, hidden_states, input_tensor, deterministic: bool = True):
        hidden_states = self.dense(hidden_states)
        hidden_states = self.dropout(hidden_states, deterministic=deterministic)
        hidden_states = self.LayerNorm(hidden_states + input_tensor)
        return hidden_states


class FlaxRobertaAttention(nn.Module):
    config: RobertaConfig
    causal: bool = False
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        self.self = FlaxRobertaSelfAttention(self.config, causal=self.causal, dtype=self.dtype)
        self.output = FlaxRobertaSelfOutput(self.config, dtype=self.dtype)

    def __call__(
        self,
        hidden_states,
        attention_mask,
        layer_head_mask,
        key_value_states=None,
        init_cache=False,
        deterministic=True,
        output_attentions: bool = False,
    ):
        attn_outputs = self.self(
            hidden_states,
            attention_mask,
            layer_head_mask=layer_head_mask,
            key_value_states=key_value_states,
            init_cache=init_cache,
            deterministic=deterministic,
            output_attentions=output_attentions,
        )
        attn_output = attn_outputs[0]
        hidden_states = self.output(attn_output, hidden_states, deterministic=deterministic)

        outputs = (hidden_states,)

        if output_attentions:
            outputs += (attn_outputs[1],)

        return outputs


class FlaxRobertaIntermediate(nn.Module):
    config: RobertaConfig
    dtype: jnp.dtype = jnp.float32  # the dtype of the computation

    def setup(self):
        self.dense = nn.Dense(
            self.config.intermediate_size,
            kernel_init=jax.nn.initializers.normal(self.config.initializer_range),
            dtype=self.dtype,
        )
        self.activation = ACT2FN[self.config.hidden_act]

    def __call__(self, hidden_states):
        hidden_states = self.dense(hidden_states)
        hidden_states = self.activation(hidden_states)
        return hidden_states


class FlaxRobertaOutput(nn.Module):
    config: RobertaConfig
    dtype: jnp.dtype = jnp.float32  # the dtype of the computation

    def setup(self):
        self.dense = nn.Dense(
            self.config.hidden_size,
            kernel_init=jax.nn.initializers.normal(self.config.initializer_range),
            dtype=self.dtype,
        )
        self.dropout = nn.Dropout(rate=self.config.hidden_dropout_prob)
        self.LayerNorm = nn.LayerNorm(epsilon=self.config.layer_norm_eps, dtype=self.dtype)

    def __call__(self, hidden_states, attention_output, deterministic: bool = True):
        hidden_states = self.dense(hidden_states)
        hidden_states = self.dropout(hidden_states, deterministic=deterministic)
        hidden_states = self.LayerNorm(hidden_states + attention_output)
        return hidden_states


class FlaxRobertaLayer(nn.Module):
    config: RobertaConfig
    dtype: jnp.dtype = jnp.float32  # the dtype of the computation

    def setup(self):
        self.attention = FlaxRobertaAttention(self.config, causal=self.config.is_decoder, dtype=self.dtype)
        self.intermediate = FlaxRobertaIntermediate(self.config, dtype=self.dtype)
        self.output = FlaxRobertaOutput(self.config, dtype=self.dtype)
        if self.config.add_cross_attention:
            self.crossattention = FlaxRobertaAttention(self.config, causal=False, dtype=self.dtype)

    def __call__(
        self,
        hidden_states,
        attention_mask,
        layer_head_mask,
        encoder_hidden_states: Optional[jnp.ndarray] = None,
        encoder_attention_mask: Optional[jnp.ndarray] = None,
        init_cache: bool = False,
        deterministic: bool = True,
        output_attentions: bool = False,
    ):
        # Self Attention
        attention_outputs = self.attention(
            hidden_states,
            attention_mask,
            layer_head_mask=layer_head_mask,
            init_cache=init_cache,
            deterministic=deterministic,
            output_attentions=output_attentions,
        )
        attention_output = attention_outputs[0]

        # Cross-Attention Block
        if encoder_hidden_states is not None:
            cross_attention_outputs = self.crossattention(
                attention_output,
                attention_mask=encoder_attention_mask,
                layer_head_mask=layer_head_mask,
                key_value_states=encoder_hidden_states,
                deterministic=deterministic,
                output_attentions=output_attentions,
            )
            attention_output = cross_attention_outputs[0]

        hidden_states = self.intermediate(attention_output)
        hidden_states = self.output(hidden_states, attention_output, deterministic=deterministic)

        outputs = (hidden_states,)

        if output_attentions:
            outputs += (attention_outputs[1],)
            if encoder_hidden_states is not None:
                outputs += (cross_attention_outputs[1],)
        return outputs


class FlaxRobertaLayerCollection(nn.Module):
    config: RobertaConfig
    dtype: jnp.dtype = jnp.float32  # the dtype of the computation
    gradient_checkpointing: bool = False

    def setup(self):
        if self.gradient_checkpointing:
            FlaxRobertaCheckpointLayer = remat(FlaxRobertaLayer, static_argnums=(5, 6, 7))
            self.layers = [
                FlaxRobertaCheckpointLayer(self.config, name=str(i), dtype=self.dtype)
                for i in range(self.config.num_hidden_layers)
            ]
        else:
            self.layers = [
                FlaxRobertaLayer(self.config, name=str(i), dtype=self.dtype)
                for i in range(self.config.num_hidden_layers)
            ]

    def __call__(
        self,
        hidden_states,
        attention_mask,
        head_mask,
        encoder_hidden_states: Optional[jnp.ndarray] = None,
        encoder_attention_mask: Optional[jnp.ndarray] = None,
        init_cache: bool = False,
        deterministic: bool = True,
        output_attentions: bool = False,
        output_hidden_states: bool = False,
        return_dict: bool = True,
    ):
        all_attentions = () if output_attentions else None
        all_hidden_states = () if output_hidden_states else None
        all_cross_attentions = () if (output_attentions and encoder_hidden_states is not None) else None

        # Check if head_mask has a correct number of layers specified if desired
        if head_mask is not None and head_mask.shape[0] != len(self.layers):
            raise ValueError(
                f"The head_mask should be specified for {len(self.layers)} layers, "
                f"but it is for {head_mask.shape[0]}."
            )

        for i, layer in enumerate(self.layers):
            if output_hidden_states:
                all_hidden_states += (hidden_states,)

            layer_outputs = layer(
                hidden_states,
                attention_mask,
                head_mask[i] if head_mask is not None else None,
                encoder_hidden_states,
                encoder_attention_mask,
                init_cache,
                deterministic,
                output_attentions,
            )

            hidden_states = layer_outputs[0]

            if output_attentions:
                all_attentions += (layer_outputs[1],)

                if encoder_hidden_states is not None:
                    all_cross_attentions += (layer_outputs[2],)

        if output_hidden_states:
            all_hidden_states += (hidden_states,)

        outputs = (hidden_states, all_hidden_states, all_attentions, all_cross_attentions)

        if not return_dict:
            return tuple(v for v in outputs if v is not None)

        return FlaxBaseModelOutputWithPastAndCrossAttentions(
            last_hidden_state=hidden_states,
            hidden_states=all_hidden_states,
            attentions=all_attentions,
            cross_attentions=all_cross_attentions,
        )


class FlaxRobertaEncoder(nn.Module):
    config: RobertaConfig
    dtype: jnp.dtype = jnp.float32  # the dtype of the computation
    gradient_checkpointing: bool = False

    def setup(self):
        self.layer = FlaxRobertaLayerCollection(
            self.config,
            dtype=self.dtype,
            gradient_checkpointing=self.gradient_checkpointing,
        )

    def __call__(
        self,
        hidden_states,
        attention_mask,
        head_mask,
        encoder_hidden_states: Optional[jnp.ndarray] = None,
        encoder_attention_mask: Optional[jnp.ndarray] = None,
        init_cache: bool = False,
        deterministic: bool = True,
        output_attentions: bool = False,
        output_hidden_states: bool = False,
        return_dict: bool = True,
    ):
        return self.layer(
            hidden_states,
            attention_mask,
            head_mask=head_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            init_cache=init_cache,
            deterministic=deterministic,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )


class FlaxRobertaPooler(nn.Module):
    config: RobertaConfig
    dtype: jnp.dtype = jnp.float32  # the dtype of the computation

    def setup(self):
        self.dense = nn.Dense(
            self.config.hidden_size,
            kernel_init=jax.nn.initializers.normal(self.config.initializer_range),
            dtype=self.dtype,
        )

    def __call__(self, hidden_states):
        cls_hidden_state = hidden_states[:, 0]
        cls_hidden_state = self.dense(cls_hidden_state)
        return nn.tanh(cls_hidden_state)


class FlaxRobertaLMHead(nn.Module):
    config: RobertaConfig
    dtype: jnp.dtype = jnp.float32
    bias_init: Callable[..., np.ndarray] = jax.nn.initializers.zeros

    def setup(self):
        self.dense = nn.Dense(
            self.config.hidden_size,
            dtype=self.dtype,
            kernel_init=jax.nn.initializers.normal(self.config.initializer_range),
        )
        self.layer_norm = nn.LayerNorm(epsilon=self.config.layer_norm_eps, dtype=self.dtype)
        self.decoder = nn.Dense(
            self.config.vocab_size,
            dtype=self.dtype,
            use_bias=False,
            kernel_init=jax.nn.initializers.normal(self.config.initializer_range),
        )
        self.bias = self.param("bias", self.bias_init, (self.config.vocab_size,))

    def __call__(self, hidden_states, shared_embedding=None):
        hidden_states = self.dense(hidden_states)
        hidden_states = ACT2FN["gelu"](hidden_states)
        hidden_states = self.layer_norm(hidden_states)

        if shared_embedding is not None:
            hidden_states = self.decoder.apply({"params": {"kernel": shared_embedding.T}}, hidden_states)
        else:
            hidden_states = self.decoder(hidden_states)

        bias = jnp.asarray(self.bias, self.dtype)
        hidden_states += bias
        return hidden_states
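
# A minimal sketch (illustrative, not part of the original module) of how the LM head
# above participates in weight tying: when `config.tie_word_embeddings` is set, the
# masked-LM / causal-LM modules below fetch the word-embedding matrix and pass it into
# `FlaxRobertaLMHead.__call__`, which applies the decoder Dense with that matrix
# transposed as its kernel instead of a separately learned output projection:
#
#     shared_embedding = self.roberta.variables["params"]["embeddings"]["word_embeddings"]["embedding"]
#     logits = self.lm_head(hidden_states, shared_embedding=shared_embedding)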


class FlaxRobertaClassificationHead(nn.Module):
    config: RobertaConfig
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        self.dense = nn.Dense(
            self.config.hidden_size,
            dtype=self.dtype,
            kernel_init=jax.nn.initializers.normal(self.config.initializer_range),
        )
        classifier_dropout = (
            self.config.classifier_dropout
            if self.config.classifier_dropout is not None
            else self.config.hidden_dropout_prob
        )
        self.dropout = nn.Dropout(rate=classifier_dropout)
        self.out_proj = nn.Dense(
            self.config.num_labels,
            dtype=self.dtype,
            kernel_init=jax.nn.initializers.normal(self.config.initializer_range),
        )

    def __call__(self, hidden_states, deterministic=True):
        hidden_states = hidden_states[:, 0, :]  # take <s> token (equiv. to [CLS])
        hidden_states = self.dropout(hidden_states, deterministic=deterministic)
        hidden_states = self.dense(hidden_states)
        hidden_states = nn.tanh(hidden_states)
        hidden_states = self.dropout(hidden_states, deterministic=deterministic)
        hidden_states = self.out_proj(hidden_states)
        return hidden_states


class FlaxRobertaPreTrainedModel(FlaxPreTrainedModel):
    """
    An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
    models.
    """

    config_class = RobertaConfig
    base_model_prefix = "roberta"

    module_class: nn.Module = None

    def __init__(
        self,
        config: RobertaConfig,
        input_shape: Tuple = (1, 1),
        seed: int = 0,
        dtype: jnp.dtype = jnp.float32,
        _do_init: bool = True,
        gradient_checkpointing: bool = False,
        **kwargs,
    ):
        module = self.module_class(config=config, dtype=dtype, gradient_checkpointing=gradient_checkpointing, **kwargs)
        super().__init__(config, module, input_shape=input_shape, seed=seed, dtype=dtype, _do_init=_do_init)

    def enable_gradient_checkpointing(self):
        self._module = self.module_class(
            config=self.config,
            dtype=self.dtype,
            gradient_checkpointing=True,
        )

    def init_weights(self, rng: jax.random.PRNGKey, input_shape: Tuple, params: FrozenDict = None) -> FrozenDict:
        # init input tensors
        input_ids = jnp.zeros(input_shape, dtype="i4")
        token_type_ids = jnp.ones_like(input_ids)
        position_ids = create_position_ids_from_input_ids(input_ids, self.config.pad_token_id)
        attention_mask = jnp.ones_like(input_ids)
        head_mask = jnp.ones((self.config.num_hidden_layers, self.config.num_attention_heads))

        params_rng, dropout_rng = jax.random.split(rng)
        rngs = {"params": params_rng, "dropout": dropout_rng}

        if self.config.add_cross_attention:
            encoder_hidden_states = jnp.zeros(input_shape + (self.config.hidden_size,))
            encoder_attention_mask = attention_mask
            module_init_outputs = self.module.init(
                rngs,
                input_ids,
                attention_mask,
                token_type_ids,
                position_ids,
                head_mask,
                encoder_hidden_states,
                encoder_attention_mask,
                return_dict=False,
            )
        else:
            module_init_outputs = self.module.init(
                rngs, input_ids, attention_mask, token_type_ids, position_ids, head_mask, return_dict=False
            )

        random_params = module_init_outputs["params"]

        if params is not None:
            random_params = flatten_dict(unfreeze(random_params))
            params = flatten_dict(unfreeze(params))
            for missing_key in self._missing_keys:
                params[missing_key] = random_params[missing_key]
            self._missing_keys = set()
            return freeze(unflatten_dict(params))
        else:
            return random_params

    def init_cache(self, batch_size, max_length):
        r"""
        Args:
            batch_size (`int`):
                batch_size used for fast auto-regressive decoding. Defines the batch size of the initialized cache.
            max_length (`int`):
                maximum possible length for auto-regressive decoding. Defines the sequence length of the initialized
                cache.
        """
        # init input variables to retrieve cache
        input_ids = jnp.ones((batch_size, max_length), dtype="i4")
        attention_mask = jnp.ones_like(input_ids, dtype="i4")
        position_ids = jnp.broadcast_to(jnp.arange(jnp.atleast_2d(input_ids).shape[-1]), input_ids.shape)

        init_variables = self.module.init(
            jax.random.PRNGKey(0), input_ids, attention_mask, position_ids, return_dict=False, init_cache=True
        )
        return unfreeze(init_variables["cache"])

    @add_start_docstrings_to_model_forward(ROBERTA_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
    def __call__(
        self,
        input_ids,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        head_mask=None,
        encoder_hidden_states=None,
        encoder_attention_mask=None,
        params: dict = None,
        dropout_rng: jax.random.PRNGKey = None,
        train: bool = False,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
        past_key_values: dict = None,
    ):
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.return_dict

        # init input tensors if not passed
        if token_type_ids is None:
            token_type_ids = jnp.zeros_like(input_ids)

        if position_ids is None:
            position_ids = create_position_ids_from_input_ids(input_ids, self.config.pad_token_id)

        if attention_mask is None:
            attention_mask = jnp.ones_like(input_ids)

        if head_mask is None:
            head_mask = jnp.ones((self.config.num_hidden_layers, self.config.num_attention_heads))

        # Handle any PRNG if needed
        rngs = {}
        if dropout_rng is not None:
            rngs["dropout"] = dropout_rng

        inputs = {"params": params or self.params}

        if self.config.add_cross_attention:
            # if past_key_values are passed then cache is already initialized; a private flag init_cache has to be
            # passed down to ensure cache is used. It has to be made sure that cache is marked as mutable so that
            # it can be changed by FlaxRobertaAttention module
            if past_key_values:
                inputs["cache"] = past_key_values
                mutable = ["cache"]
            else:
                mutable = False

            outputs = self.module.apply(
                inputs,
                jnp.array(input_ids, dtype="i4"),
                jnp.array(attention_mask, dtype="i4"),
                token_type_ids=jnp.array(token_type_ids, dtype="i4"),
                position_ids=jnp.array(position_ids, dtype="i4"),
                head_mask=jnp.array(head_mask, dtype="i4"),
                encoder_hidden_states=encoder_hidden_states,
                encoder_attention_mask=encoder_attention_mask,
                deterministic=not train,
                output_attentions=output_attentions,
                output_hidden_states=output_hidden_states,
                return_dict=return_dict,
                rngs=rngs,
                mutable=mutable,
            )

            # add updated cache to model output
            if past_key_values is not None and return_dict:
                outputs, past_key_values = outputs
                outputs["past_key_values"] = unfreeze(past_key_values["cache"])
                return outputs
            elif past_key_values is not None and not return_dict:
                outputs, past_key_values = outputs
                outputs = outputs[:1] + (unfreeze(past_key_values["cache"]),) + outputs[1:]

        else:
            outputs = self.module.apply(
                inputs,
                jnp.array(input_ids, dtype="i4"),
                jnp.array(attention_mask, dtype="i4"),
                token_type_ids=jnp.array(token_type_ids, dtype="i4"),
                position_ids=jnp.array(position_ids, dtype="i4"),
                head_mask=jnp.array(head_mask, dtype="i4"),
                deterministic=not train,
                output_attentions=output_attentions,
                output_hidden_states=output_hidden_states,
                return_dict=return_dict,
                rngs=rngs,
            )

        return outputs


class FlaxRobertaModule(nn.Module):
    config: RobertaConfig
    dtype: jnp.dtype = jnp.float32  # the dtype of the computation
    add_pooling_layer: bool = True
    gradient_checkpointing: bool = False

    def setup(self):
        self.embeddings = FlaxRobertaEmbeddings(self.config, dtype=self.dtype)
        self.encoder = FlaxRobertaEncoder(
            self.config,
            dtype=self.dtype,
            gradient_checkpointing=self.gradient_checkpointing,
        )
        self.pooler = FlaxRobertaPooler(self.config, dtype=self.dtype)

    def __call__(
        self,
        input_ids,
        attention_mask,
        token_type_ids: Optional[jnp.ndarray] = None,
        position_ids: Optional[jnp.ndarray] = None,
        head_mask: Optional[jnp.ndarray] = None,
        encoder_hidden_states: Optional[jnp.ndarray] = None,
        encoder_attention_mask: Optional[jnp.ndarray] = None,
        init_cache: bool = False,
        deterministic: bool = True,
        output_attentions: bool = False,
        output_hidden_states: bool = False,
        return_dict: bool = True,
    ):
        # make sure `token_type_ids` is correctly initialized when not passed
        if token_type_ids is None:
            token_type_ids = jnp.zeros_like(input_ids)

        # make sure `position_ids` is correctly initialized when not passed
        if position_ids is None:
            position_ids = jnp.broadcast_to(jnp.arange(jnp.atleast_2d(input_ids).shape[-1]), input_ids.shape)

        hidden_states = self.embeddings(
            input_ids, token_type_ids, position_ids, attention_mask, deterministic=deterministic
        )
        outputs = self.encoder(
            hidden_states,
            attention_mask,
            head_mask=head_mask,
            deterministic=deterministic,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            init_cache=init_cache,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )
        hidden_states = outputs[0]
        pooled = self.pooler(hidden_states) if self.add_pooling_layer else None

        if not return_dict:
            # if pooled is None, don't return it
            if pooled is None:
                return (hidden_states,) + outputs[1:]
            return (hidden_states, pooled) + outputs[1:]

        return FlaxBaseModelOutputWithPoolingAndCrossAttentions(
            last_hidden_state=hidden_states,
            pooler_output=pooled,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
            cross_attentions=outputs.cross_attentions,
        )


@add_start_docstrings(
    "The bare RoBERTa Model transformer outputting raw hidden-states without any specific head on top.",
    ROBERTA_START_DOCSTRING,
)
class FlaxRobertaModel(FlaxRobertaPreTrainedModel):
    module_class = FlaxRobertaModule


append_call_sample_docstring(FlaxRobertaModel, _CHECKPOINT_FOR_DOC, FlaxBaseModelOutputWithPooling, _CONFIG_FOR_DOC)


class FlaxRobertaForMaskedLMModule(nn.Module):
    config: RobertaConfig
    dtype: jnp.dtype = jnp.float32
    gradient_checkpointing: bool = False

    def setup(self):
        self.roberta = FlaxRobertaModule(
            config=self.config,
            add_pooling_layer=False,
            dtype=self.dtype,
            gradient_checkpointing=self.gradient_checkpointing,
        )
        self.lm_head = FlaxRobertaLMHead(config=self.config, dtype=self.dtype)

    def __call__(
        self,
        input_ids,
        attention_mask,
        token_type_ids,
        position_ids,
        head_mask,
        deterministic: bool = True,
        output_attentions: bool = False,
        output_hidden_states: bool = False,
        return_dict: bool = True,
    ):
        # Model
        outputs = self.roberta(
            input_ids,
            attention_mask,
            token_type_ids,
            position_ids,
            head_mask,
            deterministic=deterministic,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )

        hidden_states = outputs[0]
        if self.config.tie_word_embeddings:
            shared_embedding = self.roberta.variables["params"]["embeddings"]["word_embeddings"]["embedding"]
        else:
            shared_embedding = None

        # Compute the prediction scores
        logits = self.lm_head(hidden_states, shared_embedding=shared_embedding)

        if not return_dict:
            return (logits,) + outputs[1:]

        return FlaxMaskedLMOutput(
            logits=logits,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
        )


@add_start_docstrings("""RoBERTa Model with a `language modeling` head on top.""", ROBERTA_START_DOCSTRING)
class FlaxRobertaForMaskedLM(FlaxRobertaPreTrainedModel):
    module_class = FlaxRobertaForMaskedLMModule


append_call_sample_docstring(
    FlaxRobertaForMaskedLM,
    _CHECKPOINT_FOR_DOC,
    FlaxBaseModelOutputWithPooling,
    _CONFIG_FOR_DOC,
    mask="<mask>",
)


class FlaxRobertaForSequenceClassificationModule(nn.Module):
    config: RobertaConfig
    dtype: jnp.dtype = jnp.float32
    gradient_checkpointing: bool = False

    def setup(self):
        self.roberta = FlaxRobertaModule(
            config=self.config,
            dtype=self.dtype,
            add_pooling_layer=False,
            gradient_checkpointing=self.gradient_checkpointing,
        )
        self.classifier = FlaxRobertaClassificationHead(config=self.config, dtype=self.dtype)

    def __call__(
        self,
        input_ids,
        attention_mask,
        token_type_ids,
        position_ids,
        head_mask,
        deterministic: bool = True,
        output_attentions: bool = False,
        output_hidden_states: bool = False,
        return_dict: bool = True,
    ):
        # Model
        outputs = self.roberta(
            input_ids,
            attention_mask,
            token_type_ids,
            position_ids,
            head_mask,
            deterministic=deterministic,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )

        sequence_output = outputs[0]
        logits = self.classifier(sequence_output, deterministic=deterministic)

        if not return_dict:
            return (logits,) + outputs[1:]

        return FlaxSequenceClassifierOutput(
            logits=logits,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
        )


@add_start_docstrings(
    """
    Roberta Model transformer with a sequence classification/regression head on top (a linear layer on top of the
    pooled output) e.g. for GLUE tasks.
    """,
    ROBERTA_START_DOCSTRING,
)
class FlaxRobertaForSequenceClassification(FlaxRobertaPreTrainedModel):
    module_class = FlaxRobertaForSequenceClassificationModule


append_call_sample_docstring(
    FlaxRobertaForSequenceClassification,
    _CHECKPOINT_FOR_DOC,
    FlaxSequenceClassifierOutput,
    _CONFIG_FOR_DOC,
)


class FlaxRobertaForMultipleChoiceModule(nn.Module):
    config: RobertaConfig
    dtype: jnp.dtype = jnp.float32
    gradient_checkpointing: bool = False

    def setup(self):
        self.roberta = FlaxRobertaModule(
            config=self.config,
            dtype=self.dtype,
            gradient_checkpointing=self.gradient_checkpointing,
        )
        self.dropout = nn.Dropout(rate=self.config.hidden_dropout_prob)
        self.classifier = nn.Dense(1, dtype=self.dtype)

    def __call__(
        self,
        input_ids,
        attention_mask,
        token_type_ids,
        position_ids,
        head_mask,
        deterministic: bool = True,
        output_attentions: bool = False,
        output_hidden_states: bool = False,
        return_dict: bool = True,
    ):
        num_choices = input_ids.shape[1]
        input_ids = input_ids.reshape(-1, input_ids.shape[-1]) if input_ids is not None else None
        attention_mask = attention_mask.reshape(-1, attention_mask.shape[-1]) if attention_mask is not None else None
        token_type_ids = token_type_ids.reshape(-1, token_type_ids.shape[-1]) if token_type_ids is not None else None
        position_ids = position_ids.reshape(-1, position_ids.shape[-1]) if position_ids is not None else None

        # Model
        outputs = self.roberta(
            input_ids,
            attention_mask,
            token_type_ids,
            position_ids,
            head_mask,
            deterministic=deterministic,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )

        pooled_output = outputs[1]
        pooled_output = self.dropout(pooled_output, deterministic=deterministic)
        logits = self.classifier(pooled_output)

        reshaped_logits = logits.reshape(-1, num_choices)

        if not return_dict:
            return (reshaped_logits,) + outputs[2:]

        return FlaxMultipleChoiceModelOutput(
            logits=reshaped_logits,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
        )


@add_start_docstrings(
    """
    Roberta Model with a multiple choice classification head on top (a linear layer on top of the pooled output and a
    softmax) e.g. for RocStories/SWAG tasks.
    """,
    ROBERTA_START_DOCSTRING,
)
class FlaxRobertaForMultipleChoice(FlaxRobertaPreTrainedModel):
    module_class = FlaxRobertaForMultipleChoiceModule


overwrite_call_docstring(
    FlaxRobertaForMultipleChoice, ROBERTA_INPUTS_DOCSTRING.format("batch_size, num_choices, sequence_length")
)
append_call_sample_docstring(
    FlaxRobertaForMultipleChoice,
    _CHECKPOINT_FOR_DOC,
    FlaxMultipleChoiceModelOutput,
    _CONFIG_FOR_DOC,
)


class FlaxRobertaForTokenClassificationModule(nn.Module):
    config: RobertaConfig
    dtype: jnp.dtype = jnp.float32
    gradient_checkpointing: bool = False

    def setup(self):
        self.roberta = FlaxRobertaModule(
            config=self.config,
            dtype=self.dtype,
            add_pooling_layer=False,
            gradient_checkpointing=self.gradient_checkpointing,
        )
        classifier_dropout = (
            self.config.classifier_dropout
            if self.config.classifier_dropout is not None
            else self.config.hidden_dropout_prob
        )
        self.dropout = nn.Dropout(rate=classifier_dropout)
        self.classifier = nn.Dense(self.config.num_labels, dtype=self.dtype)

    def __call__(
        self,
        input_ids,
        attention_mask,
        token_type_ids,
        position_ids,
        head_mask,
        deterministic: bool = True,
        output_attentions: bool = False,
        output_hidden_states: bool = False,
        return_dict: bool = True,
    ):
        # Model
        outputs = self.roberta(
            input_ids,
            attention_mask,
            token_type_ids,
            position_ids,
            head_mask,
            deterministic=deterministic,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )

        hidden_states = outputs[0]
        hidden_states = self.dropout(hidden_states, deterministic=deterministic)
        logits = self.classifier(hidden_states)

        if not return_dict:
            return (logits,) + outputs[1:]

        return FlaxTokenClassifierOutput(
            logits=logits,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
        )


@add_start_docstrings(
    """
    Roberta Model with a token classification head on top (a linear layer on top of the hidden-states output) e.g. for
    Named-Entity-Recognition (NER) tasks.
    """,
    ROBERTA_START_DOCSTRING,
)
class FlaxRobertaForTokenClassification(FlaxRobertaPreTrainedModel):
    module_class = FlaxRobertaForTokenClassificationModule


append_call_sample_docstring(
    FlaxRobertaForTokenClassification,
    _CHECKPOINT_FOR_DOC,
    FlaxTokenClassifierOutput,
    _CONFIG_FOR_DOC,
)


class FlaxRobertaForQuestionAnsweringModule(nn.Module):
    config: RobertaConfig
    dtype: jnp.dtype = jnp.float32
    gradient_checkpointing: bool = False

    def setup(self):
        self.roberta = FlaxRobertaModule(
            config=self.config,
            dtype=self.dtype,
            add_pooling_layer=False,
            gradient_checkpointing=self.gradient_checkpointing,
        )
        self.qa_outputs = nn.Dense(self.config.num_labels, dtype=self.dtype)

    def __call__(
        self,
        input_ids,
        attention_mask,
        token_type_ids,
        position_ids,
        head_mask,
        deterministic: bool = True,
        output_attentions: bool = False,
        output_hidden_states: bool = False,
        return_dict: bool = True,
    ):
        # Model
        outputs = self.roberta(
            input_ids,
            attention_mask,
            token_type_ids,
            position_ids,
            head_mask,
            deterministic=deterministic,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )

        hidden_states = outputs[0]

        logits = self.qa_outputs(hidden_states)
        start_logits, end_logits = jnp.split(logits, self.config.num_labels, axis=-1)
        start_logits = start_logits.squeeze(-1)
        end_logits = end_logits.squeeze(-1)

        if not return_dict:
            return (start_logits, end_logits) + outputs[1:]

        return FlaxQuestionAnsweringModelOutput(
            start_logits=start_logits,
            end_logits=end_logits,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
        )


@add_start_docstrings(
    """
    Roberta Model with a span classification head on top for extractive question-answering tasks like SQuAD (a linear
    layers on top of the hidden-states output to compute `span start logits` and `span end logits`).
    """,
    ROBERTA_START_DOCSTRING,
)
class FlaxRobertaForQuestionAnswering(FlaxRobertaPreTrainedModel):
    module_class = FlaxRobertaForQuestionAnsweringModule


append_call_sample_docstring(
    FlaxRobertaForQuestionAnswering,
    _CHECKPOINT_FOR_DOC,
    FlaxQuestionAnsweringModelOutput,
    _CONFIG_FOR_DOC,
)


class FlaxRobertaForCausalLMModule(nn.Module):
    config: RobertaConfig
    dtype: jnp.dtype = jnp.float32
    gradient_checkpointing: bool = False

    def setup(self):
        self.roberta = FlaxRobertaModule(
            config=self.config,
            add_pooling_layer=False,
            dtype=self.dtype,
            gradient_checkpointing=self.gradient_checkpointing,
        )
        self.lm_head = FlaxRobertaLMHead(config=self.config, dtype=self.dtype)

    def __call__(
        self,
        input_ids,
        attention_mask,
        position_ids,
        token_type_ids: Optional[jnp.ndarray] = None,
        head_mask: Optional[jnp.ndarray] = None,
        encoder_hidden_states: Optional[jnp.ndarray] = None,
        encoder_attention_mask: Optional[jnp.ndarray] = None,
        init_cache: bool = False,
        deterministic: bool = True,
        output_attentions: bool = False,
        output_hidden_states: bool = False,
        return_dict: bool = True,
    ):
        # Model
        outputs = self.roberta(
            input_ids,
            attention_mask,
            token_type_ids,
            position_ids,
            head_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            init_cache=init_cache,
            deterministic=deterministic,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )

        hidden_states = outputs[0]
        if self.config.tie_word_embeddings:
            shared_embedding = self.roberta.variables["params"]["embeddings"]["word_embeddings"]["embedding"]
        else:
            shared_embedding = None

        # Compute the prediction scores
        logits = self.lm_head(hidden_states, shared_embedding=shared_embedding)

        if not return_dict:
            return (logits,) + outputs[1:]

        return FlaxCausalLMOutputWithCrossAttentions(
            logits=logits,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
            cross_attentions=outputs.cross_attentions,
        )


@add_start_docstrings(
    """
    Roberta Model with a language modeling head on top (a linear layer on top of the hidden-states output) e.g for
    autoregressive tasks.
    """,
    ROBERTA_START_DOCSTRING,
)
class FlaxRobertaForCausalLM(FlaxRobertaPreTrainedModel):
    module_class = FlaxRobertaForCausalLMModule

    def prepare_inputs_for_generation(self, input_ids, max_length, attention_mask: Optional[jax.Array] = None):
        # initializing the cache
        batch_size, seq_length = input_ids.shape

        past_key_values = self.init_cache(batch_size, max_length)
        # Note that usually one would have to put 0's in the attention_mask for x > input_ids.shape[-1] and
        # x < cache_length. But since the decoder uses a causal mask, those positions are masked anyway.
        # Thus, we can create a single static attention_mask here, which is more efficient for compilation.
        extended_attention_mask = jnp.ones((batch_size, max_length), dtype="i4")
        if attention_mask is not None:
            position_ids = attention_mask.cumsum(axis=-1) - 1
            extended_attention_mask = lax.dynamic_update_slice(extended_attention_mask, attention_mask, (0, 0))
        else:
            position_ids = jnp.broadcast_to(jnp.arange(seq_length, dtype="i4")[None, :], (batch_size, seq_length))

        return {
            "past_key_values": past_key_values,
            "attention_mask": extended_attention_mask,
            "position_ids": position_ids,
        }

    def update_inputs_for_generation(self, model_outputs, model_kwargs):
        model_kwargs["past_key_values"] = model_outputs.past_key_values
        model_kwargs["position_ids"] = model_kwargs["position_ids"][:, -1:] + 1
        return model_kwargs


append_call_sample_docstring(
    FlaxRobertaForCausalLM,
    _CHECKPOINT_FOR_DOC,
    FlaxCausalLMOutputWithCrossAttentions,
    _CONFIG_FOR_DOC,
)

__all__ = [
    "FlaxRobertaForCausalLM",
    "FlaxRobertaForMaskedLM",
    "FlaxRobertaForMultipleChoice",
    "FlaxRobertaForQuestionAnswering",
    "FlaxRobertaForSequenceClassification",
    "FlaxRobertaForTokenClassification",
    "FlaxRobertaModel",
    "FlaxRobertaPreTrainedModel",
]