o
    Zh                    @  s  d Z ddlmZ ddlZddlmZmZmZmZm	Z	 ddl
ZddlZddlmZ ddlmZmZmZmZmZ ddlmZmZmZmZmZmZmZmZmZ dd	l m!Z!m"Z"m#Z# dd
l$m%Z%m&Z&m'Z'm(Z( ddl)m*Z* e(+e,Z-dZ.dZ/G dd dej0j1Z2G dd dej0j1Z3G dd dej0j1Z4G dd dej0j1Z5G dd dej0j1Z6G dd dej0j1Z7G dd dej0j1Z8G dd dej0j1Z9G dd  d ej0j1Z:G d!d" d"ej0j1Z;d#d$ Z<d%d& Z=d'd( Z>d)d* Z?d+d, Z@G d-d. d.ej0j1ZAG d/d0 d0ej0j1ZBG d1d2 d2ej0j1ZCG d3d4 d4ej0j1ZDG d5d6 d6ej0j1ZEG d7d8 d8ej0j1ZFG d9d: d:eZGd;ZHd<ZIe&d=eHG d>d? d?eGZJe&d@eHG dAdB dBeGeZKe&dCeHG dDdE dEeGeZLe&dFeHG dGdH dHeGeZMe&dIeHG dJdK dKeGeZNg dLZOdS )MzTF 2.0 DeBERTa model.    )annotationsN)DictOptionalSequenceTupleUnion   )get_tf_activation)TFBaseModelOutputTFMaskedLMOutputTFQuestionAnsweringModelOutputTFSequenceClassifierOutputTFTokenClassifierOutput)	TFMaskedLanguageModelingLossTFModelInputTypeTFPreTrainedModelTFQuestionAnsweringLossTFSequenceClassificationLossTFTokenClassificationLossget_initializerkerasunpack_inputs)check_embeddings_within_bounds
shape_liststable_softmax)add_code_sample_docstringsadd_start_docstrings%add_start_docstrings_to_model_forwardlogging   )DebertaConfigr    zkamalkraj/deberta-basec                      sB   e Zd Zd fddZdddd	ZedddZdddZ  ZS )TFDebertaContextPoolerconfigr    c                   s@   t  jdi | tjj|jdd| _t|jdd| _	|| _
d S )Ndensenamedropout )super__init__r   layersDensepooler_hidden_sizer#   TFDebertaStableDropoutZpooler_dropoutr&   r"   selfr"   kwargs	__class__r'   ^/var/www/auris/lib/python3.10/site-packages/transformers/models/deberta/modeling_tf_deberta.pyr)   9   s   
zTFDebertaContextPooler.__init__Ftrainingboolc                 C  s<   |d d df }| j ||d}| |}t| jj|}|S )Nr   r4   )r&   r#   r	   r"   Zpooler_hidden_act)r/   hidden_statesr4   Zcontext_tokenpooled_outputr'   r'   r3   call?   s
   
zTFDebertaContextPooler.callreturnintc                 C     | j jS N)r"   hidden_sizer/   r'   r'   r3   
output_dimH   s   z!TFDebertaContextPooler.output_dimNc                 C  s   | j rd S d| _ t| dd d ur2t| jj | jd d | jjg W d    n1 s-w   Y  t| dd d urZt| j	j | j	d  W d    d S 1 sSw   Y  d S d S )NTr#   r&   )
builtgetattrtf
name_scoper#   r%   buildr"   r,   r&   r/   input_shaper'   r'   r3   rE   L   s   "zTFDebertaContextPooler.buildr"   r    Fr4   r5   )r:   r;   r=   )	__name__
__module____qualname__r)   r9   propertyr@   rE   __classcell__r'   r'   r1   r3   r!   8   s    	r!   c                      s,   e Zd ZdZd
 fdd	Zddd	Z  ZS )TFDebertaXSoftmaxa>  
    Masked Softmax which is optimized for saving memory

    Args:
        input (`tf.Tensor`): The input tensor that will apply softmax.
        mask (`tf.Tensor`): The mask matrix where 0 indicate that element will be ignored in the softmax calculation.
        dim (int): The dimension that will apply softmax
    c                      t  jdi | || _d S Nr'   )r(   r)   axis)r/   rT   r0   r1   r'   r3   r)   b      
zTFDebertaXSoftmax.__init__inputs	tf.Tensormaskc                 C  s\   t t |t j}t |t jtd| jd|}tt j|t jd| j	}t |d|}|S )Nz-infdtype        )
rC   Zlogical_notcastr5   wherefloatcompute_dtyper   Zfloat32rT   )r/   rV   rX   Zrmaskoutputr'   r'   r3   r9   f   s
   zTFDebertaXSoftmax.call)rQ   )rV   rW   rX   rW   )rK   rL   rM   __doc__r)   r9   rO   r'   r'   r1   r3   rP   X   s    	rP   c                      s:   e Zd ZdZ fddZejdd Zddd
dZ  Z	S )r-   z
    Optimized dropout module for stabilizing the training

    Args:
        drop_prob (float): the dropout probabilities
    c                   rR   rS   )r(   r)   	drop_prob)r/   rb   r0   r1   r'   r3   r)   v   rU   zTFDebertaStableDropout.__init__c                   s   t dt jjjjdj djt|d t j	 t j
ddj  jdjdkr:t  t jdjd| } fdd	}||fS )
z~
        Applies dropout to the inputs, as vanilla dropout, but also scales the remaining elements up by 1/drop_prob.
        r   g      ?)Zprobs)Zsample_shaperY   r   r[   c                   s,   j dkrt tjdjd|  S | S )Nr   r[   rY   )rb   rC   r]   r\   r_   )ZupstreamrX   scaler/   r'   r3   grad   s   
z-TFDebertaStableDropout.xdropout.<locals>.grad)rC   r\   compatv1distributionsZ	Bernoullirb   sampler   r5   Zconvert_to_tensorr_   r]   )r/   rV   re   r'   rc   r3   xdropoutz   s   "
zTFDebertaStableDropout.xdropoutFrV   rW   r4   c                 C  s   |r|  |S |S r=   )rj   )r/   rV   r4   r'   r'   r3   r9      s   
zTFDebertaStableDropout.callrI   )rV   rW   r4   rW   )
rK   rL   rM   ra   r)   rC   Zcustom_gradientrj   r9   rO   r'   r'   r1   r3   r-   n   s    
r-   c                      s8   e Zd ZdZd fdd	Z fddZdd
dZ  ZS )TFDebertaLayerNormzBLayerNorm module in the TF style (epsilon inside the square root).-q=c                   s"   t  jdi | || _|| _d S rS   )r(   r)   sizeeps)r/   rm   rn   r0   r1   r'   r3   r)      s   
zTFDebertaLayerNorm.__init__c                   s@   | j | jgt dd| _| j | jgt dd| _t |S )Nweight)shapeinitializerr%   bias)	
add_weightrm   rC   Zones_initializergammaZzeros_initializerbetar(   rE   rF   r1   r'   r3   rE      s   zTFDebertaLayerNorm.buildxrW   r:   c                 C  sX   t j|dgdd}t jt || dgdd}t j|| j }| j||  | | j S )NrQ   T)rT   Zkeepdims)rC   Zreduce_meanZsquaremathsqrtrn   rt   ru   )r/   rv   meanZvarianceZstdr'   r'   r3   r9      s   zTFDebertaLayerNorm.call)rl   )rv   rW   r:   rW   rK   rL   rM   ra   r)   rE   r9   rO   r'   r'   r1   r3   rk      s
    rk   c                      s4   e Zd Zd fddZdddd	ZdddZ  ZS )TFDebertaSelfOutputr"   r    c                   sT   t  jdi | tjj|jdd| _tjj|jdd| _	t
|jdd| _|| _d S )Nr#   r$   	LayerNormepsilonr%   r&   r'   )r(   r)   r   r*   r+   r>   r#   LayerNormalizationlayer_norm_epsr|   r-   hidden_dropout_probr&   r"   r.   r1   r'   r3   r)      s
   
zTFDebertaSelfOutput.__init__Fr4   r5   c                 C  s*   |  |}| j||d}| || }|S )Nr6   r#   r&   r|   r/   r7   input_tensorr4   r'   r'   r3   r9      s   
zTFDebertaSelfOutput.callNc                 C  s  | j rd S d| _ t| dd d ur2t| jj | jd d | jjg W d    n1 s-w   Y  t| dd d ur\t| j	j | j	d d | jjg W d    n1 sWw   Y  t| dd d urt| j
j | j
d  W d    d S 1 s}w   Y  d S d S NTr#   r|   r&   )rA   rB   rC   rD   r#   r%   rE   r"   r>   r|   r&   rF   r'   r'   r3   rE          "zTFDebertaSelfOutput.buildrH   rI   rJ   r=   rK   rL   rM   r)   r9   rE   rO   r'   r'   r1   r3   r{      s    r{   c                      >   e Zd Zd fddZ					ddddZdddZ  ZS )TFDebertaAttentionr"   r    c                   s8   t  jdi | t|dd| _t|dd| _|| _d S )Nr/   r$   r`   r'   )r(   r)   "TFDebertaDisentangledSelfAttentionr/   r{   dense_outputr"   r.   r1   r'   r3   r)      s   
zTFDebertaAttention.__init__NFr   rW   attention_maskquery_statesOptional[tf.Tensor]relative_posrel_embeddingsoutput_attentionsr5   r4   r:   Tuple[tf.Tensor]c              	   C  sN   | j |||||||d}|d u r|}| j|d ||d}	|	f|dd   }
|
S )Nr7   r   r   r   r   r   r4   r   r7   r   r4   r   )r/   r   )r/   r   r   r   r   r   r   r4   Zself_outputsattention_outputr`   r'   r'   r3   r9      s    
	
zTFDebertaAttention.callc                 C     | j rd S d| _ t| dd d ur-t| jj | jd  W d    n1 s(w   Y  t| dd d urUt| jj | jd  W d    d S 1 sNw   Y  d S d S )NTr/   r   )rA   rB   rC   rD   r/   r%   rE   r   rF   r'   r'   r3   rE         "zTFDebertaAttention.buildrH   NNNFF)r   rW   r   rW   r   r   r   r   r   r   r   r5   r4   r5   r:   r   r=   r   r'   r'   r1   r3   r      s    
r   c                      2   e Zd Zd fddZddd	ZdddZ  ZS )TFDebertaIntermediater"   r    c                   sZ   t  jdi | tjj|jt|jdd| _t	|j
tr$t|j
| _n|j
| _|| _d S )Nr#   Zunitskernel_initializerr%   r'   )r(   r)   r   r*   r+   intermediate_sizer   initializer_ranger#   
isinstance
hidden_actstrr	   intermediate_act_fnr"   r.   r1   r'   r3   r)      s   
zTFDebertaIntermediate.__init__r7   rW   r:   c                 C  s   | j |d}| |}|S NrV   )r#   r   r/   r7   r'   r'   r3   r9     s   
zTFDebertaIntermediate.callNc                 C  sn   | j rd S d| _ t| dd d ur5t| jj | jd d | jjg W d    d S 1 s.w   Y  d S d S )NTr#   )	rA   rB   rC   rD   r#   r%   rE   r"   r>   rF   r'   r'   r3   rE     s   "zTFDebertaIntermediate.buildrH   r7   rW   r:   rW   r=   r   r'   r'   r1   r3   r      s    
r   c                      s4   e Zd Zd fddZddddZdddZ  ZS )TFDebertaOutputr"   r    c                   s\   t  jdi | tjj|jt|jdd| _tjj	|j
dd| _t|jdd| _|| _d S )Nr#   r   r|   r}   r&   r$   r'   )r(   r)   r   r*   r+   r>   r   r   r#   r   r   r|   r-   r   r&   r"   r.   r1   r'   r3   r)     s   
zTFDebertaOutput.__init__Fr7   rW   r   r4   r5   r:   c                 C  s,   | j |d}| j||d}| || }|S )Nr   r6   r   r   r'   r'   r3   r9     s   zTFDebertaOutput.callNc                 C  s  | j rd S d| _ t| dd d ur2t| jj | jd d | jjg W d    n1 s-w   Y  t| dd d ur\t| j	j | j	d d | jj
g W d    n1 sWw   Y  t| dd d urt| jj | jd  W d    d S 1 s}w   Y  d S d S r   )rA   rB   rC   rD   r#   r%   rE   r"   r   r|   r>   r&   rF   r'   r'   r3   rE   &  r   zTFDebertaOutput.buildrH   rI   )r7   rW   r   rW   r4   r5   r:   rW   r=   r   r'   r'   r1   r3   r     s    
r   c                      r   )TFDebertaLayerr"   r    c                   s@   t  jdi | t|dd| _t|dd| _t|dd| _d S )N	attentionr$   intermediater`   r'   )r(   r)   r   r   r   r   r   bert_outputr.   r1   r'   r3   r)   6  s   zTFDebertaLayer.__init__NFr7   rW   r   r   r   r   r   r   r5   r4   r:   r   c              	   C  sR   | j |||||||d}|d }	| j|	d}
| j|
|	|d}|f|dd   }|S )N)r   r   r   r   r   r   r4   r   r7   r   r   )r   r   r   )r/   r7   r   r   r   r   r   r4   Zattention_outputsr   Zintermediate_outputZlayer_outputoutputsr'   r'   r3   r9   =  s    
	zTFDebertaLayer.callc                 C  s   | j rd S d| _ t| dd d ur-t| jj | jd  W d    n1 s(w   Y  t| dd d urRt| jj | jd  W d    n1 sMw   Y  t| dd d urzt| jj | jd  W d    d S 1 ssw   Y  d S d S )NTr   r   r   )	rA   rB   rC   rD   r   r%   rE   r   r   rF   r'   r'   r3   rE   Y  s    "zTFDebertaLayer.buildrH   r   r7   rW   r   rW   r   r   r   r   r   r   r   r5   r4   r5   r:   r   r=   r   r'   r'   r1   r3   r   5  s    r   c                      sZ   e Zd Zd fddZd ddZdd	 Zd
d Zd!ddZ						d"d#ddZ  Z	S )$TFDebertaEncoderr"   r    c                   sr   t  jdi |  fddt jD | _t dd| _ | _| jr5t dd| _| jdk r7 j	| _d S d S d S )	Nc                   s   g | ]}t  d | dqS )zlayer_._r$   )r   ).0ir"   r'   r3   
<listcomp>l  s    z-TFDebertaEncoder.__init__.<locals>.<listcomp>relative_attentionFmax_relative_positionsrQ   r   r'   )
r(   r)   rangeZnum_hidden_layerslayerrB   r   r"   r   max_position_embeddingsr.   r1   r   r3   r)   i  s   
zTFDebertaEncoder.__init__Nc              	   C  s   | j rd S d| _ | jr| jd| jd | jjgt| jjd| _t	| dd d urH| j
D ]}t|j |d  W d    n1 sBw   Y  q*d S d S )NTzrel_embeddings.weight   r%   rp   rq   r   )rA   r   rs   r   r"   r>   r   r   r   rB   r   rC   rD   r%   rE   )r/   rG   r   r'   r'   r3   rE   t  s"   

zTFDebertaEncoder.buildc                 C  s   | j r| j}|S d }|S r=   )r   r   )r/   r   r'   r'   r3   get_rel_embedding  s   z"TFDebertaEncoder.get_rel_embeddingc                 C  sn   t t|dkr'tt|dd}|tt|dd }t|tj}|S t t|dkr5t|d}|S )Nr   r   rQ   r   )lenr   rC   expand_dimssqueezer\   Zuint8)r/   r   Zextended_attention_maskr'   r'   r3   get_attention_mask  s   z#TFDebertaEncoder.get_attention_maskc                 C  sD   | j r |d u r |d urt|d nt|d }t|t|d }|S )Nr   )r   r   build_relative_position)r/   r7   r   r   qr'   r'   r3   get_rel_pos  s    zTFDebertaEncoder.get_rel_posFTr7   rW   r   r   r   r   r   r5   output_hidden_statesreturn_dictr4   r:   *Union[TFBaseModelOutput, Tuple[tf.Tensor]]c	              
   C  s  |rdnd }	|r
dnd }
|  |}| |||}t|tr"|d }n|}|  }t| jD ]B\}}|r8|	|f }	||||||||d}|d }|d urd|}t|trc|d t| jk ra||d  nd }n|}|ro|
|d f }
q-|rw|	|f }	|stdd ||	|
fD S t	||	|
dS )Nr'   r   r   r   c                 s  s    | ]	}|d ur|V  qd S r=   r'   )r   vr'   r'   r3   	<genexpr>  s    z(TFDebertaEncoder.call.<locals>.<genexpr>Zlast_hidden_stater7   
attentions)
r   r   r   r   r   	enumerater   r   tupler
   )r/   r7   r   r   r   r   r   r   r4   Zall_hidden_statesZall_attentionsZnext_kvr   r   Zlayer_moduleZlayer_outputsr'   r'   r3   r9     sJ   



	
"
zTFDebertaEncoder.callrH   r=   )NN)NNFFTF)r7   rW   r   rW   r   r   r   r   r   r5   r   r5   r   r5   r4   r5   r:   r   )
rK   rL   rM   r)   rE   r   r   r   r9   rO   r'   r'   r1   r3   r   h  s    



r   c                 C  s|   t j| t jd}t j|t jd}|dddf t t |ddg| dg }|d| ddf }t j|dd}t |t jS )a  
    Build relative position according to the query and key

    We assume the absolute position of query \(P_q\) is range from (0, query_size) and the absolute position of key
    \(P_k\) is range from (0, key_size), The relative positions from query to key is \(R_{q \rightarrow k} = P_q -
    P_k\)

    Args:
        query_size (int): the length of query
        key_size (int): the length of key

    Return:
        `tf.Tensor`: A tensor with shape [1, query_size, key_size]

    rY   Nr   rQ   r   rT   )rC   r   Zint32Ztilereshaper   r\   int64)Z
query_sizeZkey_sizeZq_idsZk_idsZrel_pos_idsr'   r'   r3   r     s   ,r   c                 C  s8   t |d t |d t |d t |d g}t| |S )Nr   r   r   rQ   r   rC   Zbroadcast_to)c2p_posquery_layerr   shapesr'   r'   r3   c2p_dynamic_expand     



r   c                 C  s8   t |d t |d t |d t |d g}t| |S )Nr   r   r   r   )r   r   	key_layerr   r'   r'   r3   p2c_dynamic_expand  r   r   c                 C  s4   t |d d t | d t |d g }t| |S )Nr   r   r   )	pos_indexp2c_attr   r   r'   r'   r3   pos_dynamic_expand   s   (r   c                 C  s  |dk rt | | }|t | d kr:t | d | }t jt t | |dd}t j| |d} t j||d}nd}t | dt | d f}t |dt |d f}t j||dd}t |t |}|dkrt jt t | | dd}t j||d}|S )Nr   r   r   permrQ   )Z
batch_dims)rC   ZrankZrollr   	transposer   rp   gather)rv   indicesZgather_axisZpre_rollZpermutationZflat_xZflat_indicesZgatheredr'   r'   r3   torch_gather  s    r   c                      sT   e Zd ZdZd fddZdddZdddZ					d d!ddZdd Z  Z	S )"r   a  
    Disentangled self-attention module

    Parameters:
        config (`str`):
            A model config class instance with the configuration to build a new model. The schema is similar to
            *BertConfig*, for more details, please refer [`DebertaConfig`]

    r"   r    c                   s  t  jdi | |j|j dkrtd|j d|j d|j| _t|j|j | _| j| j | _tj	j
| jd t|jddd| _|jd urK|jng | _t|d	d| _t|d
d| _| jr}tj	j
| jt|jddd| _tj	j
| jt|jddd| _tdd| _| jrt|dd| _| jdk r|j| _t|jdd| _d| jv rtj	j
| jt|jddd| _d| jv rtj	j
| jt|jdd| _t|jdd| _|| _d S )Nr   zThe hidden size (z6) is not a multiple of the number of attention heads ()r   in_projFr   r%   Zuse_biasr   talking_headhead_logits_projhead_weights_projrQ   r   r   r   pos_dropoutr$   c2ppos_projp2c
pos_q_proj)r   r%   r&   r'   ) r(   r)   r>   num_attention_heads
ValueErrorr;   Zattention_head_sizeall_head_sizer   r*   r+   r   r   r   pos_att_typerB   r   r   r   r   rP   softmaxr   r   r-   r   r   r   r   Zattention_probs_dropout_probr&   r"   r.   r1   r'   r3   r)   (  sf   




z+TFDebertaDisentangledSelfAttention.__init__Nc                 C  sr  | j rd S d| _ | jd| jtj d| _| jd| jtj d| _t| dd d urLt	
| jj | jd d | jjg W d    n1 sGw   Y  t| dd d urqt	
| jj | jd  W d    n1 slw   Y  t| dd d urt	
| jj | jd  W d    n1 sw   Y  t| dd d urt	
| jj | jd  W d    n1 sw   Y  t| d	d d urt	
| jj | jd  W d    n1 sw   Y  t| d
d d ur
t	
| jj | j| jjg W d    n	1 sw   Y  t| dd d ur7t	
| jj | j| jjg W d    d S 1 s0w   Y  d S d S )NTq_biasr   v_biasr   r&   r   r   r   r   r   )rA   rs   r   r   ZinitializersZZerosr   r   rB   rC   rD   r   r%   rE   r"   r>   r&   r   r   r   r   r   rF   r'   r'   r3   rE   a  sL   $z(TFDebertaDisentangledSelfAttention.buildtensorrW   r:   c                 C  s:   t |d d | jdg }tj||d}tj|g ddS )NrQ   r   rp   r   r   r   r   r   )r   r   rC   r   r   )r/   r   rp   r'   r'   r3   transpose_for_scores  s   z7TFDebertaDisentangledSelfAttention.transpose_for_scoresFr7   r   r   r   r   r   r   r5   r4   r   c              	   C  s  |du r|  |}tj| |ddd\}	}
}n~dd }tjt| j jd | jd dd}tj| jdd}t	dD ](}tj| j| jd}t	| jD ]}|
|||d |  }qL|
|| }q;dgd }||d |d |}||d	 |d	 |}||d
 |d
 |}| |}	| |}
| |}|	| | jddddf  }	|| | jddddf  }d}d	t| j }tt|	d | }|	| }	t|	t|
g d}| jr| j||d}| |	|
|||}|dur|| }| jrt| t|g dg d}| ||}| j||d}| jr*t| t|g dg d}t||}t|g d}t|}|dd |d |d  g }t||}|rZ||f}|S |f}|S )a  
        Call the module

        Args:
            hidden_states (`tf.Tensor`):
                Input states to the module usually the output from previous layer, it will be the Q,K and V in
                *Attention(Q,K,V)*

            attention_mask (`tf.Tensor`):
                An attention mask matrix of shape [*B*, *N*, *N*] where *B* is the batch size, *N* is the maximum
                sequence length in which element [i,j] = *1* means the *i* th token in the input can attend to the *j*
                th token.

            return_att (`bool`, *optional*):
                Whether return the attention matrix.

            query_states (`tf.Tensor`, *optional*):
                The *Q* state in *Attention(Q,K,V)*.

            relative_pos (`tf.Tensor`):
                The relative position encoding between the tokens in the sequence. It's of shape [*B*, *N*, *N*] with
                values ranging in [*-max_relative_positions*, *max_relative_positions*].

            rel_embeddings (`tf.Tensor`):
                The embedding of relative distances. It's a tensor of shape [\(2 \times
                \text{max_relative_positions}\), *hidden_size*].


        Nr   rQ   )num_or_size_splitsrT   c                 S  s*   t j|| dd}|d ur|t |7 }|S )NT)transpose_b)rC   matmulr   )wbrv   outr'   r'   r3   linear  s   z7TFDebertaDisentangledSelfAttention.call.<locals>.linearr   )rZ   rm   r   r   r   r   r   r   r6   )r   r   r   r   )r   r   r   r   r   r   )r   rC   splitr   r   ro   r   ZTensorArrayrZ   r   writeconcatr   r   r   r   rw   rx   r   r   r   r   disentangled_att_biasr   r   r   r&   r   r   )r/   r7   r   r   r   r   r   r4   Zqpr   r   Zvalue_layerr   wsZqkvwkZqkvw_insider   Zqkvbr   r   Zrel_attscale_factorrd   Zattention_scoresZattention_probsZcontext_layerZcontext_layer_shapeZnew_context_layer_shaper   r'   r'   r3   r9     sj   '




z'TFDebertaDisentangledSelfAttention.callc              	   C  s  |d u rt |d }t|t |d }t |}t|dkr(tt|dd}nt|dkr5t|d}nt|dkrDtdt| tttt |d t |d | j	tj
}t|| j	| | j	| d d f d}d}	d| jv r| |}
| |
}
t|t|
g d	}t|| d|d d }t|t|||d
}|	|7 }	d| jv r?| |}| |}|tjtjt |d
 | | jd }t |d t |d krtt |d t |d }n|}t| | d|d d }t|t|g d	}tt|t|||d
g d	}t |d t |d kr;t|d d d d d d df d
}t|t|||d}|	|7 }	|	S )Nr   r   r   r   r      z2Relative position ids must be of dim 2 or 3 or 4. r   r   rQ   r   rY   )r   r   r   rC   r   r   r\   minimummaximumr   r   r   r   r   r   r   Zclip_by_valuer   r   r   rw   rx   r_   r   r   )r/   r   r   r   r   r  r   Zshape_list_posZatt_spanZscoreZpos_key_layerZc2p_attr   Zpos_query_layerZr_posZp2c_posr   r   r'   r'   r3   r    s\    




$z8TFDebertaDisentangledSelfAttention.disentangled_att_biasrH   r=   )r   rW   r:   rW   r   r   )
rK   rL   rM   ra   r)   rE   r   r9   r  rO   r'   r'   r1   r3   r     s    

9
 or   c                      sB   e Zd ZdZ fddZdddZ						ddddZ  ZS )TFDebertaEmbeddingszGConstruct the embeddings from word, position and token_type embeddings.c                   s   t  jdi | || _t|d|j| _|j| _|j| _t|dd| _|j| _| j|jkr<t	j
j|jt|jddd| _t	j
j|jdd| _t|jd	d
| _d S )Nembedding_sizeposition_biased_inputT
embed_projFr   r|   r}   r&   r$   r'   )r(   r)   r"   rB   r>   r
  r   r  r   r   r*   r+   r   r  r   r   r|   r-   r   r&   r.   r1   r'   r3   r)   5  s    zTFDebertaEmbeddings.__init__Nc                 C  s  t d | jd| jj| jgt| jd| _W d    n1 s!w   Y  t d# | jj	dkrD| jd| jj	| jgt| jd| _
nd | _
W d    n1 sQw   Y  t d | jrp| jd| j| jgt| jd| _nd | _W d    n1 s}w   Y  | jrd S d| _t| d	d d urt | jj | jd d | jjg W d    n1 sw   Y  t| d
d d urt | jj | jd  W d    n1 sw   Y  t| dd d urt | jj | jd d | jg W d    d S 1 sw   Y  d S d S )NZword_embeddingsro   r   token_type_embeddingsr   
embeddingsposition_embeddingsTr|   r&   r  )rC   rD   rs   r"   
vocab_sizer
  r   r   ro   type_vocab_sizer  r  r   r>   r  rA   rB   r|   r%   rE   r&   r  rF   r'   r'   r3   rE   H  sV   





"zTFDebertaEmbeddings.buildF	input_idsr   position_idstoken_type_idsinputs_embedsrX   r4   r5   r:   rW   c                 C  sj  |du r|du rt d|durt|| jj tj| j|d}t|dd }|du r2tj|dd}|du rDtj	tj
d|d ddd}|}| jrUtj| j|d}	||	7 }| jjdkrgtj| j|d}
||
7 }| j| jkrr| |}| |}|durtt|tt|krtt|d	krtjtj|d
dd
d}tjtj	|dd| jd}|| }| j||d}|S )z
        Applies embedding based on inputs tensor.

        Returns:
            final_embeddings (`tf.Tensor`): output embedding tensor.
        Nz5Need to provide either `input_ids` or `input_embeds`.)paramsr   rQ   r   dimsvalue)startlimitr   r  r   r   rY   r6   )r   r   r"   r  rC   r   ro   r   fillr   r   r  r  r  r  r
  r>   r  r|   r   r   r\   r_   r&   )r/   r  r  r  r  rX   r4   rG   Zfinal_embeddingsZposition_embedsZtoken_type_embedsr'   r'   r3   r9   q  s8   

zTFDebertaEmbeddings.callr=   )NNNNNF)r  r   r  r   r  r   r  r   rX   r   r4   r5   r:   rW   rz   r'   r'   r1   r3   r	  2  s    
+r	  c                      r   ) TFDebertaPredictionHeadTransformr"   r    c                   s~   t  jdi | t|d|j| _tjj| jt|j	dd| _
t|jtr,t|j| _n|j| _tjj|jdd| _|| _d S )Nr
  r#   r   r|   r}   r'   )r(   r)   rB   r>   r
  r   r*   r+   r   r   r#   r   r   r   r	   transform_act_fnr   r   r|   r"   r.   r1   r'   r3   r)     s   
z)TFDebertaPredictionHeadTransform.__init__r7   rW   r:   c                 C  s$   | j |d}| |}| |}|S r   )r#   r  r|   r   r'   r'   r3   r9     s   

z%TFDebertaPredictionHeadTransform.callNc                 C  s   | j rd S d| _ t| dd d ur2t| jj | jd d | jjg W d    n1 s-w   Y  t| dd d ur^t| j	j | j	d d | j
g W d    d S 1 sWw   Y  d S d S )NTr#   r|   )rA   rB   rC   rD   r#   r%   rE   r"   r>   r|   r
  rF   r'   r'   r3   rE     s   "z&TFDebertaPredictionHeadTransform.buildrH   r   r=   r   r'   r'   r1   r3   r    s    
r  c                      sZ   e Zd Zd fddZddd	ZdddZdddZdddZdddZdddZ	  Z
S ) TFDebertaLMPredictionHeadr"   r    input_embeddingskeras.layers.Layerc                   s@   t  jdi | || _t|d|j| _t|dd| _|| _d S )Nr
  	transformr$   r'   )	r(   r)   r"   rB   r>   r
  r  r"  r   r/   r"   r   r0   r1   r'   r3   r)     s
   
z"TFDebertaLMPredictionHead.__init__Nc                 C  s~   | j | jjfdddd| _| jrd S d| _t| dd d ur=t| jj	 | j
d  W d    d S 1 s6w   Y  d S d S )NZzerosTrr   )rp   rq   Z	trainabler%   r"  )rs   r"   r  rr   rA   rB   rC   rD   r"  r%   rE   rF   r'   r'   r3   rE     s   "zTFDebertaLMPredictionHead.buildr:   c                 C     | j S r=   )r   r?   r'   r'   r3   get_output_embeddings     z/TFDebertaLMPredictionHead.get_output_embeddingsr  tf.Variablec                 C     || j _t|d | j _d S Nr   )r   ro   r   r  r/   r  r'   r'   r3   set_output_embeddings     z/TFDebertaLMPredictionHead.set_output_embeddingsDict[str, tf.Variable]c                 C  s
   d| j iS )Nrr   )rr   r?   r'   r'   r3   get_bias  s   
z"TFDebertaLMPredictionHead.get_biasc                 C  s"   |d | _ t|d d | j_d S )Nrr   r   )rr   r   r"   r  r*  r'   r'   r3   set_bias  s   
z"TFDebertaLMPredictionHead.set_biasr7   rW   c                 C  sn   | j |d}t|d }tj|d| jgd}tj|| jjdd}tj|d|| jj	gd}tj
j|| jd}|S )Nr   r   rQ   r   T)ar   r   )r  rr   )r"  r   rC   r   r
  r   r   ro   r"   r  nnZbias_addrr   )r/   r7   Z
seq_lengthr'   r'   r3   r9     s   zTFDebertaLMPredictionHead.callr"   r    r   r!  r=   r:   r!  r  r'  )r:   r-  r   )rK   rL   rM   r)   rE   r%  r+  r.  r/  r9   rO   r'   r'   r1   r3   r    s    





r  c                      s2   e Zd Zd fddZdd
dZdddZ  ZS )TFDebertaOnlyMLMHeadr"   r    r   r!  c                   s&   t  jdi | t||dd| _d S )Npredictionsr$   r'   )r(   r)   r  r6  r#  r1   r'   r3   r)      s   zTFDebertaOnlyMLMHead.__init__sequence_outputrW   r:   c                 C  s   | j |d}|S )Nr   )r6  )r/   r7  prediction_scoresr'   r'   r3   r9     s   zTFDebertaOnlyMLMHead.callNc                 C  d   | j rd S d| _ t| dd d ur0t| jj | jd  W d    d S 1 s)w   Y  d S d S )NTr6  )rA   rB   rC   rD   r6  r%   rE   rF   r'   r'   r3   rE   	     "zTFDebertaOnlyMLMHead.buildr2  )r7  rW   r:   rW   r=   r   r'   r'   r1   r3   r5    s    
r5  c                      sj   e Zd ZeZd# fddZd$ddZd%ddZdd Ze										d&d'dd Z
d(d!d"Z  ZS ))TFDebertaMainLayerr"   r    c                   s8   t  jdi | || _t|dd| _t|dd| _d S )Nr  r$   encoderr'   )r(   r)   r"   r	  r  r   r<  r.   r1   r'   r3   r)     s   zTFDebertaMainLayer.__init__r:   r!  c                 C  r$  r=   )r  r?   r'   r'   r3   get_input_embeddings  r&  z'TFDebertaMainLayer.get_input_embeddingsr  r'  c                 C  r(  r)  )r  ro   r   r  r*  r'   r'   r3   set_input_embeddings!  r,  z'TFDebertaMainLayer.set_input_embeddingsc                 C  s   t )z
        Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base
        class PreTrainedModel
        )NotImplementedError)r/   Zheads_to_pruner'   r'   r3   _prune_heads%  s   zTFDebertaMainLayer._prune_headsNFr  TFModelInputType | Noner   np.ndarray | tf.Tensor | Noner  r  r  r   Optional[bool]r   r   r4   r5   r   c
                 C  s   |d ur|d urt d|d urt|}
n|d ur"t|d d }
nt d|d u r1tj|
dd}|d u r<tj|
dd}| j||||||	d}| j||||||	d}|d }|sa|f|dd   S t||j|jd	S )
NzDYou cannot specify both input_ids and inputs_embeds at the same timerQ   z5You have to specify either input_ids or inputs_embedsr   r  r   )r  r  r  r  rX   r4   )r7   r   r   r   r   r4   r   )	r   r   rC   r  r  r<  r
   r7   r   )r/   r  r   r  r  r  r   r   r   r4   rG   Zembedding_outputZencoder_outputsr7  r'   r'   r3   r9   ,  sF   
		zTFDebertaMainLayer.callc                 C  r   )NTr  r<  )rA   rB   rC   rD   r  r%   rE   r<  rF   r'   r'   r3   rE   e  r   zTFDebertaMainLayer.buildrH   r3  r4  	NNNNNNNNF)r  rA  r   rB  r  rB  r  rB  r  rB  r   rC  r   rC  r   rC  r4   r5   r:   r   r=   )rK   rL   rM   r    config_classr)   r=  r>  r@  r   r9   rE   rO   r'   r'   r1   r3   r;    s$    

8r;  c                   @  s   e Zd ZdZeZdZdS )TFDebertaPreTrainedModelz
    An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
    models.
    debertaN)rK   rL   rM   ra   r    rE  Zbase_model_prefixr'   r'   r'   r3   rF  q  s    rF  a1
  
    The DeBERTa model was proposed in [DeBERTa: Decoding-enhanced BERT with Disentangled
    Attention](https://arxiv.org/abs/2006.03654) by Pengcheng He, Xiaodong Liu, Jianfeng Gao, Weizhu Chen. It's build
    on top of BERT/RoBERTa with two improvements, i.e. disentangled attention and enhanced mask decoder. With those two
    improvements, it out perform BERT/RoBERTa on a majority of tasks with 80GB pretraining data.

    This model is also a [keras.Model](https://www.tensorflow.org/api_docs/python/tf/keras/Model) subclass. Use it
    as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matter related to general usage and
    behavior.

    <Tip>

    TensorFlow models and layers in `transformers` accept two formats as input:

    - having all inputs as keyword arguments (like PyTorch models), or
    - having all inputs as a list, tuple or dict in the first positional argument.

    The reason the second format is supported is that Keras methods prefer this format when passing inputs to models
    and layers. Because of this support, when using methods like `model.fit()` things should "just work" for you - just
    pass your inputs and labels in any format that `model.fit()` supports! If, however, you want to use the second
    format outside of Keras methods like `fit()` and `predict()`, such as when creating your own layers or models with
    the Keras `Functional` API, there are three possibilities you can use to gather all the input Tensors in the first
    positional argument:

    - a single Tensor with `input_ids` only and nothing else: `model(input_ids)`
    - a list of varying length with one or several input Tensors IN THE ORDER given in the docstring:
    `model([input_ids, attention_mask])` or `model([input_ids, attention_mask, token_type_ids])`
    - a dictionary with one or several input Tensors associated to the input names given in the docstring:
    `model({"input_ids": input_ids, "token_type_ids": token_type_ids})`

    Note that when creating models and layers with
    [subclassing](https://keras.io/guides/making_new_layers_and_models_via_subclassing/) then you don't need to worry
    about any of this, as you can just pass inputs like you would to any other Python function!

    </Tip>

    Parameters:
        config ([`DebertaConfig`]): Model configuration class with all the parameters of the model.
            Initializing with a config file does not load the weights associated with the model, only the
            configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
a	  
    Args:
        input_ids (`np.ndarray`, `tf.Tensor`, `List[tf.Tensor]` ``Dict[str, tf.Tensor]` or `Dict[str, np.ndarray]` and each example must have the shape `({0})`):
            Indices of input sequence tokens in the vocabulary.

            Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
            [`PreTrainedTokenizer.__call__`] for details.

            [What are input IDs?](../glossary#input-ids)
        attention_mask (`np.ndarray` or `tf.Tensor` of shape `({0})`, *optional*):
            Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:

            - 1 for tokens that are **not masked**,
            - 0 for tokens that are **masked**.

            [What are attention masks?](../glossary#attention-mask)
        token_type_ids (`np.ndarray` or `tf.Tensor` of shape `({0})`, *optional*):
            Segment token indices to indicate first and second portions of the inputs. Indices are selected in `[0,
            1]`:

            - 0 corresponds to a *sentence A* token,
            - 1 corresponds to a *sentence B* token.

            [What are token type IDs?](../glossary#token-type-ids)
        position_ids (`np.ndarray` or `tf.Tensor` of shape `({0})`, *optional*):
            Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,
            config.max_position_embeddings - 1]`.

            [What are position IDs?](../glossary#position-ids)
        inputs_embeds (`np.ndarray` or `tf.Tensor` of shape `({0}, hidden_size)`, *optional*):
            Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This
            is useful if you want more control over how to convert *input_ids* indices into associated vectors than the
            model's internal embedding lookup matrix.
        output_attentions (`bool`, *optional*):
            Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
            tensors for more detail.
        output_hidden_states (`bool`, *optional*):
            Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
            more detail.
        return_dict (`bool`, *optional*):
            Whether or not to return a [`~utils.ModelOutput``] instead of a plain tuple.
zaThe bare DeBERTa Model transformer outputting raw hidden-states without any specific head on top.c                      sf   e Zd Zd fddZeeedee	e
ed									ddddZdddZ  ZS )TFDebertaModelr"   r    c                   s,   t  j|g|R i | t|dd| _d S )NrG  r$   )r(   r)   r;  rG  r/   r"   rV   r0   r1   r'   r3   r)     s   zTFDebertaModel.__init__batch_size, sequence_length
checkpointoutput_typerE  NFr  rA  r   rB  r  r  r  r   rC  r   r   r4   r:   r   c
                 C  s    | j |||||||||	d	}
|
S )N	r  r   r  r  r  r   r   r   r4   )rG  )r/   r  r   r  r  r  r   r   r   r4   r   r'   r'   r3   r9     s   zTFDebertaModel.callc                 C  r9  )NTrG  )rA   rB   rC   rD   rG  r%   rE   rF   r'   r'   r3   rE     r:  zTFDebertaModel.buildrH   rD  )r  rA  r   rB  r  rB  r  rB  r  rB  r   rC  r   rC  r   rC  r4   rC  r:   r   r=   )rK   rL   rM   r)   r   r   DEBERTA_INPUTS_DOCSTRINGformatr   _CHECKPOINT_FOR_DOCr
   _CONFIG_FOR_DOCr9   rE   rO   r'   r'   r1   r3   rH    s(    rH  z5DeBERTa Model with a `language modeling` head on top.c                      sr   e Zd Zd fddZd ddZeeed	e	e
eed
										d!d"ddZd#ddZ  ZS )$TFDebertaForMaskedLMr"   r    c                   sP   t  j|g|R i | |jrtd t|dd| _t|| jjdd| _	d S )NzpIf you want to use `TFDebertaForMaskedLM` make sure `config.is_decoder=False` for bi-directional self-attention.rG  r$   cls)r   r%   )
r(   r)   Z
is_decoderloggerwarningr;  rG  r5  r  mlmrI  r1   r'   r3   r)     s   zTFDebertaForMaskedLM.__init__r:   r!  c                 C  r<   r=   )rW  r6  r?   r'   r'   r3   get_lm_head  s   z TFDebertaForMaskedLM.get_lm_headrJ  rK  NFr  rA  r   rB  r  r  r  r   rC  r   r   labelsr4   )Union[TFMaskedLMOutput, Tuple[tf.Tensor]]c                 C  s   | j |||||||||
d	}|d }| j||
d}|	du rdn| j|	|d}|s<|f|dd  }|dur:|f| S |S t|||j|jdS )a  
        labels (`tf.Tensor` or `np.ndarray` of shape `(batch_size, sequence_length)`, *optional*):
            Labels for computing the masked language modeling loss. Indices should be in `[-100, 0, ...,
            config.vocab_size]` (see `input_ids` docstring) Tokens with indices set to `-100` are ignored (masked), the
            loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`
        rN  r   )r7  r4   NrY  logitsr   lossr\  r7   r   )rG  rW  hf_compute_lossr   r7   r   )r/   r  r   r  r  r  r   r   r   rY  r4   r   r7  r8  r^  r`   r'   r'   r3   r9     s.   zTFDebertaForMaskedLM.callc                 C  r   )NTrG  rW  )rA   rB   rC   rD   rG  r%   rE   rW  rF   r'   r'   r3   rE   J  r   zTFDebertaForMaskedLM.buildrH   r3  
NNNNNNNNNF)r  rA  r   rB  r  rB  r  rB  r  rB  r   rC  r   rC  r   rC  rY  rB  r4   rC  r:   rZ  r=   )rK   rL   rM   r)   rX  r   r   rO  rP  r   rQ  r   rR  r9   rE   rO   r'   r'   r1   r3   rS    s,    
-rS  z
    DeBERTa Model transformer with a sequence classification/regression head on top (a linear layer on top of the
    pooled output) e.g. for GLUE tasks.
    c                      h   e Zd Zd fddZeeedee	e
ed										ddddZdddZ  ZS ) "TFDebertaForSequenceClassificationr"   r    c                   s   t  j|g|R i | |j| _t|dd| _t|dd| _t|dd }|d u r-| jj	n|}t
|dd| _tjj|jt|jdd| _| jj| _d S )NrG  r$   poolerZcls_dropout
classifierr   )r(   r)   
num_labelsr;  rG  r!   rc  rB   r"   r   r-   r&   r   r*   r+   r   r   rd  r@   )r/   r"   rV   r0   Zdrop_outr1   r'   r3   r)   ^  s   z+TFDebertaForSequenceClassification.__init__rJ  rK  NFr  rA  r   rB  r  r  r  r   rC  r   r   rY  r4   r:   3Union[TFSequenceClassifierOutput, Tuple[tf.Tensor]]c                 C  s   | j |||||||||
d	}|d }| j||
d}| j||
d}| |}|	du r+dn| j|	|d}|sH|f|dd  }|durF|f| S |S t|||j|jdS )a  
        labels (`tf.Tensor` or `np.ndarray` of shape `(batch_size,)`, *optional*):
            Labels for computing the sequence classification/regression loss. Indices should be in `[0, ...,
            config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If
            `config.num_labels > 1` a classification loss is computed (Cross-Entropy).
        rN  r   r6   Nr[  r   r]  )rG  rc  r&   rd  r_  r   r7   r   )r/   r  r   r  r  r  r   r   r   rY  r4   r   r7  r8   r\  r^  r`   r'   r'   r3   r9   p  s2   
z'TFDebertaForSequenceClassification.callc                 C  sJ  | j rd S d| _ t| dd d ur-t| jj | jd  W d    n1 s(w   Y  t| dd d urRt| jj | jd  W d    n1 sMw   Y  t| dd d urwt| jj | jd  W d    n1 srw   Y  t| dd d urt| j	j | j	d d | j
g W d    d S 1 sw   Y  d S d S )NTrG  rc  r&   rd  )rA   rB   rC   rD   rG  r%   rE   rc  r&   rd  r@   rF   r'   r'   r3   rE     s(   "z(TFDebertaForSequenceClassification.buildrH   r`  )r  rA  r   rB  r  rB  r  rB  r  rB  r   rC  r   rC  r   rC  rY  rB  r4   rC  r:   rf  r=   )rK   rL   rM   r)   r   r   rO  rP  r   rQ  r   rR  r9   rE   rO   r'   r'   r1   r3   rb  V  s*    0rb  z
    DeBERTa Model with a token classification head on top (a linear layer on top of the hidden-states output) e.g. for
    Named-Entity-Recognition (NER) tasks.
    c                      ra  ) TFDebertaForTokenClassificationr"   r    c                   sh   t  j|g|R i | |j| _t|dd| _tjj|jd| _	tjj
|jt|jdd| _|| _d S )NrG  r$   )Zraterd  r   )r(   r)   re  r;  rG  r   r*   ZDropoutr   r&   r+   r   r   rd  r"   rI  r1   r'   r3   r)     s   
z(TFDebertaForTokenClassification.__init__rJ  rK  NFr  rA  r   rB  r  r  r  r   rC  r   r   rY  r4   r:   0Union[TFTokenClassifierOutput, Tuple[tf.Tensor]]c                 C  s   | j |||||||||
d	}|d }| j||
d}| j|d}|	du r%dn| j|	|d}|sB|f|dd  }|dur@|f| S |S t|||j|jdS )	z
        labels (`tf.Tensor` or `np.ndarray` of shape `(batch_size, sequence_length)`, *optional*):
            Labels for computing the token classification loss. Indices should be in `[0, ..., config.num_labels - 1]`.
        rN  r   r6   r   Nr[  r   r]  )rG  r&   rd  r_  r   r7   r   )r/   r  r   r  r  r  r   r   r   rY  r4   r   r7  r\  r^  r`   r'   r'   r3   r9     s0   z$TFDebertaForTokenClassification.callc                 C     | j rd S d| _ t| dd d ur-t| jj | jd  W d    n1 s(w   Y  t| dd d urZt| jj | jd d | jj	g W d    d S 1 sSw   Y  d S d S )NTrG  rd  )
rA   rB   rC   rD   rG  r%   rE   rd  r"   r>   rF   r'   r'   r3   rE         "z%TFDebertaForTokenClassification.buildrH   r`  )r  rA  r   rB  r  rB  r  rB  r  rB  r   rC  r   rC  r   rC  rY  rB  r4   rC  r:   rh  r=   )rK   rL   rM   r)   r   r   rO  rP  r   rQ  r   rR  r9   rE   rO   r'   r'   r1   r3   rg    s*    ,rg  z
    DeBERTa Model with a span classification head on top for extractive question-answering tasks like SQuAD (a linear
    layers on top of the hidden-states output to compute `span start logits` and `span end logits`).
    c                      sj   e Zd Zd fddZeeedee	e
ed											ddddZd ddZ  ZS )!TFDebertaForQuestionAnsweringr"   r    c                   sV   t  j|g|R i | |j| _t|dd| _tjj|jt|j	dd| _
|| _d S )NrG  r$   
qa_outputsr   )r(   r)   re  r;  rG  r   r*   r+   r   r   rl  r"   rI  r1   r'   r3   r)     s   
z&TFDebertaForQuestionAnswering.__init__rJ  rK  NFr  rA  r   rB  r  r  r  r   rC  r   r   start_positionsend_positionsr4   r:   7Union[TFQuestionAnsweringModelOutput, Tuple[tf.Tensor]]c                 C  s   | j |||||||||d	}|d }| j|d}tj|ddd\}}tj|dd}tj|dd}d}|	durK|
durKd	|	i}|
|d
< | j|||fd}|sb||f|dd  }|dur`|f| S |S t||||j|jdS )a  
        start_positions (`tf.Tensor` or `np.ndarray` of shape `(batch_size,)`, *optional*):
            Labels for position (index) of the start of the labelled span for computing the token classification loss.
            Positions are clamped to the length of the sequence (`sequence_length`). Position outside of the sequence
            are not taken into account for computing the loss.
        end_positions (`tf.Tensor` or `np.ndarray` of shape `(batch_size,)`, *optional*):
            Labels for position (index) of the end of the labelled span for computing the token classification loss.
            Positions are clamped to the length of the sequence (`sequence_length`). Position outside of the sequence
            are not taken into account for computing the loss.
        rN  r   r   r   rQ   )r  r   rT   )inputrT   NZstart_positionZend_positionr[  )r^  start_logits
end_logitsr7   r   )	rG  rl  rC   r   r   r_  r   r7   r   )r/   r  r   r  r  r  r   r   r   rm  rn  r4   r   r7  r\  rq  rr  r^  rY  r`   r'   r'   r3   r9     s>   z"TFDebertaForQuestionAnswering.callc                 C  ri  )NTrG  rl  )
rA   rB   rC   rD   rG  r%   rE   rl  r"   r>   rF   r'   r'   r3   rE   a  rj  z#TFDebertaForQuestionAnswering.buildrH   )NNNNNNNNNNF)r  rA  r   rB  r  rB  r  rB  r  rB  r   rC  r   rC  r   rC  rm  rB  rn  rB  r4   rC  r:   ro  r=   )rK   rL   rM   r)   r   r   rO  rP  r   rQ  r   rR  r9   rE   rO   r'   r'   r1   r3   rk    s,    ;rk  )rS  rk  rb  rg  rH  rF  )Pra   
__future__r   rw   typingr   r   r   r   r   numpynpZ
tensorflowrC   Zactivations_tfr	   Zmodeling_tf_outputsr
   r   r   r   r   Zmodeling_tf_utilsr   r   r   r   r   r   r   r   r   Ztf_utilsr   r   r   utilsr   r   r   r   Zconfiguration_debertar    Z
get_loggerrK   rU  rR  rQ  r*   ZLayerr!   rP   r-   rk   r{   r   r   r   r   r   r   r   r   r   r   r   r	  r  r  r5  r;  rF  ZDEBERTA_START_DOCSTRINGrO  rH  rS  rb  rg  rk  __all__r'   r'   r'   r3   <module>   s   ,
 (0!3l

  w&0^
*,0P\LZ