"""PyTorch ViViT model."""

from typing import Callable, Optional, Set, Tuple, Union

import torch
import torch.utils.checkpoint
from torch import nn
from torch.nn import CrossEntropyLoss, MSELoss

from ...activations import ACT2FN
from ...modeling_outputs import BaseModelOutput, BaseModelOutputWithPooling, ImageClassifierOutput
from ...modeling_utils import ALL_ATTENTION_FUNCTIONS, PreTrainedModel
from ...pytorch_utils import find_pruneable_heads_and_indices, prune_linear_layer
from ...utils import auto_docstring, logging, torch_int
from .configuration_vivit import VivitConfig


logger = logging.get_logger(__name__)


class VivitTubeletEmbeddings(nn.Module):
    """
    Construct Vivit Tubelet embeddings.

    This module turns a batch of videos of shape (batch_size, num_frames, num_channels, height, width) into a tensor of
    shape (batch_size, seq_len, hidden_size) to be consumed by a Transformer encoder.

    The seq_len (the number of patches) equals (number of frames // tubelet_size[0]) * (height // tubelet_size[1]) *
    (width // tubelet_size[2]).
    """

    def __init__(self, config):
        super().__init__()
        self.num_frames = config.num_frames
        self.image_size = config.image_size
        self.patch_size = config.tubelet_size
        self.num_patches = (
            (self.image_size // self.patch_size[2])
            * (self.image_size // self.patch_size[1])
            * (self.num_frames // self.patch_size[0])
        )
        self.embed_dim = config.hidden_size

        self.projection = nn.Conv3d(
            config.num_channels, config.hidden_size, kernel_size=config.tubelet_size, stride=config.tubelet_size
        )

    def forward(self, pixel_values, interpolate_pos_encoding: bool = False):
        batch_size, num_frames, num_channels, height, width = pixel_values.shape
        if not interpolate_pos_encoding and (height != self.image_size or width != self.image_size):
            raise ValueError(
                f"Image image size ({height}*{width}) doesn't match model ({self.image_size}*{self.image_size})."
            )

        # permute to (batch_size, num_channels, num_frames, height, width)
        pixel_values = pixel_values.permute(0, 2, 1, 3, 4)

        x = self.projection(pixel_values)
        # flatten the spatio-temporal patch grid into a sequence of patch embeddings
        x = x.flatten(2).transpose(1, 2)
        return x
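
# The tubelet arithmetic from the docstring above, worked out for one illustrative setting
# (assumed here, not stated in this file): num_frames=32, image_size=224,
# tubelet_size=[2, 16, 16], hidden_size=768, which are the defaults of the
# google/vivit-b-16x2-kinetics400 checkpoint used in the usage examples further below.
#
#   seq_len = (32 // 2) * (224 // 16) * (224 // 16) = 16 * 14 * 14 = 3136
#
# VivitTubeletEmbeddings therefore maps pixel_values of shape (batch_size, 32, 3, 224, 224)
# to a (batch_size, 3136, 768) tensor; VivitEmbeddings below prepends the CLS token, giving
# the 3137 tokens shown in the VivitModel example output ([1, 3137, 768]).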

class VivitEmbeddings(nn.Module):
    """
    Vivit Embeddings.

    Creates embeddings from a video using VivitTubeletEmbeddings, adds CLS token and positional embeddings.
    """

    def __init__(self, config):
        super().__init__()

        self.cls_token = nn.Parameter(torch.zeros(1, 1, config.hidden_size))
        self.patch_embeddings = VivitTubeletEmbeddings(config)

        self.position_embeddings = nn.Parameter(
            torch.zeros(1, self.patch_embeddings.num_patches + 1, config.hidden_size)
        )
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        self.patch_size = config.tubelet_size[1:]
        self.config = config
    def interpolate_pos_encoding(self, embeddings: torch.Tensor, height: int, width: int) -> torch.Tensor:
        """
        This method allows to interpolate the pre-trained position encodings, to be able to use the model on higher resolution
        images. This method is also adapted to support torch.jit tracing.

        Adapted from:
        - https://github.com/facebookresearch/dino/blob/de9ee3df6cf39fac952ab558447af1fa1365362a/vision_transformer.py#L174-L194, and
        - https://github.com/facebookresearch/dinov2/blob/e1277af2ba9496fbadf7aec6eba56e8d882d1e35/dinov2/models/vision_transformer.py#L179-L211
        """

        num_patches = embeddings.shape[1] - 1
        num_positions = self.position_embeddings.shape[1] - 1

        # always interpolate when tracing to ensure the exported model works for dynamic input shapes
        if not torch.jit.is_tracing() and num_patches == num_positions and height == width:
            return self.position_embeddings

        class_pos_embed = self.position_embeddings[:, :1]
        patch_pos_embed = self.position_embeddings[:, 1:]

        dim = embeddings.shape[-1]

        new_height = height // self.patch_size[0]
        new_width = width // self.patch_size[1]

        sqrt_num_positions = torch_int(num_positions**0.5)
        patch_pos_embed = patch_pos_embed.reshape(1, sqrt_num_positions, sqrt_num_positions, dim)
        patch_pos_embed = patch_pos_embed.permute(0, 3, 1, 2)

        patch_pos_embed = nn.functional.interpolate(
            patch_pos_embed,
            size=(new_height, new_width),
            mode="bicubic",
            align_corners=False,
        )

        patch_pos_embed = patch_pos_embed.permute(0, 2, 3, 1).view(1, -1, dim)

        return torch.cat((class_pos_embed, patch_pos_embed), dim=1)

    def forward(self, pixel_values, interpolate_pos_encoding: bool = False):
        batch_size, num_frames, num_channels, height, width = pixel_values.shape
        embeddings = self.patch_embeddings(pixel_values, interpolate_pos_encoding=interpolate_pos_encoding)

        cls_tokens = self.cls_token.tile([batch_size, 1, 1])
        embeddings = torch.cat((cls_tokens, embeddings), dim=1)

        # add positional encoding to each token
        if interpolate_pos_encoding:
            embeddings = embeddings + self.interpolate_pos_encoding(embeddings, height, width)
        else:
            embeddings = embeddings + self.position_embeddings

        embeddings = self.dropout(embeddings)

        return embeddings


def eager_attention_forward(
    module: nn.Module,
    query: torch.Tensor,
    key: torch.Tensor,
    value: torch.Tensor,
    attention_mask: Optional[torch.Tensor],
    scaling: float,
    dropout: float = 0.0,
    **kwargs,
):
    # Take the dot product between "query" and "key" to get the raw attention scores.
    attn_weights = torch.matmul(query, key.transpose(-1, -2)) * scaling

    # Normalize the attention scores to probabilities.
    attn_weights = nn.functional.softmax(attn_weights, dim=-1, dtype=torch.float32).to(query.dtype)

    # This is actually dropping out entire tokens to attend to, which might
    # seem a bit unusual, but is taken from the original Transformer paper.
    attn_weights = nn.functional.dropout(attn_weights, p=dropout, training=module.training)

    # Mask heads if we want to
    if attention_mask is not None:
        attn_weights = attn_weights * attention_mask

    attn_output = torch.matmul(attn_weights, value)
    attn_output = attn_output.transpose(1, 2).contiguous()

    return attn_output, attn_weights
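
# Shape walk-through for eager_attention_forward above (descriptive only, names are generic):
# query, key and value arrive as (batch_size, num_attention_heads, seq_len, head_size) after
# VivitSelfAttention.transpose_for_scores below. matmul(query, key^T) * scaling yields
# attention weights of shape (batch_size, num_attention_heads, seq_len, seq_len); these are
# softmaxed in float32, dropout is applied, and an optional head mask is multiplied in.
# The matmul with value and the final transpose(1, 2) return the context as
# (batch_size, seq_len, num_attention_heads, head_size), which the caller reshapes to
# (batch_size, seq_len, all_head_size).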

class VivitSelfAttention(nn.Module):
    def __init__(self, config: VivitConfig) -> None:
        super().__init__()
        if config.hidden_size % config.num_attention_heads != 0 and not hasattr(config, "embedding_size"):
            raise ValueError(
                f"The hidden size {config.hidden_size} is not a multiple of the number of attention "
                f"heads {config.num_attention_heads}."
            )

        self.config = config
        self.num_attention_heads = config.num_attention_heads
        self.attention_head_size = int(config.hidden_size / config.num_attention_heads)
        self.all_head_size = self.num_attention_heads * self.attention_head_size
        self.dropout_prob = config.attention_probs_dropout_prob
        self.scaling = self.attention_head_size**-0.5
        self.is_causal = False

        self.query = nn.Linear(config.hidden_size, self.all_head_size, bias=config.qkv_bias)
        self.key = nn.Linear(config.hidden_size, self.all_head_size, bias=config.qkv_bias)
        self.value = nn.Linear(config.hidden_size, self.all_head_size, bias=config.qkv_bias)

    def transpose_for_scores(self, x: torch.Tensor) -> torch.Tensor:
        new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size)
        x = x.view(new_x_shape)
        return x.permute(0, 2, 1, 3)

    def forward(
        self, hidden_states, head_mask: Optional[torch.Tensor] = None, output_attentions: bool = False
    ) -> Union[Tuple[torch.Tensor, torch.Tensor], Tuple[torch.Tensor]]:
        key_layer = self.transpose_for_scores(self.key(hidden_states))
        value_layer = self.transpose_for_scores(self.value(hidden_states))
        query_layer = self.transpose_for_scores(self.query(hidden_states))

        attention_interface: Callable = eager_attention_forward
        if self.config._attn_implementation != "eager":
            if self.config._attn_implementation == "sdpa" and output_attentions:
                logger.warning_once(
                    "`torch.nn.functional.scaled_dot_product_attention` does not support `output_attentions=True`. "
                    "Falling back to eager attention. This warning can be removed using the argument "
                    '`attn_implementation="eager"` when loading the model.'
                )
            else:
                attention_interface = ALL_ATTENTION_FUNCTIONS[self.config._attn_implementation]

        context_layer, attention_probs = attention_interface(
            self,
            query_layer,
            key_layer,
            value_layer,
            head_mask,
            is_causal=self.is_causal,
            scaling=self.scaling,
            dropout=0.0 if not self.training else self.dropout_prob,
        )

        new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,)
        context_layer = context_layer.reshape(new_context_layer_shape)

        outputs = (context_layer, attention_probs) if output_attentions else (context_layer,)

        return outputs


class VivitSelfOutput(nn.Module):
    """
    The residual connection is defined in VivitLayer instead of here (as is the case with other models), due to the
    layernorm applied before each block.
    """

    def __init__(self, config: VivitConfig) -> None:
        super().__init__()
        self.dense = nn.Linear(config.hidden_size, config.hidden_size)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)

    def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor:
        hidden_states = self.dense(hidden_states)
        hidden_states = self.dropout(hidden_states)

        return hidden_states


class VivitAttention(nn.Module):
    def __init__(self, config: VivitConfig) -> None:
        super().__init__()
        self.attention = VivitSelfAttention(config)
        self.output = VivitSelfOutput(config)
        self.pruned_heads = set()
zVivitAttention.__init__headsc                 C   s   t |dkrd S t|| jj| jj| j\}}t| jj|| j_t| jj|| j_t| jj	|| j_	t| j
j|dd| j
_| jjt | | j_| jj| jj | j_| j|| _d S )Nr   r   rO   )lenr   r   rl   rn   r   r   r[   r\   r]   r   r}   ro   union)r&   r   indexr*   r*   r+   prune_heads  s   zVivitAttention.prune_headsFrx   rt   ru   c                 C   s4   |  |||}| |d |}|f|dd   }|S )Nr   r   )r   r   )r&   rx   rt   ru   Zself_outputsattention_outputry   r*   r*   r+   r9   )  s   zVivitAttention.forwardrz   )r;   r<   r=   r   r   r   rX   r   rC   rW   r   r?   r   r   r9   r@   r*   r*   r(   r+   r     s    r   c                       $   e Zd Z fddZdd Z  ZS )VivitIntermediatec                    sR   t    t|j|j| _t|j| _	t
|jtr#t|j | _d S |j| _d S r|   )r   r   r   rr   r!   intermediate_sizer}   rG   rH   rI   
isinstanceZ
hidden_actstrr   intermediate_act_fnr%   r(   r*   r+   r   8  s   
zVivitIntermediate.__init__c                 C   s"   |  |}| |}| |}|S r|   )r}   r   rI   )r&   rx   r*   r*   r+   r9   A  s   


zVivitIntermediate.forwardr;   r<   r=   r   r9   r@   r*   r*   r(   r+   r   7  s    	r   c                       r   )VivitOutputc                    s.   t    t|j|j| _t|j| _	d S r|   )
r   r   r   rr   r   r!   r}   rG   rH   rI   r%   r(   r*   r+   r   J  r~   zVivitOutput.__init__c                 C   s    |  |}| |}|| }|S r|   r   r   r*   r*   r+   r9   O  s   

zVivitOutput.forwardr   r*   r*   r(   r+   r   I      r   c                       s*   e Zd ZdZ fddZdddZ  ZS )	
VivitLayerzNThis corresponds to the EncoderBlock class in the scenic/vivit implementation.c                    sb   t    |j| _d| _t|| _t|| _t|| _	t
j|j|jd| _t
j|j|jd| _d S )Nr   Zeps)r   r   Zchunk_size_feed_forwardZseq_len_dimr   r   r   intermediater   r   r   	LayerNormr!   layer_norm_epslayernorm_beforelayernorm_afterr%   r(   r*   r+   r   \  s   



zVivitLayer.__init__NFc                 C   s`   | j | |||d}|d }|dd  }|| }| |}| |}| ||}|f| }|S )N)ru   r   r   )r   r   r   r   r   )r&   rx   rt   ru   Zself_attention_outputsr   ry   Zlayer_outputr*   r*   r+   r9   f  s   


zVivitLayer.forwardrz   )r;   r<   r=   r>   r   r9   r@   r*   r*   r(   r+   r   Y  s    
r   c                       s.   e Zd Z fddZ				dddZ  ZS )	VivitEncoderc                    s:   t     | _t fddt jD | _d| _d S )Nc                    s   g | ]}t  qS r*   )r   ).0_r'   r*   r+   
<listcomp>  s    z)VivitEncoder.__init__.<locals>.<listcomp>F)	r   r   r'   r   Z
ModuleListrangenum_hidden_layerslayergradient_checkpointingr%   r(   r   r+   r     s   
 
zVivitEncoder.__init__NFTc                 C   s   |rdnd }|r
dnd }t | jD ]8\}}	|r||f }|d ur$|| nd }
| jr6| jr6| |	j||
|}n|	||
|}|d }|rI||d f }q|rQ||f }|s_tdd |||fD S t|||dS )Nr*   r   r   c                 s   s    | ]	}|d ur|V  qd S r|   r*   )r   vr*   r*   r+   	<genexpr>  s    z'VivitEncoder.forward.<locals>.<genexpr>)last_hidden_staterx   
attentions)	enumerater   r   rc   Z_gradient_checkpointing_func__call__tupler   )r&   rx   rt   ru   output_hidden_statesreturn_dictZall_hidden_statesZall_self_attentionsiZlayer_moduleZlayer_head_maskZlayer_outputsr*   r*   r+   r9     s6   

zVivitEncoder.forward)NFFTr   r*   r*   r(   r+   r     s    	r   c                       r   )VivitPoolerc                    s*   t    t|j|j| _t | _d S r|   )r   r   r   rr   r!   r}   ZTanh
activationr%   r(   r*   r+   r     s   
zVivitPooler.__init__c                 C   s(   |d d df }|  |}| |}|S )Nr   )r}   r   )r&   rx   Zfirst_token_tensorpooled_outputr*   r*   r+   r9     s   

zVivitPooler.forwardr   r*   r*   r(   r+   r     r   r   c                   @   s0   e Zd ZeZdZdZdZg ZdZ	dZ
dd ZdS )VivitPreTrainedModelvivitr4   Tc                 C   s   t |tjtjfr#|jjjd| jjd |j	dur!|j	j
  dS dS t |tjrF|jjjd| jjd |jdurD|jj|j 
  dS dS t |tjr[|j	j
  |jjd dS t |trn|jj
  |jj
  dS dS )zInitialize the weightsrY   )meanZstdNg      ?)r   r   rr   r"   weightdataZnormal_r'   Zinitializer_rangerk   Zzero_Z	EmbeddingZpadding_idxr   Zfill_rA   rD   rF   )r&   rZ   r*   r*   r+   _init_weights  s"   



@auto_docstring
class VivitModel(VivitPreTrainedModel):
    def __init__(self, config, add_pooling_layer=True):
        r"""
        add_pooling_layer (bool, *optional*, defaults to `True`):
            Whether to add a pooling layer
        """
        super().__init__(config)
        self.config = config

        self.embeddings = VivitEmbeddings(config)
        self.encoder = VivitEncoder(config)

        self.layernorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
        self.pooler = VivitPooler(config) if add_pooling_layer else None

        # Initialize weights and apply final processing
        self.post_init()
zVivitModel.__init__c                 C   s   | j jS r|   )rJ   rE   )r&   r*   r*   r+   get_input_embeddings  s   zVivitModel.get_input_embeddingsc                 C   s*   |  D ]\}}| jj| j| qdS )z
        Prunes heads of the model.

        Args:
            heads_to_prune:
                dict of {layer_num: list of heads to prune in this layer}
        N)itemsr   r   r   r   )r&   Zheads_to_pruner   r   r*   r*   r+   _prune_heads  s   zVivitModel._prune_headsNFr4   rt   ru   r   r,   r   rK   c                 C   s   |dur|n| j j}|dur|n| j j}|dur|n| j j}|du r&td| || j j}| j||d}| j|||||d}|d }	| 	|	}	| j
durR| 
|	nd}
|s`|	|
f|dd  S t|	|
|j|jdS )a  
        Examples:

        ```python
        >>> import av
        >>> import numpy as np

        >>> from transformers import VivitImageProcessor, VivitModel
        >>> from huggingface_hub import hf_hub_download

        >>> np.random.seed(0)


        >>> def read_video_pyav(container, indices):
        ...     '''
        ...     Decode the video with PyAV decoder.
        ...     Args:
        ...         container (`av.container.input.InputContainer`): PyAV container.
        ...         indices (`List[int]`): List of frame indices to decode.
        ...     Returns:
        ...         result (np.ndarray): np array of decoded frames of shape (num_frames, height, width, 3).
        ...     '''
        ...     frames = []
        ...     container.seek(0)
        ...     start_index = indices[0]
        ...     end_index = indices[-1]
        ...     for i, frame in enumerate(container.decode(video=0)):
        ...         if i > end_index:
        ...             break
        ...         if i >= start_index and i in indices:
        ...             frames.append(frame)
        ...     return np.stack([x.to_ndarray(format="rgb24") for x in frames])


        >>> def sample_frame_indices(clip_len, frame_sample_rate, seg_len):
        ...     '''
        ...     Sample a given number of frame indices from the video.
        ...     Args:
        ...         clip_len (`int`): Total number of frames to sample.
        ...         frame_sample_rate (`int`): Sample every n-th frame.
        ...         seg_len (`int`): Maximum allowed index of sample's last frame.
        ...     Returns:
        ...         indices (`List[int]`): List of sampled frame indices
        ...     '''
        ...     converted_len = int(clip_len * frame_sample_rate)
        ...     end_idx = np.random.randint(converted_len, seg_len)
        ...     start_idx = end_idx - converted_len
        ...     indices = np.linspace(start_idx, end_idx, num=clip_len)
        ...     indices = np.clip(indices, start_idx, end_idx - 1).astype(np.int64)
        ...     return indices


        >>> # video clip consists of 300 frames (10 seconds at 30 FPS)
        >>> file_path = hf_hub_download(
        ...     repo_id="nielsr/video-demo", filename="eating_spaghetti.mp4", repo_type="dataset"
        ... )
        >>> container = av.open(file_path)

        >>> # sample 32 frames
        >>> indices = sample_frame_indices(clip_len=32, frame_sample_rate=1, seg_len=container.streams.video[0].frames)
        >>> video = read_video_pyav(container=container, indices=indices)

        >>> image_processor = VivitImageProcessor.from_pretrained("google/vivit-b-16x2-kinetics400")
        >>> model = VivitModel.from_pretrained("google/vivit-b-16x2-kinetics400")

        >>> # prepare video for the model
        >>> inputs = image_processor(list(video), return_tensors="pt")

        >>> # forward pass
        >>> outputs = model(**inputs)
        >>> last_hidden_states = outputs.last_hidden_state
        >>> list(last_hidden_states.shape)
        [1, 3137, 768]
        ```"""
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        if pixel_values is None:
            raise ValueError("You have to specify pixel_values")

        # Prepare head mask if needed
        head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers)

        embedding_output = self.embeddings(pixel_values, interpolate_pos_encoding=interpolate_pos_encoding)

        encoder_outputs = self.encoder(
            embedding_output,
            head_mask=head_mask,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )
        sequence_output = encoder_outputs[0]
        sequence_output = self.layernorm(sequence_output)
        pooled_output = self.pooler(sequence_output) if self.pooler is not None else None

        if not return_dict:
            return (sequence_output, pooled_output) + encoder_outputs[1:]

        return BaseModelOutputWithPooling(
            last_hidden_state=sequence_output,
            pooler_output=pooled_output,
            hidden_states=encoder_outputs.hidden_states,
            attentions=encoder_outputs.attentions,
        )


@auto_docstring(
    custom_intro="""
        ViViT Transformer model with a video classification head on top (a linear layer on top of the final hidden state of the
    [CLS] token) e.g. for Kinetics-400.

        <Tip>

            Note that it's possible to fine-tune ViT on higher resolution images than the ones it has been trained on, by
            setting `interpolate_pos_encoding` to `True` in the forward of the model. This will interpolate the pre-trained
            position embeddings to the higher resolution.

        </Tip>
    )Zcustom_introc                       s   e Zd Z fddZe							ddeej deej deej dee	 d	ee	 d
e	dee	 de
eej ef fddZ  ZS )VivitForVideoClassificationc                    sR   t  | |j| _t|dd| _|jdkrt|j|jnt | _	| 
  d S )NF)r   r   )r   r   
num_labelsr   r   r   rr   r!   ZIdentity
classifierr   r%   r(   r*   r+   r     s
   $z$VivitForVideoClassification.__init__NFr4   rt   labelsru   r   r,   r   rK   c                 C   s   |dur|n| j j}| j||||||d}|d }	| |	dddddf }
d}|durQ| jdkrAt }||
d|d}nt }||
d| j|d}|sg|
f|dd  }|dure|f| S |S t||
|j	|j
dS )a  
        labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
            Labels for computing the image classification/regression loss. Indices should be in `[0, ...,
            config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If
            `config.num_labels > 1` a classification loss is computed (Cross-Entropy).

        Examples:

        ```python
        >>> import av
        >>> import numpy as np
        >>> import torch

        >>> from transformers import VivitImageProcessor, VivitForVideoClassification
        >>> from huggingface_hub import hf_hub_download

        >>> np.random.seed(0)


        >>> def read_video_pyav(container, indices):
        ...     '''
        ...     Decode the video with PyAV decoder.
        ...     Args:
        ...         container (`av.container.input.InputContainer`): PyAV container.
        ...         indices (`List[int]`): List of frame indices to decode.
        ...     Returns:
        ...         result (np.ndarray): np array of decoded frames of shape (num_frames, height, width, 3).
        ...     '''
        ...     frames = []
        ...     container.seek(0)
        ...     start_index = indices[0]
        ...     end_index = indices[-1]
        ...     for i, frame in enumerate(container.decode(video=0)):
        ...         if i > end_index:
        ...             break
        ...         if i >= start_index and i in indices:
        ...             frames.append(frame)
        ...     return np.stack([x.to_ndarray(format="rgb24") for x in frames])


        >>> def sample_frame_indices(clip_len, frame_sample_rate, seg_len):
        ...     '''
        ...     Sample a given number of frame indices from the video.
        ...     Args:
        ...         clip_len (`int`): Total number of frames to sample.
        ...         frame_sample_rate (`int`): Sample every n-th frame.
        ...         seg_len (`int`): Maximum allowed index of sample's last frame.
        ...     Returns:
        ...         indices (`List[int]`): List of sampled frame indices
        ...     '''
        ...     converted_len = int(clip_len * frame_sample_rate)
        ...     end_idx = np.random.randint(converted_len, seg_len)
        ...     start_idx = end_idx - converted_len
        ...     indices = np.linspace(start_idx, end_idx, num=clip_len)
        ...     indices = np.clip(indices, start_idx, end_idx - 1).astype(np.int64)
        ...     return indices


        >>> # video clip consists of 300 frames (10 seconds at 30 FPS)
        >>> file_path = hf_hub_download(
        ...     repo_id="nielsr/video-demo", filename="eating_spaghetti.mp4", repo_type="dataset"
        ... )
        >>> container = av.open(file_path)

        >>> # sample 32 frames
        >>> indices = sample_frame_indices(clip_len=32, frame_sample_rate=4, seg_len=container.streams.video[0].frames)
        >>> video = read_video_pyav(container=container, indices=indices)

        >>> image_processor = VivitImageProcessor.from_pretrained("google/vivit-b-16x2-kinetics400")
        >>> model = VivitForVideoClassification.from_pretrained("google/vivit-b-16x2-kinetics400")

        >>> inputs = image_processor(list(video), return_tensors="pt")

        >>> with torch.no_grad():
        ...     outputs = model(**inputs)
        ...     logits = outputs.logits

        >>> # model predicts one of the 400 Kinetics-400 classes
        >>> predicted_label = logits.argmax(-1).item()
        >>> print(model.config.id2label[predicted_label])
        LABEL_116
        ```"""
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        outputs = self.vivit(
            pixel_values,
            head_mask=head_mask,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            interpolate_pos_encoding=interpolate_pos_encoding,
            return_dict=return_dict,
        )

        sequence_output = outputs[0]

        logits = self.classifier(sequence_output[:, 0, :])

        loss = None
        if labels is not None:
            if self.num_labels == 1:
                # We are doing regression
                loss_fct = MSELoss()
                loss = loss_fct(logits.view(-1), labels.view(-1))
            else:
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))

        if not return_dict:
            output = (logits,) + outputs[2:]
            return ((loss,) + output) if loss is not None else output

        return ImageClassifierOutput(
            loss=loss,
            logits=logits,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
        )


__all__ = ["VivitModel", "VivitPreTrainedModel", "VivitForVideoClassification"]