"""BEiT model configuration"""

import warnings
from collections import OrderedDict
from typing import Mapping

from packaging import version

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices


class BeitConfig(BackboneConfigMixin, PretrainedConfig):
    r"""
    This is the configuration class to store the configuration of a [`BeitModel`]. It is used to instantiate a BEiT
    model according to the specified arguments, defining the model architecture. Instantiating a configuration with the
    defaults will yield a similar configuration to that of the BEiT
    [microsoft/beit-base-patch16-224-pt22k](https://huggingface.co/microsoft/beit-base-patch16-224-pt22k) architecture.

    Args:
        vocab_size (`int`, *optional*, defaults to 8192):
            Vocabulary size of the BEiT model. Defines the number of different image tokens that can be used during
            pre-training.
        hidden_size (`int`, *optional*, defaults to 768):
            Dimensionality of the encoder layers and the pooler layer.
        num_hidden_layers (`int`, *optional*, defaults to 12):
            Number of hidden layers in the Transformer encoder.
        num_attention_heads (`int`, *optional*, defaults to 12):
            Number of attention heads for each attention layer in the Transformer encoder.
        intermediate_size (`int`, *optional*, defaults to 3072):
            Dimensionality of the "intermediate" (i.e., feed-forward) layer in the Transformer encoder.
        hidden_act (`str` or `function`, *optional*, defaults to `"gelu"`):
            The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
            `"relu"`, `"selu"` and `"gelu_new"` are supported.
        hidden_dropout_prob (`float`, *optional*, defaults to 0.0):
            The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
        attention_probs_dropout_prob (`float`, *optional*, defaults to 0.0):
            The dropout ratio for the attention probabilities.
        initializer_range (`float`, *optional*, defaults to 0.02):
            The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
        layer_norm_eps (`float`, *optional*, defaults to 1e-12):
            The epsilon used by the layer normalization layers.
        image_size (`int`, *optional*, defaults to 224):
            The size (resolution) of each image.
        patch_size (`int`, *optional*, defaults to 16):
            The size (resolution) of each patch.
        num_channels (`int`, *optional*, defaults to 3):
            The number of input channels.
        use_mask_token (`bool`, *optional*, defaults to `False`):
            Whether to use a mask token for masked image modeling.
        use_absolute_position_embeddings (`bool`, *optional*, defaults to `False`):
            Whether to use BERT-style absolute position embeddings.
        use_relative_position_bias (`bool`, *optional*, defaults to `False`):
            Whether to use T5-style relative position embeddings in the self-attention layers.
        use_shared_relative_position_bias (`bool`, *optional*, defaults to `False`):
            Whether to use the same relative position embeddings across all self-attention layers of the Transformer.
        layer_scale_init_value (`float`, *optional*, defaults to 0.1):
            Scale to use in the self-attention layers. 0.1 for base, 1e-5 for large. Set 0 to disable layer scale.
        drop_path_rate (`float`, *optional*, defaults to 0.1):
            Stochastic depth rate per sample (when applied in the main path of residual layers).
        use_mean_pooling (`bool`, *optional*, defaults to `True`):
            Whether to mean pool the final hidden states of the patches instead of using the final hidden state of the
            CLS token, before applying the classification head.
        pool_scales (`Tuple[int]`, *optional*, defaults to `[1, 2, 3, 6]`):
            Pooling scales used in the Pooling Pyramid Module applied on the last feature map.
        use_auxiliary_head (`bool`, *optional*, defaults to `True`):
            Whether to use an auxiliary head during training.
        auxiliary_loss_weight (`float`, *optional*, defaults to 0.4):
            Weight of the cross-entropy loss of the auxiliary head.
        auxiliary_channels (`int`, *optional*, defaults to 256):
            Number of channels to use in the auxiliary head.
        auxiliary_num_convs (`int`, *optional*, defaults to 1):
            Number of convolutional layers to use in the auxiliary head.
        auxiliary_concat_input (`bool`, *optional*, defaults to `False`):
            Whether to concatenate the output of the auxiliary head with the input before the classification layer.
        semantic_loss_ignore_index (`int`, *optional*, defaults to 255):
            The index that is ignored by the loss function of the semantic segmentation model.
        out_features (`List[str]`, *optional*):
            If used as backbone, list of features to output. Can be any of `"stem"`, `"stage1"`, `"stage2"`, etc.
            (depending on how many stages the model has). If unset and `out_indices` is set, will default to the
            corresponding stages. If unset and `out_indices` is unset, will default to the last stage. Must be in the
            same order as defined in the `stage_names` attribute.
        out_indices (`List[int]`, *optional*):
            If used as backbone, list of indices of features to output. Can be any of 0, 1, 2, etc. (depending on how
            many stages the model has). If unset and `out_features` is set, will default to the corresponding stages.
            If unset and `out_features` is unset, will default to the last stage. Must be in the
            same order as defined in the `stage_names` attribute.
        add_fpn (`bool`, *optional*, defaults to `False`):
            Whether to add a FPN as part of the backbone. Only relevant for [`BeitBackbone`].
        reshape_hidden_states (`bool`, *optional*, defaults to `True`):
            Whether to reshape the feature maps to 4D tensors of shape `(batch_size, hidden_size, height, width)` in
            case the model is used as backbone. If `False`, the feature maps will be 3D tensors of shape `(batch_size,
            seq_len, hidden_size)`. Only relevant for [`BeitBackbone`].

    Example:

    ```python
    >>> from transformers import BeitConfig, BeitModel

    >>> # Initializing a BEiT beit-base-patch16-224-pt22k style configuration
    >>> configuration = BeitConfig()

    >>> # Initializing a model (with random weights) from the beit-base-patch16-224-pt22k style configuration
    >>> model = BeitModel(configuration)

    >>> # Accessing the model configuration
    >>> configuration = model.config
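
    >>> # Illustrative sketch (not in the original example): BEiT can also be configured
    >>> # as a backbone by selecting which stages to expose via `out_features` (see Args above)
    >>> backbone_configuration = BeitConfig(out_features=["stage4", "stage8", "stage12"])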
    ```Zbeiti    i      i   Zgelug        g{Gz?g-q=      r   Fg?T)      r      g?   r      Nc            !         s  t  jdi |  || _|| _|| _|| _|| _|| _|| _|| _	|	| _
|
| _|| _|| _|| _|| _|| _|| _|| _|| _|| _|| _|| _|| _|| _|| _|| _|| _|| _d| v ritdt |  d}dgdd t!d| jd D  | _"t#||| j"d\| _$| _%|| _&|| _'d S )	NZsegmentation_indiceszuThe `segmentation_indices` argument is deprecated and will be removed in a future version, use `out_indices` instead.stemc                 S   s   g | ]}d | qS )Zstage ).0idxr   r   Z/var/www/auris/lib/python3.10/site-packages/transformers/models/beit/configuration_beit.py
<listcomp>   s    z'BeitConfig.__init__.<locals>.<listcomp>r   )out_featuresout_indicesstage_namesr   )(super__init__
vocab_sizehidden_sizenum_hidden_layersnum_attention_headsintermediate_size
hidden_acthidden_dropout_probattention_probs_dropout_probinitializer_rangelayer_norm_eps
image_size
patch_sizenum_channelsuse_mask_token use_absolute_position_embeddingsuse_relative_position_bias!use_shared_relative_position_biaslayer_scale_init_valuedrop_path_rateuse_mean_poolingpool_scalesuse_auxiliary_headauxiliary_loss_weightauxiliary_channelsauxiliary_num_convsauxiliary_concat_inputsemantic_loss_ignore_indexwarningswarnFutureWarningpopranger   r
   Z_out_featuresZ_out_indicesadd_fpnreshape_hidden_states)!selfr   r    r!   r"   r#   r$   r%   r&   r'   r(   r)   r*   r+   r,   r-   r.   r/   r0   r1   r2   r3   r4   r5   r6   r7   r8   r9   r   r   r?   r@   kwargs	__class__r   r   r      sP   #
"
zBeitConfig.__init__)__name__
__module____qualname____doc__Z
model_typer   __classcell__r   r   rC   r   r      sF    `r   c                   @   sJ   e Zd ZedZedeeee	ef f fddZ
edefddZdS )BeitOnnxConfigz1.11returnc                 C   s   t ddddddfgS )NZpixel_valuesbatchr+   heightwidth)r   r   r   r   r   rA   r   r   r   inputs   s   zBeitOnnxConfig.inputsc                 C   s   dS )Ng-C6?r   rO   r   r   r   atol_for_validation   s   z"BeitOnnxConfig.atol_for_validationN)rE   rF   rG   r   parseZtorch_onnx_minimum_versionpropertyr   strintrP   floatrQ   r   r   r   r   rJ      s    
 rJ   )rH   r:   collectionsr   typingr   	packagingr   Zconfiguration_utilsr   Zonnxr   Zutils.backbone_utilsr	   r
   r   rJ   __all__r   r   r   r   <module>   s    :
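
# Usage sketch (an illustration, not part of the library module): `BeitOnnxConfig`
# wraps a `BeitConfig` and declares the dynamic-axis inputs used for ONNX export, e.g.
#
#     onnx_config = BeitOnnxConfig(BeitConfig())
#     list(onnx_config.inputs)         # ["pixel_values"]
#     onnx_config.atol_for_validation  # 1e-4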