"""CLIP model configuration"""

from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional


if TYPE_CHECKING:
    from ...processing_utils import ProcessorMixin
    from ...utils import TensorType

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging


logger = logging.get_logger(__name__)


class CLIPTextConfig(PretrainedConfig):
    r"""
    This is the configuration class to store the configuration of a [`CLIPTextModel`]. It is used to instantiate a CLIP
    text encoder according to the specified arguments, defining the model architecture. Instantiating a configuration
    with the defaults will yield a similar configuration to that of the text encoder of the CLIP
    [openai/clip-vit-base-patch32](https://huggingface.co/openai/clip-vit-base-patch32) architecture.

    Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
    documentation from [`PretrainedConfig`] for more information.

    Args:
        vocab_size (`int`, *optional*, defaults to 49408):
            Vocabulary size of the CLIP text model. Defines the number of different tokens that can be represented by
            the `input_ids` passed when calling [`CLIPModel`].
        hidden_size (`int`, *optional*, defaults to 512):
            Dimensionality of the encoder layers and the pooler layer.
        intermediate_size (`int`, *optional*, defaults to 2048):
            Dimensionality of the "intermediate" (i.e., feed-forward) layer in the Transformer encoder.
        projection_dim (`int`, *optional*, defaults to 512):
            Dimensionality of text and vision projection layers.
        num_hidden_layers (`int`, *optional*, defaults to 12):
            Number of hidden layers in the Transformer encoder.
        num_attention_heads (`int`, *optional*, defaults to 8):
            Number of attention heads for each attention layer in the Transformer encoder.
        max_position_embeddings (`int`, *optional*, defaults to 77):
            The maximum sequence length that this model might ever be used with. Typically set this to something large
            just in case (e.g., 512 or 1024 or 2048).
        hidden_act (`str` or `function`, *optional*, defaults to `"quick_gelu"`):
            The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
            `"relu"`, `"selu"`, `"gelu_new"` and `"quick_gelu"` are supported.
        layer_norm_eps (`float`, *optional*, defaults to 1e-05):
            The epsilon used by the layer normalization layers.
        attention_dropout (`float`, *optional*, defaults to 0.0):
            The dropout ratio for the attention probabilities.
        initializer_range (`float`, *optional*, defaults to 0.02):
            The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
        initializer_factor (`float`, *optional*, defaults to 1.0):
            A factor for initializing all weight matrices (should be kept to 1, used internally for initialization
            testing).
        pad_token_id (`int`, *optional*, defaults to 1):
            Padding token id.
        bos_token_id (`int`, *optional*, defaults to 49406):
            Beginning of stream token id.
        eos_token_id (`int`, *optional*, defaults to 49407):
            End of stream token id.

    Example:

    ```python
    >>> from transformers import CLIPTextConfig, CLIPTextModel

    >>> # Initializing a CLIPTextConfig with openai/clip-vit-base-patch32 style configuration
    >>> configuration = CLIPTextConfig()

    >>> # Initializing a CLIPTextModel (with random weights) from the openai/clip-vit-base-patch32 style configuration
    >>> model = CLIPTextModel(configuration)

    >>> # Accessing the model configuration
    >>> configuration = model.config
    ```Zclip_text_modeltext_config               M   
quick_geluh㈵>        {Gz?      ?       c                    sf   t  jd|||d| || _|| _|| _|| _|| _|| _|| _|	| _	|| _
|| _|| _|
| _d S )N)pad_token_idbos_token_ideos_token_id )super__init__
vocab_sizehidden_sizeintermediate_sizeprojection_dimnum_hidden_layersnum_attention_headsmax_position_embeddingslayer_norm_eps
hidden_actinitializer_rangeinitializer_factorattention_dropout)selfr$   r%   r&   r'   r(   r)   r*   r,   r+   r/   r-   r.   r   r   r    kwargs	__class__r!   Z/var/www/auris/lib/python3.10/site-packages/transformers/models/clip/configuration_clip.pyr#   a   s   
zCLIPTextConfig.__init__)r   r   r   r   r   r   r   r   r   r   r   r   r   r   r   __name__
__module____qualname____doc__
model_typeZbase_config_keyr#   __classcell__r!   r!   r2   r4   r   !   s(    <r   c                       sD   e Zd ZdZdZdZ									
					d fdd	Z  ZS )CLIPVisionConfiga  
    This is the configuration class to store the configuration of a [`CLIPVisionModel`]. It is used to instantiate a
    CLIP vision encoder according to the specified arguments, defining the model architecture. Instantiating a
    configuration with the defaults will yield a similar configuration to that of the vision encoder of the CLIP
    [openai/clip-vit-base-patch32](https://huggingface.co/openai/clip-vit-base-patch32) architecture.

    Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
    documentation from [`PretrainedConfig`] for more information.

    Args:
        hidden_size (`int`, *optional*, defaults to 768):
            Dimensionality of the encoder layers and the pooler layer.
        intermediate_size (`int`, *optional*, defaults to 3072):
            Dimensionality of the "intermediate" (i.e., feed-forward) layer in the Transformer encoder.
        projection_dim (`int`, *optional*, defaults to 512):
            Dimensionality of text and vision projection layers.
        num_hidden_layers (`int`, *optional*, defaults to 12):
            Number of hidden layers in the Transformer encoder.
        num_attention_heads (`int`, *optional*, defaults to 12):
            Number of attention heads for each attention layer in the Transformer encoder.
        num_channels (`int`, *optional*, defaults to 3):
            The number of input channels.
        image_size (`int`, *optional*, defaults to 224):
            The size (resolution) of each image.
        patch_size (`int`, *optional*, defaults to 32):
            The size (resolution) of each patch.
        hidden_act (`str` or `function`, *optional*, defaults to `"quick_gelu"`):
            The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
            `"relu"`, `"selu"`, `"gelu_new"` and `"quick_gelu"` are supported.
        layer_norm_eps (`float`, *optional*, defaults to 1e-05):
            The epsilon used by the layer normalization layers.
        attention_dropout (`float`, *optional*, defaults to 0.0):
            The dropout ratio for the attention probabilities.
        initializer_range (`float`, *optional*, defaults to 0.02):
            The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
        initializer_factor (`float`, *optional*, defaults to 1.0):
            A factor for initializing all weight matrices (should be kept to 1, used internally for initialization
            testing).

    Example:

    ```python
    >>> from transformers import CLIPVisionConfig, CLIPVisionModel

    >>> # Initializing a CLIPVisionConfig with openai/clip-vit-base-patch32 style configuration
    >>> configuration = CLIPVisionConfig()

    >>> # Initializing a CLIPVisionModel (with random weights) from the openai/clip-vit-base-patch32 style configuration
    >>> model = CLIPVisionModel(configuration)

    >>> # Accessing the model configuration
    >>> configuration = model.config
    ```Zclip_vision_modelvision_config      r   r   r          r   r   r   r   r   c                    sd   t  jdi | || _|| _|| _|| _|| _|| _|| _|| _	|| _
|| _|| _|
| _|	| _d S )Nr!   )r"   r#   r%   r&   r'   r(   r)   num_channels
patch_size
image_sizer-   r.   r/   r+   r,   )r0   r%   r&   r'   r(   r)   rB   rD   rC   r,   r+   r/   r-   r.   r1   r2   r!   r4   r#      s   
zCLIPVisionConfig.__init__)r>   r?   r   r   r   r   r@   rA   r   r   r   r   r   r5   r!   r!   r2   r4   r<      s$    6r<   c                       sH   e Zd ZdZdZeedZ	d fdd	Ze	d	ed
efddZ
  ZS )
CLIPConfigaN  
    [`CLIPConfig`] is the configuration class to store the configuration of a [`CLIPModel`]. It is used to instantiate
    a CLIP model according to the specified arguments, defining the text model and vision model configs. Instantiating
    a configuration with the defaults will yield a similar configuration to that of the CLIP
    [openai/clip-vit-base-patch32](https://huggingface.co/openai/clip-vit-base-patch32) architecture.

    Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
    documentation from [`PretrainedConfig`] for more information.

    Args:
        text_config (`dict`, *optional*):
            Dictionary of configuration options used to initialize [`CLIPTextConfig`].
        vision_config (`dict`, *optional*):
            Dictionary of configuration options used to initialize [`CLIPVisionConfig`].
        projection_dim (`int`, *optional*, defaults to 512):
            Dimensionality of text and vision projection layers.
        logit_scale_init_value (`float`, *optional*, defaults to 2.6592):
            The initial value of the *logit_scale* parameter. Default is used as per the original CLIP implementation.
        kwargs (*optional*):
            Dictionary of keyword arguments.

    Example:

    ```python
    >>> from transformers import CLIPConfig, CLIPModel

    >>> # Initializing a CLIPConfig with openai/clip-vit-base-patch32 style configuration
    >>> configuration = CLIPConfig()

    >>> # Initializing a CLIPModel (with random weights) from the openai/clip-vit-base-patch32 style configuration
    >>> model = CLIPModel(configuration)

    >>> # Accessing the model configuration
    >>> configuration = model.config

    >>> # We can also initialize a CLIPConfig from a CLIPTextConfig and a CLIPVisionConfig
    >>> from transformers import CLIPTextConfig, CLIPVisionConfig

    >>> # Initializing a CLIPText and CLIPVision configuration
    >>> config_text = CLIPTextConfig()
    >>> config_vision = CLIPVisionConfig()

    >>> config = CLIPConfig.from_text_vision_configs(config_text, config_vision)
    ```Zclipr   r=   Nr   /L
F@c                    s  | dd }| dd }t jdi | |d ur]|d u ri }tdi | }| D ]+\}	}
|	|v rW|
||	 krW|	dvrW|	|v rLd|	 d|	 d}nd|	 d}t| q,|| |d ur|d u rgi }t	di | }d	|v rd
d |d	  D |d	< | D ]+\}	}
|	|v r|
||	 kr|	dvr|	|v rd|	 d|	 d}nd|	 d}t| q|| |d u ri }td |d u ri }td tdi || _
t	di || _|| _|| _d| _d S )Ntext_config_dictvision_config_dict)Ztransformers_version`zp` is found in both `text_config_dict` and `text_config` but with different values. The value `text_config_dict["z"]` will be used instead.zj`text_config_dict` is provided which will be used to initialize `CLIPTextConfig`. The value `text_config["z"]` will be overridden.Zid2labelc                 S   s   i | ]	\}}t ||qS r!   )str).0keyvaluer!   r!   r4   
<dictcomp>D  s    z'CLIPConfig.__init__.<locals>.<dictcomp>zv` is found in both `vision_config_dict` and `vision_config` but with different values. The value `vision_config_dict["zp`vision_config_dict` is provided which will be used to initialize `CLIPVisionConfig`. The value `vision_config["zO`text_config` is `None`. Initializing the `CLIPTextConfig` with default values.zS`vision_config` is `None`. initializing the `CLIPVisionConfig` with default values.r   r!   )popr"   r#   r   to_dictitemsloggerinfoupdater<   r   r=   r'   logit_scale_init_valuer.   )r0   r   r=   r'   rV   r1   rH   rI   Z_text_config_dictrM   rN   messageZ_vision_config_dictr2   r!   r4   r#     sl   








zCLIPConfig.__init__r   r=   c                 K   s   | d|  |  d|S )z
        Instantiate a [`CLIPConfig`] (or a derived class) from clip text model configuration and clip vision model
        configuration.

        Returns:
            [`CLIPConfig`]: An instance of a configuration object
        """
        return cls(text_config=text_config.to_dict(), vision_config=vision_config.to_dict(), **kwargs)
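

# A short sketch of the legacy-override behavior implemented in `CLIPConfig.__init__` above: when
# both `text_config` and `text_config_dict` are passed, the values from `text_config_dict` win and
# the conflicting keys are reported through `logger.info`. The numbers below are arbitrary.
#
#     >>> config = CLIPConfig(text_config={"hidden_size": 512}, text_config_dict={"hidden_size": 768})
#     >>> config.text_config.hidden_size
#     768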
XrE   c                       s   e Zd Zedeeeeef f fddZedeeeeef f fddZede	fddZ
				dd
ddededed deeef f
 fddZedefddZ  ZS )CLIPOnnxConfigreturnc                 C   s0   t ddddfdddddd	fd
dddfgS )NZ	input_idsbatchsequence)r   r   Zpixel_valuesrB   heightwidth)r   r      r   Zattention_maskr   r0   r!   r!   r4   inputsy  s   zCLIPOnnxConfig.inputsc                 C   s0   t dddifdddifdddifdddifgS )NZlogits_per_imager   r]   Zlogits_per_textZtext_embedsZimage_embedsr   rb   r!   r!   r4   outputs  s   



zCLIPOnnxConfig.outputsc                 C      dS )Ng-C6?r!   rb   r!   r!   r4   atol_for_validation     z"CLIPOnnxConfig.atol_for_validationN	processorr	   
batch_size
seq_length	frameworkr
   c                    s6   t  j|j|||d}t  j|j||d}i ||S )N)rj   rk   rl   )rj   rl   )r"   generate_dummy_inputsZ	tokenizerZimage_processor)r0   ri   rj   rk   rl   Ztext_input_dictZimage_input_dictr2   r!   r4   rm     s   
z$CLIPOnnxConfig.generate_dummy_inputsc                 C   re   )N   r!   rb   r!   r!   r4   default_onnx_opset  rg   z!CLIPOnnxConfig.default_onnx_opset)rh   rh   N)r6   r7   r8   propertyr   rK   intrc   rd   floatrf   r   r   rm   ro   r;   r!   r!   r2   r4   r[   x  s.     	 



__all__ = ["CLIPConfig", "CLIPOnnxConfig", "CLIPTextConfig", "CLIPVisionConfig"]