"""Factory function to build auto-model classes."""

import copy
import importlib
import json
import warnings
from collections import OrderedDict

from ...configuration_utils import PretrainedConfig
from ...dynamic_module_utils import get_class_from_dynamic_module, resolve_trust_remote_code
from ...utils import (
    CONFIG_NAME,
    cached_file,
    copy_func,
    extract_commit_hash,
    find_adapter_config_file,
    is_peft_available,
    is_torch_available,
    logging,
    requires_backends,
)
from .configuration_auto import AutoConfig, model_type_to_module_name, replace_list_option_in_docstrings


if is_torch_available():
    from ...generation import GenerationMixin


logger = logging.get_logger(__name__)


CLASS_DOCSTRING = """
    This is a generic model class that will be instantiated as one of the model classes of the library when created
    with the [`~BaseAutoModelClass.from_pretrained`] class method or the [`~BaseAutoModelClass.from_config`] class
    method.

    This class cannot be instantiated directly using `__init__()` (throws an error).
"""

FROM_CONFIG_DOCSTRING = """
        Instantiates one of the model classes of the library from a configuration.

        Note:
            Loading a model from its configuration file does **not** load the model weights. It only affects the
            model's configuration. Use [`~BaseAutoModelClass.from_pretrained`] to load the model weights.

        Args:
            config ([`PretrainedConfig`]):
                The model class to instantiate is selected based on the configuration class:

                List options
            attn_implementation (`str`, *optional*):
                The attention implementation to use in the model (if relevant). Can be any of `"eager"` (manual implementation of the attention), `"sdpa"` (using [`F.scaled_dot_product_attention`](https://pytorch.org/docs/master/generated/torch.nn.functional.scaled_dot_product_attention.html)), or `"flash_attention_2"` (using [Dao-AILab/flash-attention](https://github.com/Dao-AILab/flash-attention)). By default, if available, SDPA will be used for torch>=2.1.1. The default is otherwise the manual `"eager"` implementation.

        Examples:

        ```python
        >>> from transformers import AutoConfig, BaseAutoModelClass

        >>> # Download configuration from huggingface.co and cache.
        >>> config = AutoConfig.from_pretrained("checkpoint_placeholder")
        >>> model = BaseAutoModelClass.from_config(config)
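
        >>> # Illustrative: an attention implementation can also be picked at load time
        >>> # (one of the values documented for `attn_implementation` above).
        >>> model = BaseAutoModelClass.from_config(config, attn_implementation="eager")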
        ```
"""

FROM_PRETRAINED_TORCH_DOCSTRING = """
        Instantiate one of the model classes of the library from a pretrained model.

        The model class to instantiate is selected based on the `model_type` property of the config object (either
        passed as an argument or loaded from `pretrained_model_name_or_path` if possible), or when it's missing, by
        falling back to using pattern matching on `pretrained_model_name_or_path`:

        List options

        The model is set in evaluation mode by default using `model.eval()` (so for instance, dropout modules are
        deactivated). To train the model, you should first set it back in training mode with `model.train()`.

        Args:
            pretrained_model_name_or_path (`str` or `os.PathLike`):
                Can be either:

                    - A string, the *model id* of a pretrained model hosted inside a model repo on huggingface.co.
                    - A path to a *directory* containing model weights saved using
                      [`~PreTrainedModel.save_pretrained`], e.g., `./my_model_directory/`.
                    - A path or URL to a *TensorFlow index checkpoint file* (e.g., `./tf_model/model.ckpt.index`). In
                      this case, `from_tf` should be set to `True` and a configuration object should be provided as
                      `config` argument. This loading path is slower than converting the TensorFlow checkpoint into a
                      PyTorch model using the provided conversion scripts and loading the PyTorch model afterwards.
            model_args (additional positional arguments, *optional*):
                Will be passed along to the underlying model `__init__()` method.
            config ([`PretrainedConfig`], *optional*):
                Configuration for the model to use instead of an automatically loaded configuration. Configuration can
                be automatically loaded when:

                    - The model is a model provided by the library (loaded with the *model id* string of a pretrained
                      model).
                    - The model was saved using [`~PreTrainedModel.save_pretrained`] and is reloaded by supplying the
                      save directory.
                    - The model is loaded by supplying a local directory as `pretrained_model_name_or_path` and a
                      configuration JSON file named *config.json* is found in the directory.
            state_dict (*Dict[str, torch.Tensor]*, *optional*):
                A state dictionary to use instead of a state dictionary loaded from saved weights file.

                This option can be used if you want to create a model from a pretrained configuration but load your own
                weights. In this case though, you should check if using [`~PreTrainedModel.save_pretrained`] and
                [`~PreTrainedModel.from_pretrained`] is not a simpler option.
            cache_dir (`str` or `os.PathLike`, *optional*):
                Path to a directory in which a downloaded pretrained model configuration should be cached if the
                standard cache should not be used.
            from_tf (`bool`, *optional*, defaults to `False`):
                Load the model weights from a TensorFlow checkpoint save file (see docstring of
                `pretrained_model_name_or_path` argument).
            force_download (`bool`, *optional*, defaults to `False`):
                Whether or not to force the (re-)download of the model weights and configuration files, overriding the
                cached versions if they exist.
            resume_download:
                Deprecated and ignored. All downloads are now resumed by default when possible.
                Will be removed in v5 of Transformers.
            proxies (`Dict[str, str]`, *optional*):
                A dictionary of proxy servers to use by protocol or endpoint, e.g., `{'http': 'foo.bar:3128',
                'http://hostname': 'foo.bar:4012'}`. The proxies are used on each request.
            output_loading_info (`bool`, *optional*, defaults to `False`):
                Whether or not to also return a dictionary containing missing keys, unexpected keys and error messages.
            local_files_only (`bool`, *optional*, defaults to `False`):
                Whether or not to only look at local files (e.g., not try downloading the model).
            revision (`str`, *optional*, defaults to `"main"`):
                The specific model version to use. It can be a branch name, a tag name, or a commit id, since we use a
                git-based system for storing models and other artifacts on huggingface.co, so `revision` can be any
                identifier allowed by git.
            trust_remote_code (`bool`, *optional*, defaults to `False`):
                Whether or not to allow for custom models defined on the Hub in their own modeling files. This option
                should only be set to `True` for repositories you trust and in which you have read the code, as it will
                execute code present on the Hub on your local machine.
            code_revision (`str`, *optional*, defaults to `"main"`):
                The specific revision to use for the code on the Hub, if the code lives in a different repository than
                the rest of the model. It can be a branch name, a tag name, or a commit id, since we use a git-based
                system for storing models and other artifacts on huggingface.co, so `revision` can be any identifier
                allowed by git.
            kwargs (additional keyword arguments, *optional*):
                Can be used to update the configuration object (after it has been loaded) and initialize the model (e.g.,
                `output_attentions=True`). Behaves differently depending on whether a `config` is provided or
                automatically loaded:

                    - If a configuration is provided with `config`, `**kwargs` will be directly passed to the
                      underlying model's `__init__` method (we assume all relevant updates to the configuration have
                      already been done)
                    - If a configuration is not provided, `kwargs` will be first passed to the configuration class
                      initialization function ([`~PretrainedConfig.from_pretrained`]). Each key of `kwargs` that
                      corresponds to a configuration attribute will be used to override said attribute with the
                      supplied `kwargs` value. Remaining keys that do not correspond to any configuration attribute
                      will be passed to the underlying model's `__init__` function.

        Examples:

        ```python
        >>> from transformers import AutoConfig, BaseAutoModelClass

        >>> # Download model and configuration from huggingface.co and cache.
        >>> model = BaseAutoModelClass.from_pretrained("checkpoint_placeholder")

        >>> # Update configuration during loading
        >>> model = BaseAutoModelClass.from_pretrained("checkpoint_placeholder", output_attentions=True)
        >>> model.config.output_attentions
        True

        >>> # Loading from a TF checkpoint file instead of a PyTorch model (slower)
        >>> config = AutoConfig.from_pretrained("./tf_model/shortcut_placeholder_tf_model_config.json")
        >>> model = BaseAutoModelClass.from_pretrained(
        ...     "./tf_model/shortcut_placeholder_tf_checkpoint.ckpt.index", from_tf=True, config=config
        ... )
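
        >>> # Illustrative: `revision` pins the exact model version (a branch, tag, or commit id).
        >>> model = BaseAutoModelClass.from_pretrained("checkpoint_placeholder", revision="main")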
        ```
"""

FROM_PRETRAINED_TF_DOCSTRING = """
        Instantiate one of the model classes of the library from a pretrained model.

        The model class to instantiate is selected based on the `model_type` property of the config object (either
        passed as an argument or loaded from `pretrained_model_name_or_path` if possible), or when it's missing, by
        falling back to using pattern matching on `pretrained_model_name_or_path`:

        List options

        Args:
            pretrained_model_name_or_path (`str` or `os.PathLike`):
                Can be either:

                    - A string, the *model id* of a pretrained model hosted inside a model repo on huggingface.co.
                    - A path to a *directory* containing model weights saved using
                      [`~PreTrainedModel.save_pretrained`], e.g., `./my_model_directory/`.
                    - A path or URL to a *PyTorch state_dict save file* (e.g., `./pt_model/pytorch_model.bin`). In this
                      case, `from_pt` should be set to `True` and a configuration object should be provided as `config`
                      argument. This loading path is slower than converting the PyTorch model into a TensorFlow model
                      using the provided conversion scripts and loading the TensorFlow model afterwards.
            model_args (additional positional arguments, *optional*):
                Will be passed along to the underlying model `__init__()` method.
            config ([`PretrainedConfig`], *optional*):
                Configuration for the model to use instead of an automatically loaded configuration. Configuration can
                be automatically loaded when:

                    - The model is a model provided by the library (loaded with the *model id* string of a pretrained
                      model).
                    - The model was saved using [`~PreTrainedModel.save_pretrained`] and is reloaded by supplying the
                      save directory.
                    - The model is loaded by supplying a local directory as `pretrained_model_name_or_path` and a
                      configuration JSON file named *config.json* is found in the directory.
            cache_dir (`str` or `os.PathLike`, *optional*):
                Path to a directory in which a downloaded pretrained model configuration should be cached if the
                standard cache should not be used.
            from_pt (`bool`, *optional*, defaults to `False`):
                Load the model weights from a PyTorch checkpoint save file (see docstring of
                `pretrained_model_name_or_path` argument).
            force_download (`bool`, *optional*, defaults to `False`):
                Whether or not to force the (re-)download of the model weights and configuration files, overriding the
                cached versions if they exist.
            resume_download:
                Deprecated and ignored. All downloads are now resumed by default when possible.
                Will be removed in v5 of Transformers.
            proxies (`Dict[str, str]`, *optional*):
                A dictionary of proxy servers to use by protocol or endpoint, e.g., `{'http': 'foo.bar:3128',
                'http://hostname': 'foo.bar:4012'}`. The proxies are used on each request.
            output_loading_info (`bool`, *optional*, defaults to `False`):
                Whether or not to also return a dictionary containing missing keys, unexpected keys and error messages.
            local_files_only (`bool`, *optional*, defaults to `False`):
                Whether or not to only look at local files (e.g., not try downloading the model).
            revision (`str`, *optional*, defaults to `"main"`):
                The specific model version to use. It can be a branch name, a tag name, or a commit id, since we use a
                git-based system for storing models and other artifacts on huggingface.co, so `revision` can be any
                identifier allowed by git.
            trust_remote_code (`bool`, *optional*, defaults to `False`):
                Whether or not to allow for custom models defined on the Hub in their own modeling files. This option
                should only be set to `True` for repositories you trust and in which you have read the code, as it will
                execute code present on the Hub on your local machine.
            code_revision (`str`, *optional*, defaults to `"main"`):
                The specific revision to use for the code on the Hub, if the code lives in a different repository than
                the rest of the model. It can be a branch name, a tag name, or a commit id, since we use a git-based
                system for storing models and other artifacts on huggingface.co, so `revision` can be any identifier
                allowed by git.
            kwargs (additional keyword arguments, *optional*):
                Can be used to update the configuration object (after it has been loaded) and initialize the model (e.g.,
                `output_attentions=True`). Behaves differently depending on whether a `config` is provided or
                automatically loaded:

                    - If a configuration is provided with `config`, `**kwargs` will be directly passed to the
                      underlying model's `__init__` method (we assume all relevant updates to the configuration have
                      already been done)
                    - If a configuration is not provided, `kwargs` will be first passed to the configuration class
                      initialization function ([`~PretrainedConfig.from_pretrained`]). Each key of `kwargs` that
                      corresponds to a configuration attribute will be used to override said attribute with the
                      supplied `kwargs` value. Remaining keys that do not correspond to any configuration attribute
                      will be passed to the underlying model's `__init__` function.

        Examples:

        ```python
        >>> from transformers import AutoConfig, BaseAutoModelClass

        >>> # Download model and configuration from huggingface.co and cache.
        >>> model = BaseAutoModelClass.from_pretrained("checkpoint_placeholder")

        >>> # Update configuration during loading
        >>> model = BaseAutoModelClass.from_pretrained("checkpoint_placeholder", output_attentions=True)
        >>> model.config.output_attentions
        True

        >>> # Loading from a PyTorch checkpoint file instead of a TensorFlow model (slower)
        >>> config = AutoConfig.from_pretrained("./pt_model/shortcut_placeholder_pt_model_config.json")
        >>> model = BaseAutoModelClass.from_pretrained(
        ...     "./pt_model/shortcut_placeholder_pytorch_model.bin", from_pt=True, config=config
        ... )
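
        >>> # Illustrative: `output_loading_info=True` additionally returns missing/unexpected-key diagnostics.
        >>> model, loading_info = BaseAutoModelClass.from_pretrained(
        ...     "checkpoint_placeholder", output_loading_info=True
        ... )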
        ```
"""

# Stand-in for the Flax variant of the from_pretrained docstring: in the original module it is a third,
# near-identical text (like the TF variant, it documents `from_pt`), so the TF text is reused here.
FROM_PRETRAINED_FLAX_DOCSTRING = FROM_PRETRAINED_TF_DOCSTRING


def _get_model_class(config, model_mapping):
    supported_models = model_mapping[type(config)]
    if not isinstance(supported_models, (list, tuple)):
        return supported_models

    name_to_model = {model.__name__: model for model in supported_models}
    architectures = getattr(config, "architectures", [])
    for arch in architectures:
        if arch in name_to_model:
            return name_to_model[arch]
        elif f"TF{arch}" in name_to_model:
            return name_to_model[f"TF{arch}"]
        elif f"Flax{arch}" in name_to_model:
            return name_to_model[f"Flax{arch}"]

    # If no architecture in the config matches one of the supported models, default to the first element of the
    # tuple.
    return supported_models[0]


class _BaseAutoModelClass:
    # Base class for auto models.
    _model_mapping = None

    def __init__(self, *args, **kwargs):
        raise EnvironmentError(
            f"{self.__class__.__name__} is designed to be instantiated "
            f"using the `{self.__class__.__name__}.from_pretrained(pretrained_model_name_or_path)` or "
            f"`{self.__class__.__name__}.from_config(config)` methods."
        )

    @classmethod
    def from_config(cls, config, **kwargs):
        trust_remote_code = kwargs.pop("trust_remote_code", None)
        has_remote_code = hasattr(config, "auto_map") and cls.__name__ in config.auto_map
        has_local_code = type(config) in cls._model_mapping.keys()
        trust_remote_code = resolve_trust_remote_code(
            trust_remote_code, config._name_or_path, has_local_code, has_remote_code
        )

        if has_remote_code and trust_remote_code:
            class_ref = config.auto_map[cls.__name__]
            if "--" in class_ref:
                repo_id, class_ref = class_ref.split("--")
            else:
                repo_id = config.name_or_path
            model_class = get_class_from_dynamic_module(class_ref, repo_id, **kwargs)
            cls.register(config.__class__, model_class, exist_ok=True)
            _ = kwargs.pop("code_revision", None)
            model_class = add_generation_mixin_to_remote_model(model_class)
            return model_class._from_config(config, **kwargs)
        elif type(config) in cls._model_mapping.keys():
            model_class = _get_model_class(config, cls._model_mapping)
            return model_class._from_config(config, **kwargs)

        raise ValueError(
            f"Unrecognized configuration class {config.__class__} for this kind of AutoModel: {cls.__name__}.\n"
            f"Model type should be one of {', '.join(c.__name__ for c in cls._model_mapping.keys())}."
        )

    @classmethod
    def _prepare_config_for_auto_class(cls, config: PretrainedConfig) -> PretrainedConfig:
        """Additional autoclass-specific config post-loading manipulation. May be overridden in subclasses."""
        return config

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, *model_args, **kwargs):
        config = kwargs.pop("config", None)
        trust_remote_code = kwargs.pop("trust_remote_code", None)
        kwargs["_from_auto"] = True
        hub_kwargs_names = [
            "cache_dir",
            "force_download",
            "local_files_only",
            "proxies",
            "resume_download",
            "revision",
            "subfolder",
            "use_auth_token",
            "token",
        ]
        hub_kwargs = {name: kwargs.pop(name) for name in hub_kwargs_names if name in kwargs}
        code_revision = kwargs.pop("code_revision", None)
        commit_hash = kwargs.pop("_commit_hash", None)
        adapter_kwargs = kwargs.pop("adapter_kwargs", None)

        token = hub_kwargs.pop("token", None)
        use_auth_token = hub_kwargs.pop("use_auth_token", None)
        if use_auth_token is not None:
            warnings.warn(
                "The `use_auth_token` argument is deprecated and will be removed in v5 of Transformers. Please use `token` instead.",
                FutureWarning,
            )
            if token is not None:
                raise ValueError(
                    "`token` and `use_auth_token` are both specified. Please set only the argument `token`."
                )
            token = use_auth_token
        if token is not None:
            hub_kwargs["token"] = token

        if commit_hash is None:
            if not isinstance(config, PretrainedConfig):
                # We make a call to the config file first (which may be absent) to get the commit hash as soon as
                # possible.
                resolved_config_file = cached_file(
                    pretrained_model_name_or_path,
                    CONFIG_NAME,
                    _raise_exceptions_for_gated_repo=False,
                    _raise_exceptions_for_missing_entries=False,
                    _raise_exceptions_for_connection_errors=False,
                    **hub_kwargs,
                )
                commit_hash = extract_commit_hash(resolved_config_file, commit_hash)
            else:
                commit_hash = getattr(config, "_commit_hash", None)

        if is_peft_available():
            if adapter_kwargs is None:
                adapter_kwargs = {}
                if token is not None:
                    adapter_kwargs["token"] = token

            maybe_adapter_path = find_adapter_config_file(
                pretrained_model_name_or_path, _commit_hash=commit_hash, **adapter_kwargs
            )

            if maybe_adapter_path is not None:
                with open(maybe_adapter_path, "r", encoding="utf-8") as f:
                    adapter_config = json.load(f)
                    adapter_kwargs["_adapter_model_path"] = pretrained_model_name_or_path
                    pretrained_model_name_or_path = adapter_config["base_model_name_or_path"]

        if not isinstance(config, PretrainedConfig):
            kwargs_orig = copy.deepcopy(kwargs)
            # Ensure not to pollute the config object with `torch_dtype="auto"`: it is meaningless in the context of
            # the config object (`torch.dtype` values are acceptable there).
            if kwargs.get("torch_dtype", None) == "auto":
                _ = kwargs.pop("torch_dtype")
            # Do not overwrite a `quantization_config` that the config may already carry.
            if kwargs.get("quantization_config", None) is not None:
                _ = kwargs.pop("quantization_config")

            config, kwargs = AutoConfig.from_pretrained(
                pretrained_model_name_or_path,
                return_unused_kwargs=True,
                code_revision=code_revision,
                _commit_hash=commit_hash,
                **hub_kwargs,
                **kwargs,
            )

            # If `torch_dtype="auto"` or a quantization config was passed, pass it on to the model.
            if kwargs_orig.get("torch_dtype", None) == "auto":
                kwargs["torch_dtype"] = "auto"
            if kwargs_orig.get("quantization_config", None) is not None:
                kwargs["quantization_config"] = kwargs_orig["quantization_config"]

        has_remote_code = hasattr(config, "auto_map") and cls.__name__ in config.auto_map
        has_local_code = type(config) in cls._model_mapping.keys()
        trust_remote_code = resolve_trust_remote_code(
            trust_remote_code, pretrained_model_name_or_path, has_local_code, has_remote_code
        )
        kwargs["adapter_kwargs"] = adapter_kwargs

        if has_remote_code and trust_remote_code:
            class_ref = config.auto_map[cls.__name__]
            model_class = get_class_from_dynamic_module(
                class_ref, pretrained_model_name_or_path, code_revision=code_revision, **hub_kwargs, **kwargs
            )
            _ = hub_kwargs.pop("code_revision", None)
            cls.register(config.__class__, model_class, exist_ok=True)
            model_class = add_generation_mixin_to_remote_model(model_class)
            return model_class.from_pretrained(
                pretrained_model_name_or_path, *model_args, config=config, **hub_kwargs, **kwargs
            )
        elif type(config) in cls._model_mapping.keys():
            model_class = _get_model_class(config, cls._model_mapping)
            if model_class.config_class == config.sub_configs.get("text_config", None):
                config = config.get_text_config()
            return model_class.from_pretrained(
                pretrained_model_name_or_path, *model_args, config=config, **hub_kwargs, **kwargs
            )
        raise ValueError(
            f"Unrecognized configuration class {config.__class__} for this kind of AutoModel: {cls.__name__}.\n"
            f"Model type should be one of {', '.join(c.__name__ for c in cls._model_mapping.keys())}."
        )

    @classmethod
    def register(cls, config_class, model_class, exist_ok=False):
        """
        Register a new model for this class.

        Args:
            config_class ([`PretrainedConfig`]):
                The configuration corresponding to the model to register.
            model_class ([`PreTrainedModel`]):
                The model to register.
        """
        if hasattr(model_class, "config_class") and model_class.config_class.__name__ != config_class.__name__:
            raise ValueError(
                "The model class you are passing has a `config_class` attribute that is not consistent with the "
                f"config class you passed (model has {model_class.config_class} and you passed {config_class}. Fix "
                "one of those so they match!"
            )
        cls._model_mapping.register(config_class, model_class, exist_ok=exist_ok)


class _BaseAutoBackboneClass(_BaseAutoModelClass):
    # Base class for auto backbone models.
    _model_mapping = None

    @classmethod
    def _load_timm_backbone_from_pretrained(cls, pretrained_model_name_or_path, *model_args, **kwargs):
        requires_backends(cls, ["vision", "timm"])
        from ...models.timm_backbone import TimmBackboneConfig

        config = kwargs.pop("config", TimmBackboneConfig())

        if kwargs.get("out_features", None) is not None:
            raise ValueError("Cannot specify `out_features` for timm backbones")

        if kwargs.get("output_loading_info", False):
            raise ValueError("Cannot specify `output_loading_info=True` when loading from timm")

        num_channels = kwargs.pop("num_channels", config.num_channels)
        features_only = kwargs.pop("features_only", config.features_only)
        use_pretrained_backbone = kwargs.pop("use_pretrained_backbone", config.use_pretrained_backbone)
        out_indices = kwargs.pop("out_indices", config.out_indices)
        config = TimmBackboneConfig(
            backbone=pretrained_model_name_or_path,
            num_channels=num_channels,
            features_only=features_only,
            use_pretrained_backbone=use_pretrained_backbone,
            out_indices=out_indices,
        )
        return super().from_pretrained(pretrained_model_name_or_path, *model_args, config=config, **kwargs)

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, *model_args, **kwargs):
        use_timm_backbone = kwargs.pop("use_timm_backbone", False)
        if use_timm_backbone:
            return cls._load_timm_backbone_from_pretrained(pretrained_model_name_or_path, *model_args, **kwargs)

        return super().from_pretrained(pretrained_model_name_or_path, *model_args, **kwargs)


def insert_head_doc(docstring, head_doc=""):
    if len(head_doc) > 0:
        return docstring.replace(
            "one of the model classes of the library ",
            f"one of the model classes of the library (with a {head_doc} head) ",
        )
    return docstring.replace(
        "one of the model classes of the library ", "one of the base model classes of the library "
    )


def auto_class_update(cls, checkpoint_for_example="google-bert/bert-base-cased", head_doc=""):
    # Create a new class with the right name from the base class.
    model_mapping = cls._model_mapping
    name = cls.__name__
    class_docstring = insert_head_doc(CLASS_DOCSTRING, head_doc=head_doc)
    cls.__doc__ = class_docstring.replace("BaseAutoModelClass", name)

    # Copy and re-register `from_config` and `from_pretrained` as class methods, otherwise they cannot carry a
    # class-specific docstring.
    from_config = copy_func(_BaseAutoModelClass.from_config.__func__)
    from_config_docstring = insert_head_doc(FROM_CONFIG_DOCSTRING, head_doc=head_doc)
    from_config_docstring = from_config_docstring.replace("BaseAutoModelClass", name)
    from_config_docstring = from_config_docstring.replace("checkpoint_placeholder", checkpoint_for_example)
    from_config.__doc__ = from_config_docstring
    from_config = replace_list_option_in_docstrings(model_mapping._model_mapping, use_model_types=False)(from_config)
    cls.from_config = classmethod(from_config)

    if name.startswith("TF"):
        from_pretrained_docstring = FROM_PRETRAINED_TF_DOCSTRING
    elif name.startswith("Flax"):
        from_pretrained_docstring = FROM_PRETRAINED_FLAX_DOCSTRING
    else:
        from_pretrained_docstring = FROM_PRETRAINED_TORCH_DOCSTRING
    from_pretrained = copy_func(_BaseAutoModelClass.from_pretrained.__func__)
    from_pretrained_docstring = insert_head_doc(from_pretrained_docstring, head_doc=head_doc)
    from_pretrained_docstring = from_pretrained_docstring.replace("BaseAutoModelClass", name)
    from_pretrained_docstring = from_pretrained_docstring.replace("checkpoint_placeholder", checkpoint_for_example)
    shortcut = checkpoint_for_example.split("/")[-1].split("-")[0]
    from_pretrained_docstring = from_pretrained_docstring.replace("shortcut_placeholder", shortcut)
    from_pretrained.__doc__ = from_pretrained_docstring
    from_pretrained = replace_list_option_in_docstrings(model_mapping._model_mapping)(from_pretrained)
    cls.from_pretrained = classmethod(from_pretrained)
    return cls


def get_values(model_mapping):
    result = []
    for model in model_mapping.values():
        if isinstance(model, (list, tuple)):
            result += list(model)
        else:
            result.append(model)

    return result


def getattribute_from_module(module, attr):
    if attr is None:
        return None
    if isinstance(attr, tuple):
        return tuple(getattribute_from_module(module, a) for a in attr)
    if hasattr(module, attr):
        return getattr(module, attr)
    # Some mappings have entries `model_type -> object of another model type`; in that case, try to grab the object
    # at the top level.
    transformers_module = importlib.import_module("transformers")

    if module != transformers_module:
        try:
            return getattribute_from_module(transformers_module, attr)
        except ValueError:
            raise ValueError(f"Could not find {attr} neither in {module} nor in {transformers_module}!")
    raise ValueError(f"Could not find {attr} in {transformers_module}!")


def add_generation_mixin_to_remote_model(model_class):
    """
    Adds `GenerationMixin` to the inheritance of `model_class`, if `model_class` is a PyTorch model.

    This function is used for backwards compatibility purposes: in v4.45, we've started a deprecation cycle to make
    `PreTrainedModel` stop inheriting from `GenerationMixin`. Without this function, older models dynamically loaded
    from the Hub may not have the `generate` method after we remove the inheritance.
    ztorch.nn.modules.module.Moduler   generateZprepare_inputs_for_generation)	str__mro__	__bases__r@   r#   r   r   r   __dict__)rL   Zhas_custom_generate_in_classZhas_custom_prepare_inputsZ!model_class_with_generation_mixinr   r   r   rE     s    	rE   c                   @   sr   e Zd ZdZdd Zdd Zdd Zdd	 Zd
d Zdd Z	dd Z
dd Zdd Zdd Zdd ZdddZdS )_LazyAutoMappinga  
    " A mapping config to object (model or tokenizer for instance) that will load keys and values when it is accessed.

    Args:
        - config_mapping: The map model type to config class
        - model_mapping: The map model type to model (or tokenizer) class
    """

    def __init__(self, config_mapping, model_mapping):
        self._config_mapping = config_mapping
        self._reverse_config_mapping = {v: k for k, v in config_mapping.items()}
        self._model_mapping = model_mapping
        self._model_mapping._model_mapping = self
        self._extra_content = {}
        self._modules = {}

    def __len__(self):
        common_keys = set(self._config_mapping.keys()).intersection(self._model_mapping.keys())
        return len(common_keys) + len(self._extra_content)

    def __getitem__(self, key):
        if key in self._extra_content:
            return self._extra_content[key]
        model_type = self._reverse_config_mapping[key.__name__]
        if model_type in self._model_mapping:
            model_name = self._model_mapping[model_type]
            return self._load_attr_from_module(model_type, model_name)

        # Maybe there were several model types associated with this config.
        model_types = [k for k, v in self._config_mapping.items() if v == key.__name__]
        for mtype in model_types:
            if mtype in self._model_mapping:
                model_name = self._model_mapping[mtype]
                return self._load_attr_from_module(mtype, model_name)
        raise KeyError(key)

    def _load_attr_from_module(self, model_type, attr):
        module_name = model_type_to_module_name(model_type)
        if module_name not in self._modules:
            self._modules[module_name] = importlib.import_module(f".{module_name}", "transformers.models")
        return getattribute_from_module(self._modules[module_name], attr)

    def keys(self):
        mapping_keys = [
            self._load_attr_from_module(key, name)
            for key, name in self._config_mapping.items()
            if key in self._model_mapping.keys()
        ]
        return mapping_keys + list(self._extra_content.keys())

    def get(self, key, default):
        try:
            return self.__getitem__(key)
        except KeyError:
            return default

    def __bool__(self):
        return bool(self.keys())

    def values(self):
        mapping_values = [
            self._load_attr_from_module(key, name)
            for key, name in self._model_mapping.items()
            if key in self._config_mapping.keys()
        ]
        return mapping_values + list(self._extra_content.values())

    def items(self):
        mapping_items = [
            (
                self._load_attr_from_module(key, self._config_mapping[key]),
                self._load_attr_from_module(key, self._model_mapping[key]),
            )
            for key in self._model_mapping.keys()
            if key in self._config_mapping.keys()
        ]
        return mapping_items + list(self._extra_content.items())

    def __iter__(self):
        return iter(self.keys())

    def __contains__(self, item):
        if item in self._extra_content:
            return True
        if not hasattr(item, "__name__") or item.__name__ not in self._reverse_config_mapping:
            return False
        model_type = self._reverse_config_mapping[item.__name__]
        return model_type in self._model_mapping

    def register(self, key, value, exist_ok=False):
        """
        Register a new model in this mapping.
        """
        if hasattr(key, "__name__") and key.__name__ in self._reverse_config_mapping:
            model_type = self._reverse_config_mapping[key.__name__]
            if model_type in self._model_mapping.keys() and not exist_ok:
                raise ValueError(f"'{key}' is already used by a Transformers model.")

        self._extra_content[key] = value


__all__ = ["get_values"]
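

# Minimal sanity-check sketch (an addition for illustration, not part of the library module). It exercises the
# pure-Python pieces above without touching the Hub; the `_Fake*` classes are hypothetical stand-ins. Because of the
# relative imports, run it as `python -m transformers.models.auto.auto_factory` rather than as a plain script.
if __name__ == "__main__":

    class _FakeConfig(PretrainedConfig):
        model_type = "fake"

    class _FakeModel:
        config_class = _FakeConfig

    # `get_values` flattens list/tuple entries of a model mapping.
    assert get_values({"a": (1, 2), "b": 3}) == [1, 2, 3]

    # A `_LazyAutoMapping` built from empty name tables still accepts runtime registrations.
    mapping = _LazyAutoMapping(OrderedDict(), OrderedDict())
    mapping.register(_FakeConfig, _FakeModel)
    assert _FakeConfig in mapping and mapping[_FakeConfig] is _FakeModel
    print("auto_factory sketch: all assertions passed")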