"""TimesFM model configuration"""

from typing import List

from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)


class TimesFmConfig(PretrainedConfig):
    r"""
    This is the configuration class to store the configuration of a [`TimesFmModelForPrediction`]. It is used to
    instantiate a TimesFM model according to the specified arguments, defining the model architecture. Instantiating a
    configuration with the defaults will yield a similar configuration to that of the TimesFM
    [google/timesfm-2.0-500m-pytorch](https://huggingface.co/google/timesfm-2.0-500m-pytorch) architecture.

    Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
    documentation from [`PretrainedConfig`] for more information.

    Arguments:
        patch_length (`int`, *optional*, defaults to 32):
            The length of one patch in the input sequence.
        context_length (`int`, *optional*, defaults to 512):
            The length of the input context.
        horizon_length (`int`, *optional*, defaults to 128):
            The length of the prediction horizon.
        freq_size (`int`, *optional*, defaults to 3):
            The number of frequency embeddings.
        num_hidden_layers (`int`, *optional*, defaults to 50):
            Number of Transformer layers.
        hidden_size (`int`, *optional*, defaults to 1280):
            Size of the hidden layers in the feed-forward networks.
        intermediate_size (`int`, *optional*, defaults to 1280):
            Dimension of the MLP representations.
        head_dim (`int`, *optional*, defaults to 80):
            Size of the key, query, value projections per attention head. The `inner_dim` of the projection layer will
            be defined as `num_attention_heads * head_dim`.
        num_attention_heads (`int`, *optional*, defaults to 16):
            Number of attention heads for each attention layer in the Transformer encoder.
        tolerance (`float`, *optional*, defaults to 1e-06):
            The tolerance for the quantile loss.
        rms_norm_eps (`float`, *optional*, defaults to 1e-06):
            The epsilon used by the RMS normalization layers.
        quantiles (`List[float]`, *optional*, defaults to `[0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9]`):
            The quantiles to predict.
        pad_val (`float`, *optional*, defaults to 1123581321.0):
            The value used to pad the predictions.
        attention_dropout (`float`, *optional*, defaults to 0.0):
            The dropout probability for the attention scores.
        use_positional_embedding (`bool`, *optional*, defaults to `False`):
            Whether to add positional embeddings.
        initializer_range (`float`, *optional*, defaults to 0.02):
            The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
        min_timescale (`int`, *optional*, defaults to 1):
            The start of the geometric positional index. Determines the periodicity of
            the added signal.
        max_timescale (`int`, *optional*, defaults to 10000):
            The end of the geometric positional index. Determines the frequency of the
            added signal.
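
    Example (a minimal usage sketch, assuming the standard `transformers` config-to-model pattern;
    a model built from a bare config gets randomly initialized weights):

    ```python
    >>> from transformers import TimesFmConfig, TimesFmModelForPrediction

    >>> # Initializing a TimesFM configuration with the timesfm-2.0-500m defaults
    >>> configuration = TimesFmConfig()

    >>> # Initializing a model (with random weights) from that configuration
    >>> model = TimesFmModelForPrediction(configuration)

    >>> # Accessing the model configuration
    >>> configuration = model.config
    ```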
    """

    model_type = "timesfm"
    keys_to_ignore_at_inference = []
    is_encoder_decoder = False

    def __init__(
        self,
        patch_length: int = 32,
        context_length: int = 512,
        horizon_length: int = 128,
        freq_size: int = 3,
        num_hidden_layers: int = 50,
        hidden_size: int = 1280,
        intermediate_size: int = 1280,
        head_dim: int = 80,
        num_attention_heads: int = 16,
        tolerance: float = 1e-06,
        rms_norm_eps: float = 1e-06,
        quantiles: List[float] = [0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9],
        pad_val: float = 1123581321.0,
        attention_dropout: float = 0.0,
        use_positional_embedding: bool = False,
        initializer_range: float = 0.02,
        min_timescale: int = 1,
        max_timescale: int = 10000,
        **kwargs,
    ):
        self.patch_length = patch_length
        self.context_length = context_length
        self.horizon_length = horizon_length
        self.quantiles = quantiles
        self.pad_val = pad_val
        self.freq_size = freq_size
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.head_dim = head_dim
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.tolerance = tolerance
        self.rms_norm_eps = rms_norm_eps
        self.use_positional_embedding = use_positional_embedding
        self.initializer_range = initializer_range
        self.min_timescale = min_timescale
        self.max_timescale = max_timescale
        self.attention_dropout = attention_dropout

        super().__init__(
            is_encoder_decoder=self.is_encoder_decoder,
            **kwargs,
        )


__all__ = ["TimesFmConfig"]