"""PatchTSMixer model configuration"""

from typing import List, Optional, Union

from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)


class PatchTSMixerConfig(PretrainedConfig):
    r"""
This is the configuration class to store the configuration of a [`PatchTSMixerModel`]. It is used to instantiate a
PatchTSMixer model according to the specified arguments, defining the model architecture. Instantiating a
configuration with the defaults will yield a similar configuration to that of the PatchTSMixer
[ibm/patchtsmixer-etth1-pretrain](https://huggingface.co/ibm/patchtsmixer-etth1-pretrain) architecture.

Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
documentation from [`PretrainedConfig`] for more information.

Args:
    context_length (`int`, *optional*, defaults to 32):
        The context/history length for the input sequence.
    patch_length (`int`, *optional*, defaults to 8):
        The patch length for the input sequence.
    num_input_channels (`int`, *optional*, defaults to 1):
        Number of input variates. For univariate data, set it to 1.
    patch_stride (`int`, *optional*, defaults to 8):
        Determines the overlap between two consecutive patches. Set it to `patch_length` (or greater) if
        non-overlapping patches are desired.
    num_parallel_samples (`int`, *optional*, defaults to 100):
        The number of samples to generate in parallel for probabilistic forecast.
    d_model (`int`, *optional*, defaults to 8):
        Hidden dimension of the model. Recommended to set it as a multiple of `patch_length` (i.e. 2-5X of
        `patch_length`). A larger value indicates a more complex model.
    expansion_factor (`int`, *optional*, defaults to 2):
        Expansion factor to use inside MLP. Recommended range is 2-5. A larger value indicates a more complex
        model.
    num_layers (`int`, *optional*, defaults to 3):
        Number of layers to use. Recommended range is 3-15. A larger value indicates a more complex model.
    dropout (`float`, *optional*, defaults to 0.2):
        The dropout probability for the `PatchTSMixer` backbone. Recommended range is 0.2-0.7.
    mode (`str`, *optional*, defaults to `"common_channel"`):
        Mixer mode. Determines how to process the channels. Allowed values: "common_channel", "mix_channel". In
        "common_channel" mode, we follow channel-independent modelling with no explicit channel-mixing; channel
        mixing happens implicitly via shared weights across channels (preferred first approach). In "mix_channel"
        mode, we follow explicit channel-mixing in addition to patch and feature mixing (preferred approach when
        channel correlations are very important to model).
    gated_attn (`bool`, *optional*, defaults to `True`):
        Enable Gated Attention.
    norm_mlp (`str`, *optional*, defaults to `"LayerNorm"`):
        Normalization layer (BatchNorm or LayerNorm).
    self_attn (`bool`, *optional*, defaults to `False`):
        Enable tiny self-attention across patches. This can be enabled when the output of vanilla PatchTSMixer
        with gated attention is not satisfactory. Enabling this leads to explicit pair-wise attention and
        modelling across patches.
    self_attn_heads (`int`, *optional*, defaults to 1):
        Number of self-attention heads. Works only when `self_attn` is set to `True`.
    use_positional_encoding (`bool`, *optional*, defaults to `False`):
        Enable the use of positional embedding for the tiny self-attention layers. Works only when `self_attn` is
        set to `True`.
    positional_encoding_type (`str`, *optional*, defaults to `"sincos"`):
        Positional encodings. Options `"random"` and `"sincos"` are supported. Works only when
        `use_positional_encoding` is set to `True`.
    scaling (`string` or `bool`, *optional*, defaults to `"std"`):
        Whether to scale the input targets via "mean" scaler, "std" scaler or no scaler if `None`. If `True`, the
        scaler is set to "mean".
    loss (`string`, *optional*, defaults to `"mse"`):
        The loss function for the model corresponding to the `distribution_output` head. For parametric
        distributions it is the negative log likelihood ("nll") and for point estimates it is the mean squared
        error "mse".
    init_std (`float`, *optional*, defaults to 0.02):
        The standard deviation of the truncated normal weight initialization distribution.
    post_init (`bool`, *optional*, defaults to `False`):
        Whether to use custom weight initialization from `transformers` library, or the default initialization in
        `PyTorch`. Setting it to `False` performs `PyTorch` weight initialization.
    norm_eps (`float`, *optional*, defaults to 1e-05):
        A value added to the denominator for numerical stability of normalization.
    mask_type (`str`, *optional*, defaults to `"random"`):
        Type of masking to use for Masked Pretraining mode. Allowed values are "random", "forecast". In Random
        masking, points are masked randomly. In Forecast masking, points are masked towards the end.
    random_mask_ratio (`float`, *optional*, defaults to 0.5):
        Masking ratio to use when `mask_type` is `random`. A higher value indicates more masking.
    num_forecast_mask_patches (`int` or `list`, *optional*, defaults to `[2]`):
        Number of patches to be masked at the end of each batch sample. If it is an integer, all the samples in the
        batch will have the same number of masked patches. If it is a list, samples in the batch will be randomly
        masked by numbers defined in the list. This argument is only used for forecast pretraining.
    mask_value (`float`, *optional*, defaults to `0.0`):
        Mask value to use.
    masked_loss (`bool`, *optional*, defaults to `True`):
        Whether to compute pretraining loss only at the masked portions, or on the entire output.
    channel_consistent_masking (`bool`, *optional*, defaults to `True`):
        When true, masking will be the same across all channels of a timeseries. Otherwise, masking positions
        will vary across channels.
    unmasked_channel_indices (`list`, *optional*):
        Channels that are not masked during pretraining.
    head_dropout (`float`, *optional*, defaults to 0.2):
        The dropout probability for the `PatchTSMixer` head.
    distribution_output (`string`, *optional*, defaults to `"student_t"`):
        The distribution emission head for the model when loss is "nll". Could be either "student_t", "normal" or
        "negative_binomial".
    prediction_length (`int`, *optional*, defaults to 16):
        Number of time steps to forecast for a forecasting task. Also known as the Forecast Horizon.
    prediction_channel_indices (`list`, *optional*):
        List of channel indices to forecast. If None, forecast all channels. Target data is expected to have all
        channels and we explicitly filter the channels in prediction and target before loss computation.
    num_targets (`int`, *optional*, defaults to 3):
        Number of targets (dimensionality of the regressed variable) for a regression task.
    output_range (`list`, *optional*):
        Range to which the output of the regression task is restricted. Defaults to `None`.
    head_aggregation (`str`, *optional*, defaults to `"max_pool"`):
        Aggregation mode to enable for classification or regression task. Allowed values are `None`, "use_last",
        "max_pool", "avg_pool".

Example:

```python
>>> from transformers import PatchTSMixerConfig, PatchTSMixerModel

>>> # Initializing a default PatchTSMixer configuration
>>> configuration = PatchTSMixerConfig()

>>> # Initializing a model (with random weights) from the configuration
>>> model = PatchTSMixerModel(configuration)

>>> # Accessing the model configuration
>>> configuration = model.config
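
>>> # A small sketch (illustrative, untuned values) of a setup with explicit channel mixing and
>>> # forecast-style masked pretraining; all argument names are documented above
>>> configuration = PatchTSMixerConfig(
...     context_length=64,
...     patch_length=8,
...     patch_stride=8,
...     num_input_channels=7,
...     mode="mix_channel",
...     mask_type="forecast",
...     num_forecast_mask_patches=[2],
... )
>>> # (max(64, 8) - 8) // 8 + 1 = 8 patches per channel
>>> configuration.num_patches
8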
```"""

    model_type = "patchtsmixer"
    attribute_map = {
        "hidden_size": "d_model",
        "num_hidden_layers": "num_layers",
    }

    def __init__(
        self,
        # Time series specific configuration
        context_length: int = 32,
        patch_length: int = 8,
        num_input_channels: int = 1,
        patch_stride: int = 8,
        num_parallel_samples: int = 100,
        # General model configuration
        d_model: int = 8,
        expansion_factor: int = 2,
        num_layers: int = 3,
        dropout: float = 0.2,
        mode: str = "common_channel",
        gated_attn: bool = True,
        norm_mlp: str = "LayerNorm",
        self_attn: bool = False,
        self_attn_heads: int = 1,
        use_positional_encoding: bool = False,
        positional_encoding_type: str = "sincos",
        scaling: Optional[Union[str, bool]] = "std",
        loss: str = "mse",
        init_std: float = 0.02,
        post_init: bool = False,
        norm_eps: float = 1e-5,
        # Pretrain model configuration
        mask_type: str = "random",
        random_mask_ratio: float = 0.5,
        num_forecast_mask_patches: Optional[Union[List[int], int]] = [2],
        mask_value: float = 0.0,
        masked_loss: bool = True,
        channel_consistent_masking: bool = True,
        unmasked_channel_indices: Optional[List[int]] = None,
        # General head configuration
        head_dropout: float = 0.2,
        distribution_output: str = "student_t",
        # Prediction head configuration
        prediction_length: int = 16,
        prediction_channel_indices: Optional[List[int]] = None,
        # Classification/Regression configuration
        num_targets: int = 3,
        output_range: Optional[list] = None,
        head_aggregation: str = "max_pool",
        **kwargs,
    ):
        self.num_input_channels = num_input_channels
        self.context_length = context_length
        self.patch_length = patch_length
        self.patch_stride = patch_stride
        self.d_model = d_model
        self.expansion_factor = expansion_factor
        self.num_layers = num_layers
        self.dropout = dropout
        self.mode = mode
        self.gated_attn = gated_attn
        self.norm_mlp = norm_mlp
        self.scaling = scaling
        self.head_dropout = head_dropout
        # Number of patches extracted from the context window
        self.num_patches = (max(context_length, patch_length) - patch_length) // patch_stride + 1
        self.mask_type = mask_type
        self.random_mask_ratio = random_mask_ratio
        self.num_forecast_mask_patches = num_forecast_mask_patches
        self.mask_value = mask_value
        self.channel_consistent_masking = channel_consistent_masking
        self.masked_loss = masked_loss
        self.patch_last = True
        self.use_positional_encoding = use_positional_encoding
        self.positional_encoding_type = positional_encoding_type
        self.prediction_length = prediction_length
        self.prediction_channel_indices = prediction_channel_indices
        self.num_targets = num_targets
        self.output_range = output_range
        self.head_aggregation = head_aggregation
        self.self_attn = self_attn
        self.self_attn_heads = self_attn_heads
        self.init_std = init_std
        self.post_init = post_init
        self.distribution_output = distribution_output
        self.loss = loss
        self.num_parallel_samples = num_parallel_samples
        self.unmasked_channel_indices = unmasked_channel_indices
        self.norm_eps = norm_eps
        super().__init__(**kwargs)


__all__ = ["PatchTSMixerConfig"]