o
    Zhr                    @   s  d Z ddlZddlZddlZddlZddlZddlZddlZddl	Z	ddl
Z
ddlZddlZddlZddlZddlZddlZddlZddlmZ ddlmZ ddlmZ ddlmZmZmZmZmZ ddlmZ ddlm Z! ddl"Z#ddl$Z$ddl%m&Z' dd	l(m)Z)m*Z*m+Z+ dd
l,m-Z- ddl$m.Z. ddl/m0Z0m1Z1m2Z2m3Z3m4Z4 ddl5m6Z6 ddl7m8Z8 ddl9m:Z:m;Z;m<Z< ddl=m>Z>m?Z? ddl@mAZA ddlBmCZC ddlDmEZEmFZF ddlGmHZH ddlImJZJmKZKmLZL ddlMmNZN ddlOmPZP ddlQmRZRmSZSmTZT ddlUmVZVmWZW ddlXmYZYmZZZ ddl[m\Z\ ddl]m^Z^m_Z_ ddl`maZa ddlbmcZcmdZdmeZemfZfmgZgmhZhmiZimjZj ddlkmlZlmmZmmnZnmoZompZpmqZqmrZrmsZsmtZtmuZumvZvmwZwmxZxmyZymzZzm{Z{m|Z|m}Z}m~Z~mZ dd lmZmZmZmZmZmZmZmZmZmZmZmZmZmZmZmZmZmZmZmZmZmZmZ dd!lmZmZmZ dd"l mZmZmZmZmZmZmZmZmZmZmZmZmZmZmZmZmZmZmZmZmZmZmZmZmZmZmZmZmZmZmZmZmZmZmZmZmZmZmZmZmZ dd#lmZ dd$lmZ dd%lmZ edgZegZe rdd&lmZ eZe rdd'lmZ e r#ddlZe r\ddlm  mZ ddlm  mZ ddlmZ ddlm6Z e-eޡe-ekZer[ddlm&  mZ nd(Ze rddlm  m$Z ddlm6Z e-ee-d)kZdd*lkmZmZmZmZ nd(Ze rddlZe rdd+lmZ e rdd,lmZmZ ddlm6Z dd-lmZ dd.lmZmZmZmZmZmZmZ e3gZ e-ee-d/krdd0lmZ e-ee-d1krdd2lmZ e eg7 Z eL rdd3lmZ ed4rdd5lmZ d6d7 Zd8d9 Zd:d; Zer2ddl	Z	e r2ddlZeÐ
eZd<Zd=Zd>Zd?Zd@ZdAZdBZedCdDG dEdF dFZdS )Guc   
The Trainer class, to easily train a 🤗 Transformers from scratch or finetune it on a new task.
    N)Mappingpartial)Path)TYPE_CHECKINGAnyCallableOptionalUnion   )#get_reporting_integration_callbacks)	ModelCardcreate_repoupload_folder)version)nn)
DataLoaderDatasetIterableDatasetRandomSamplerSequentialSampler)__version__)PretrainedConfig)DataCollatorDataCollatorWithPaddingdefault_data_collator)DebugOptionDebugUnderflowOverflow)SequenceFeatureExtractor)FeatureExtractionMixin)"ALL_HYPERPARAMETER_SEARCH_BACKENDSdefault_hp_search_backend)BaseImageProcessor)deepspeed_initdeepspeed_load_checkpointis_deepspeed_available)tpu_spmd_dataloader)TrainingSummary)PreTrainedModelload_sharded_checkpointunwrap_model)!MODEL_FOR_CAUSAL_LM_MAPPING_NAMESMODEL_MAPPING_NAMES)	Adafactorget_scheduler)ProcessorMixin)ALL_LAYERNORM_LAYERS"is_torch_greater_or_equal_than_2_3)PreTrainedTokenizerBase)CallbackHandlerDefaultFlowCallbackExportableStatePrinterCallbackProgressCallbackTrainerCallbackTrainerControlTrainerState)DistributedTensorGathererEvalLoopContainerIterableDatasetShardLabelSmootherLayerWiseDummyOptimizerLengthGroupedSamplerSequentialDistributedSamplerdistributed_broadcast_scalarsdistributed_concatfind_batch_sizeget_model_param_countget_module_class_from_nameget_parameter_namesnested_concatnested_detachnested_numpifynested_xla_mesh_reducereissue_pt_warningsremove_dummy_checkpointset_rng_state_for_device)PREFIX_CHECKPOINT_DIRBestRunEvalLoopOutputEvalPredictionHPSearchBackendHubStrategyPredictionOutputRemoveColumnsCollatorSaveStrategyTrainerMemoryTrackerTrainOutputcheck_target_module_existsdefault_compute_objectivedenumpify_detensorizeenable_full_determinismfind_executable_batch_sizeget_last_checkpoint
has_lengthneftune_post_forward_hooknumber_of_argumentsseed_workerset_seedspeed_metrics)OptimizerNamesParallelModeTrainingArguments))ADAPTER_CONFIG_NAMEADAPTER_SAFE_WEIGHTS_NAMEADAPTER_WEIGHTS_NAMECONFIG_NAMESAFE_WEIGHTS_INDEX_NAMESAFE_WEIGHTS_NAMEWEIGHTS_INDEX_NAMEWEIGHTS_NAMEXLA_FSDPV2_MIN_VERSIONPushInProgressPushToHubMixincan_return_losscheck_torch_load_is_safefind_labelsis_accelerate_availableis_apex_availableis_apollo_torch_availableis_bitsandbytes_availableis_datasets_availableis_galore_torch_availableis_grokadamw_availableis_in_notebookis_ipex_availableis_liger_kernel_availableis_lomo_availableis_peft_availableis_safetensors_availableis_sagemaker_dp_enabledis_sagemaker_mp_enabledis_schedulefree_availableis_torch_hpu_availableis_torch_mlu_availableis_torch_mps_availableis_torch_musa_availableis_torch_neuroncore_availableis_torch_npu_availableis_torch_xla_availableis_torch_xpu_availableis_torchao_availablelogging	strtobool)deprecate_kwarg)requires)QuantizationMethod)NotebookProgressCallback)ampFz1.10)smp_forward_backwardsmp_forward_only
smp_gathersmp_nested_concat)	PeftModel)Acceleratorskip_first_batches)AcceleratorState)AutocastKwargsDistributedDataParallelKwargsDistributedTypeload_fsdp_modelload_fsdp_optimizersave_fsdp_modelsave_fsdp_optimizer1.3.0)TorchTensorParallelPluginz0.23.0)SeedableRandomSampler)DeepSpeedSchedulerWrapper0.28.0)DataLoaderConfigurationc                 C   sP   t  r&tf}ttjdtdkr!ddlm} g ||R }t| |S dS )Npeftz0.7.0r   )PeftMixedModelF)	r   r   r   parse	importlibmetadatar   r   
isinstance)modelZclasses_to_checkr    r   C/var/www/auris/lib/python3.10/site-packages/transformers/trainer.py_is_peft_model  s   
r   c                   C   s&   t  rdtttjv rddiS i S )NZadapter_onlyT)rw   listinspect	signaturer   
parametersr   r   r   r   _get_fsdp_ckpt_kwargs  s   r   c                  C   sx   t tjjt djk rt S t tjt dkrtjntj	} | j
jtjtjg}|tttjg7 }tj|S )Nz2.6z2.0.0)r   r   torchr   release
contextlibnullcontextnpZ_corecoreZ
multiarray_reconstructndarraydtypetypeZuint32Zserializationsafe_globals)Znp_coreZ	allowlistr   r   r   r     s   "r   ztraining_args.binztrainer_state.jsonzoptimizer.ptz	scaler.ptzoptimizer.binzscheduler.ptZpytorch_model_fsdp)r   
accelerate)backendsc                   @   s	  e Zd ZdZddlmZmZmZmZm	Z	 e
ddddd																					
				d deeejd	f dedee deeeedf  deeeeeef df  deeeeeef  deeg ef  dee deeegef  deee  deeej j! eej j"j# f deee$ej j! eee%f f  deeej&ej&gej&f  fddZ'e(dee fddZ)e)j*dddZ)dd Z+d d! Z,d"d# Z-d$d% Z.d&d' Z/d(d) Z0d*d+ Z1dd,dd-ee fd.d/Z2		dded-ee defd0d1Z3ddee deej4j5j6 fd2d3Z7			4		dd,ed-ed5e8d6eeegej4j5j6f  d7e9d8ee de:fd9d:Z;de:fd;d<Z<dedeej4j5j6 fd=d>Z=ddeeeef  de:fd?d@Z>dAede:fdBdCZ?dDe8fdEdFZ@dee fdGdHZAdIdJ ZBdKdL ZCdMdN ZDddOeeeejjEjFf  fdPdQZGeH		ddedee dee%e%f fdRdSZIddDe8dTej j!fdUdVZJdWe:de8fdXdYZKeHddZe:d[ee8 de8fd\d]ZLd^ed_eee%f f fd`daZMd^ed_eee%f f dbe8dceeeNf fdddeZOdfefdgdhZPddidjZQddkdlZRd4ejSfdmdnZTdodp ZUddqdrZV						ddseeee9f  d^ed_eee%f d	f dteee  fdudvZW		ddwdxZXdydz ZYdd{d|ZZd}d~ Z[dd Z\dddZ]		dddZ^dd Z_dd Z`dd Zadd Zbdd Zcdd Zddd Zedd Zfdd Zg										ddeed_geeeNf f  deeeeeNf geNf  de8deeee f deedehf  deed_gef  deeieei f fddZjddeeeNf deeN dd	fddZkdeej&e%f deej&e%f fddZldeeeej&e%f f deeeej&e%f f fddZmdd Znd	dee9 fddZo		ddejdeeeej&e%f f dej&fddZpd
ddZqde9fddZrde9fddZsddee de9fddZtddee fddZud
dee fddZvdd Zwd	exd4fdee fddĄZyd
dddƄZz					ǐddeeeeeef f  deee  dedeeeNf fdd˄Z{	̐ddAedeee  dede|fdd΄Z}					ǐddWe:d-edee9 deee  dede~fddфZdddӄZ		ddejdeeeej&e%f f de9deee  deeej& eej& eej& f f
ddՄZdeeeej&e%f f fddׄZddee fddڄZ																		ddee dee deeee d	f dee dee deeee d	f deeee d	f d,eeee d	f deeee d	f fddZdd Zdd Z						ddee de9dee dee def
ddZ					ǐddWe:d-edee9 deee  dede~fddZdd ZdddZdd ZdddZdd Zdd ZdedWe:de8fddZd	S (  TraineruA  
    Trainer is a simple but feature-complete training and eval loop for PyTorch, optimized for 🤗 Transformers.

    Args:
        model ([`PreTrainedModel`] or `torch.nn.Module`, *optional*):
            The model to train, evaluate or use for predictions. If not provided, a `model_init` must be passed.

            <Tip>

            [`Trainer`] is optimized to work with the [`PreTrainedModel`] provided by the library. You can still use
            your own models defined as `torch.nn.Module` as long as they work the same way as the 🤗 Transformers
            models.

            </Tip>

        args ([`TrainingArguments`], *optional*):
            The arguments to tweak for training. Will default to a basic instance of [`TrainingArguments`] with the
            `output_dir` set to a directory named *tmp_trainer* in the current directory if not provided.
        data_collator (`DataCollator`, *optional*):
            The function to use to form a batch from a list of elements of `train_dataset` or `eval_dataset`. Will
            default to [`default_data_collator`] if no `processing_class` is provided, an instance of
            [`DataCollatorWithPadding`] otherwise if the processing_class is a feature extractor or tokenizer.
        train_dataset (Union[`torch.utils.data.Dataset`, `torch.utils.data.IterableDataset`, `datasets.Dataset`], *optional*):
            The dataset to use for training. If it is a [`~datasets.Dataset`], columns not accepted by the
            `model.forward()` method are automatically removed.

            Note that if it's a `torch.utils.data.IterableDataset` with some randomization and you are training in a
            distributed fashion, your iterable dataset should either use a internal attribute `generator` that is a
            `torch.Generator` for the randomization that must be identical on all processes (and the Trainer will
            manually set the seed of this `generator` at each epoch) or have a `set_epoch()` method that internally
            sets the seed of the RNGs used.
        eval_dataset (Union[`torch.utils.data.Dataset`, Dict[str, `torch.utils.data.Dataset`, `datasets.Dataset`]), *optional*):
             The dataset to use for evaluation. If it is a [`~datasets.Dataset`], columns not accepted by the
             `model.forward()` method are automatically removed. If it is a dictionary, it will evaluate on each
             dataset prepending the dictionary key to the metric name.
        processing_class (`PreTrainedTokenizerBase` or `BaseImageProcessor` or `FeatureExtractionMixin` or `ProcessorMixin`, *optional*):
            Processing class used to process the data. If provided, will be used to automatically process the inputs
            for the model, and it will be saved along the model to make it easier to rerun an interrupted training or
            reuse the fine-tuned model.
            This supersedes the `tokenizer` argument, which is now deprecated.
        model_init (`Callable[[], PreTrainedModel]`, *optional*):
            A function that instantiates the model to be used. If provided, each call to [`~Trainer.train`] will start
            from a new instance of the model as given by this function.

            The function may have zero argument, or a single one containing the optuna/Ray Tune/SigOpt trial object, to
            be able to choose different architectures according to hyper parameters (such as layer count, sizes of
            inner layers, dropout probabilities etc).
        compute_loss_func (`Callable`, *optional*):
            A function that accepts the raw model outputs, labels, and the number of items in the entire accumulated
            batch (batch_size * gradient_accumulation_steps) and returns the loss. For example, see the default [loss function](https://github.com/huggingface/transformers/blob/052e652d6d53c2b26ffde87e039b723949a53493/src/transformers/trainer.py#L3618) used by [`Trainer`].
        compute_metrics (`Callable[[EvalPrediction], Dict]`, *optional*):
            The function that will be used to compute metrics at evaluation. Must take a [`EvalPrediction`] and return
            a dictionary string to metric values. *Note* When passing TrainingArgs with `batch_eval_metrics` set to
            `True`, your compute_metrics function must take a boolean `compute_result` argument. This will be triggered
            after the last eval batch to signal that the function needs to calculate and return the global summary
            statistics rather than accumulating the batch-level statistics
        callbacks (List of [`TrainerCallback`], *optional*):
            A list of callbacks to customize the training loop. Will add those to the list of default callbacks
            detailed in [here](callback).

            If you want to remove one of the default callbacks used, use the [`Trainer.remove_callback`] method.
        optimizers (`Tuple[torch.optim.Optimizer, torch.optim.lr_scheduler.LambdaLR]`, *optional*, defaults to `(None, None)`):
            A tuple containing the optimizer and the scheduler to use. Will default to an instance of [`AdamW`] on your
            model and a scheduler given by [`get_linear_schedule_with_warmup`] controlled by `args`.
        optimizer_cls_and_kwargs (`Tuple[Type[torch.optim.Optimizer], Dict[str, Any]]`, *optional*):
            A tuple containing the optimizer class and keyword arguments to use.
            Overrides `optim` and `optim_args` in `args`. Incompatible with the `optimizers` argument.

            Unlike `optimizers`, this argument avoids the need to place model parameters on the correct devices before initializing the Trainer.
        preprocess_logits_for_metrics (`Callable[[torch.Tensor, torch.Tensor], torch.Tensor]`, *optional*):
            A function that preprocess the logits right before caching them at each evaluation step. Must take two
            tensors, the logits and the labels, and return the logits once processed as desired. The modifications made
            by this function will be reflected in the predictions received by `compute_metrics`.

            Note that the labels (second parameter) will be `None` if the dataset does not have them.

    Important attributes:

        - **model** -- Always points to the core model. If using a transformers model, it will be a [`PreTrainedModel`]
          subclass.
        - **model_wrapped** -- Always points to the most external model in case one or more other modules wrap the
          original model. This is the model that should be used for the forward pass. For example, under `DeepSpeed`,
          the inner model is wrapped in `DeepSpeed` and then again in `torch.nn.DistributedDataParallel`. If the inner
          model hasn't been wrapped, then `self.model_wrapped` is the same as `self.model`.
        - **is_model_parallel** -- Whether or not a model has been switched to a model parallel mode (different from
          data parallelism, this means some of the model layers are split on different GPUs).
        - **place_model_on_device** -- Whether or not to automatically place the model on the device - it will be set
          to `False` if model parallel or deepspeed is used, or if the default
          `TrainingArguments.place_model_on_device` is overridden to return `False` .
        - **is_in_train** -- Whether or not a model is currently running `train` (e.g. when `evaluate` is called while
          in `train`)

    r   )_get_learning_ratelog_metricsmetrics_formatsave_metrics
save_state	tokenizerprocessing_classz5.0.0T)new_namer   Zraise_if_both_namesNNNr   argsdata_collatortrain_datasetzdatasets.Dataseteval_dataset
model_initcompute_loss_funccompute_metrics	callbacks
optimizersoptimizer_cls_and_kwargspreprocess_logits_for_metricsc                  C   s	  |d u rd}t d| d t|d}|jr)|	d ur)dt|	j vr)td|j	d ur@|j	dkr@|d u r@td|j	 d	|j
tjksI|jrR|jd u rRtd
|| _|| _| jjrbt| jjnt| jj d | _d | _d| _|| _|   t| jj| _| j  | }t | |j! |d u r|d ur|| _"| # }nt$d|d urt%&dt' || _"|j(j)t*v rtd|j(j) dt+|ddrt+|ddrd| _,nd| _,t+|dd d urdd t-|j./ D }t0|dkrd| _,nt0|dkr| jj1t21|d k| _,nd| _,| j,rt d | jj3rNt4 rJddl5m6} t7|t8r.||d n t9|drDt7|: t8rD||: d n
t ;d nt<dt+|ddo[t+|dd }t+|d d d uoh|j=j>}t+|d d d uoxt+|j=d!d}|rt9|d"rtd#|rt?|s|std$|r|std%|j=j@jA d&|j=j@jA |jBd' | _Ct0|jDdkr| jErtd(|jBd' s|jFtGjHkrtd)|jI| _I| j,s| jEs|jJs|jKr|jLr| jCs| jMrd| _I|d urt7|tNtOfrtP|ntQ}|d ur|n|| _R|| _S|| _T|| _U| jIr0t+|d*d tVjWks0| X||j1 | j,r8d| j_Y|| _Z|| _| j[\|}t?|sL|j]n|: j]}t|j}t9|d+rb|j^| __nt`d,d- |/ D | __|ja| _a|	| _b|| _c|\| _d| _e|| _f| jfd ur| jdd urt$d.|d ur| jdd us| jed urt$d/tg r| jdd ur| j D ]}|j1} | jdjhD ]}t0|d0 dkr|d0 d j1} nq||krtd1| jCs| jMr| jdd us| jed urt$d2titj| jjk }|
d u r|n||
 }
tl|
| j| jU| jd| je| _m| n| jjortpntq d| _rd | _s| jjtr1| u  | jjvr?twjx| jjydd3 tz| jRsStzt+| jRd4d rStd5|j{dkrd|j|dkrdt d6 |d urxt}|sx|j{dkrxtd7|d urt7|t2j~jjr|jrtd8d | _d| _d| _t r|jrtd9tr|jtjjjkrt ;d:tjjj d;|j d<tjjj  tjjj|_nt9tjjd=rt ;d:tjjj d> |js|jr|jd?kr|j1t21d@kr|jrtstdAndB|_t dC|j dD |js|jr>| jEs>t s>|jdBkr-d| _t2j| _n|jdEkr>t s;t<dFd| _| jjdkrNt| jjdG| _nd | _t | _t|  |  dHd | jmj| jg D dI| _d| _d | _t?| jr| jjd u rt ;dJ| jj(j) dK t| jj(}| jjd u r|n| jj| _t| jj(| _| jm| j| j| j| _|j| _d| _| j  |jBdLd| _| jrtstdMt }ttjtt||dfdNdO | jCo| j | _d S )PNZtmp_trainerz1No `TrainingArguments` passed, using `output_dir=z`.)
output_dircompute_resultzWhen using `batch_eval_metrics`, your `compute_metrics` function must take a `compute_result` boolean argument which will be triggered after the last batch of the eval set to signal that the summary statistics should be returned by the function.noz%You have set `args.eval_strategy` to zx but you didn't pass an `eval_dataset` to `Trainer`. Either set `args.eval_strategy` to `no` or pass an `eval_dataset`. z`args.metric_for_best_model` must be provided when using 'best' save_strategy or if `args.load_best_model_at_end` is set to `True`.Fz<`Trainer` requires either a `model` or `model_init` argumentz`Trainer` requires either a `model` or `model_init` argument, but not both. `model_init` will overwrite your model when calling the `train` method. This will become a fatal error in the next release.zThe model you have picked (a  ) cannot be used as is for training: it only computes hidden states and does not accept any labels. You should choose a model with a head suitable for your task like any of the `AutoModelForXxx` listed at https://huggingface.co/docs/transformers/model_doc/autoZis_parallelizableZmodel_parallelThf_device_mapc                 S   s   g | ]}|d vr|qS ))cpuZdiskr   ).0devicer   r   r   
<listcomp>      z$Trainer.__init__.<locals>.<listcomp>r   r   zYou have loaded a model on multiple GPUs. `is_model_parallel` attribute will be force-set to `True` to avoid any unexpected behavior such as device placement mismatching.)_apply_liger_kernel_to_instancer   get_base_modelzRThe model is not an instance of PreTrainedModel. No liger kernels will be applied.zYou have set `use_liger_kernel` to `True` but liger-kernel >= 0.3.0 is not available. Please install it with `pip install liger-kernel`Zis_quantizedZ_hf_peft_config_loadedhf_quantizerZis_qat_trainableZ	_orig_modzYou cannot fine-tune quantized model with `torch.compile()` make sure to pass a non-compiled model when fine-tuning a quantized model with PEFTzYou cannot perform fine-tuning on purely quantized models. Please attach trainable adapters on top of the quantized model to correctly perform fine-tuning. Please see: https://huggingface.co/docs/transformers/peft for more detailsz8The model you are trying to fine-tune is quantized with z but that quantization method do not support training. Please open an issue on GitHub: https://github.com/huggingface/transformers to request the support for training support for xlazZUsing --fsdp xxx together with --deepspeed is not possible, deactivate one of those flags.z.Using fsdp only works in distributed training.quantization_methodaccepts_loss_kwargsc                 s   s    | ]
}|j tjjkV  qd S N)kindr   	ParameterVAR_KEYWORDr   kr   r   r   	<genexpr>  s    
z#Trainer.__init__.<locals>.<genexpr>zSPassing both `optimizers` and `optimizer_cls_and_kwargs` arguments is incompatible.zPassing a `model_init` is incompatible with providing the `optimizers` argument. You should subclass `Trainer` and override the `create_optimizer_and_scheduler` method.paramsa[  The model and the optimizer parameters are not on the same device, which probably means you created an optimizer around your model **before** putting on the device and passing it to the `Trainer`. Make sure the lines `import torch_xla.core.xla_model as xm` and `model.to(xm.xla_device())` is performed before the optimizer creation in your script.zPassing `optimizers` is not allowed if PyTorch FSDP is enabled. You should subclass `Trainer` and override the `create_optimizer_and_scheduler` method.exist_okZcollate_batchzRThe `data_collator` should be a simple callable (function, class with `__call__`).zHmax_steps is given, it will override any value given in num_train_epochszThe train_dataset does not implement __len__, max_steps has to be specified. The number of steps needs to be known in advance for the learning rate scheduler.zTthe `--group_by_length` option is only available for `Dataset`, not `IterableDatasetzOSageMaker Model Parallelism does not support BF16 yet. Please use FP16 instead z(FP16 provided in SM_HP_MP_PARAMETERS is z+, but FP16 provided in trainer argument is z, setting to fp16zJ, but SageMaker Model Parallelism < 1.10 does not support FP16 in trainer.autor   z2Tried to use `fp16` but it is not supported on cpuZcpu_ampzUsing z half precision backendapexzcUsing FP16 with APEX but APEX is not installed, please refer to https://www.github.com/nvidia/apex.)epsilonc                 S      g | ]	}t |tr|qS r   r   r5   r   cbr   r   r   r     
    
)is_local_process_zerois_world_process_zerostateful_callbacksz)No label_names provided for model class `z`. Since `PeftModel` hides base models input arguments, if label_names is not given, label_names can't be set automatically within `Trainer`. Note that empty label_names list will be used instead.Zxla_fsdp_v2z*FSDPv2 requires `torch_xla` 2.2 or higher.)fsdptensor)Z
axis_names)loggerinforh   batch_eval_metricsr   r   r   keys
ValueErrorZeval_strategysave_strategyrW   BESTload_best_model_at_endmetric_for_best_modelr   r   full_determinismr]   seedrd   hp_name	deepspeedis_in_trainr   "create_accelerator_and_postprocessrX   Zskip_memory_metrics_memory_trackerstartZget_process_log_levelr   Zset_verbosityZ_setup_devicesr   call_model_initRuntimeErrorwarningswarnFutureWarning	__class____name__r,   getattris_model_parallelsetr   valueslenr   r   Zuse_liger_kernelr   Zliger_kernel.transformersr   r   r(   hasattrr   warningImportErrorr   is_trainabler   quantization_configZquant_methodfsdp_configis_fsdp_xla_enabledr  is_deepspeed_enabledparallel_moderg   DISTRIBUTEDplace_model_on_devicefp16_full_evalbf16_full_evaldo_trainis_fsdp_enabledr2   r   r   r   r   r   r   r   r   BITS_AND_BYTES_move_model_to_deviceZ_n_gpumodel_wrappedacceleratorr*   forwardr   model_accepts_loss_kwargsanyneftune_noise_alphar   r   	optimizerlr_schedulerr   r   param_groupsDEFAULT_CALLBACKSr   Z	report_tor3   callback_handleradd_callbackZdisable_tqdmr6   DEFAULT_PROGRESS_CALLBACKZ_loggers_initializedhub_model_idpush_to_hubinit_hf_reposhould_saveosmakedirsr   callable	max_stepsnum_train_epochsr`   utilsdatar   group_by_length_signature_columnsuse_apexuse_cpu_ampr   Zbf16IS_SAGEMAKER_MP_POST_1_10r   smpstatecfgZhalf_precision_backendr1   bfloat16	amp_dtyperx   Zlabel_smoothing_factorr>   label_smootherr9   controlr:   r   r  r   current_floshp_search_backendlabel_namesrv   rt   Zon_init_endtrain_batch_size_train_batch_size_created_lr_schedulerstop_and_update_metricsgetis_fsdp_xla_v2_enabledIS_XLA_FSDPV2_POST_2_2xrZglobal_runtime_device_countxsZset_global_meshZMeshr   arrayrangeis_fsdp_xla_v1_enabled) selfr   r   r   r   r   r   r   r   r   r   r   r   r   r   	log_levelZdevicesr   Z_is_quantized_and_base_modelZ&_quantization_method_supports_trainingZ%_is_model_quantized_and_qat_trainableZdefault_collatorunwrapped_modelZmodel_forwardZforward_paramsparamZmodel_deviceZparam_groupZoptimizer_deviceZdefault_callbacksZdefault_label_namesZnum_devicesr   r   r   __init__  s$  





	



"


 
 
 
	
$zTrainer.__init__returnc                 C   s   t d | jS )NzUTrainer.tokenizer is now deprecated. You should use Trainer.processing_class instead.r  r#  r   rf  r   r   r   r   2  s   
zTrainer.tokenizerc                 C   s   t d || _d S )NzjTrainer.tokenizer is now deprecated. You should use `Trainer.processing_class = processing_class` instead.rl  )rf  r   r   r   r   r   7  s   
c                 C   sH   | j |}t|r|jj }n| }~| j|_|t}|| _	|S )z
        Activates the neftune as presented in this code: https://github.com/neelsjain/NEFTune and paper:
        https://arxiv.org/abs/2310.05914
        )
r4  r*   r   
base_modelr   get_input_embeddingsr8  Zregister_forward_hookra   neftune_hook_handle)rf  r   rh  
embeddingsZhook_handler   r   r   _activate_neftune>  s   
zTrainer._activate_neftunec                 C   sP   t | ds	td| j|}t|r|jj }n| }| j	  |`
~dS )z^
        Deactivates the neftune method. Make sure to call `_activate_neftune` first.
        rp  zNNeftune is not activated make sure to call `trainer._activate_neftune()` firstN)r"  r	  r4  r*   r   rn  r   ro  rp  remover8  )rf  r   rh  rq  r   r   r   _deactivate_neftuneQ  s   


zTrainer._deactivate_neftunec                 C      | j | dS )ag  
        Add a callback to the current list of [`~transformers.TrainerCallback`].

        Args:
           callback (`type` or [`~transformers.TrainerCallback]`):
               A [`~transformers.TrainerCallback`] class or an instance of a [`~transformers.TrainerCallback`]. In the
               first case, will instantiate a member of that class.
        N)r=  r>  rf  callbackr   r   r   r>  b     	zTrainer.add_callbackc                 C   s   | j |S )aK  
        Remove a callback from the current list of [`~transformers.TrainerCallback`] and returns it.

        If the callback is not found, returns `None` (and no error is raised).

        Args:
           callback (`type` or [`~transformers.TrainerCallback]`):
               A [`~transformers.TrainerCallback`] class or an instance of a [`~transformers.TrainerCallback`]. In the
               first case, will pop the first member of that class found in the list of callbacks.

        Returns:
            [`~transformers.TrainerCallback`]: The callback removed, if found.
        )r=  pop_callbackrv  r   r   r   ry  m  s   zTrainer.pop_callbackc                 C   ru  )a  
        Remove a callback from the current list of [`~transformers.TrainerCallback`].

        Args:
           callback (`type` or [`~transformers.TrainerCallback]`):
               A [`~transformers.TrainerCallback`] class or an instance of a [`~transformers.TrainerCallback`]. In the
               first case, will remove the first member of that class found in the list of callbacks.
        N)r=  remove_callbackrv  r   r   r   rz  }  rx  zTrainer.remove_callbackc                 C   s6   | |}| jjtjkrt|dr|  d S d S d S )Ntie_weights)tor   r*  rg   ZTPUr"  r{  )rf  r   r   r   r   r   r2    s   
zTrainer._move_model_to_devicec                 C   s   | j d u r>| j}t| jrt| jdr| j }n| jjj}t|j}t	|j
 | _ |  j t	tddg| j 7  _ d S d S )Nr   label	label_ids)rL  r   r   r"  r   rn  r   r   r5  r   r   r  r  rY  )rf  Zmodel_to_inspectr   r   r   r    _set_signature_columns_if_needed  s   


$z(Trainer._set_signature_columns_if_neededdatasetdescriptionc                    s
  | j js S |   | j}tt jt| }t|dkrK|d u r$dnd| d}t	d| d| j
jj dd| d	d| d
| j
jj d  fdd|D }t|dkretdd| dttjtdk r j jd | jd d  S  |S )Nr    zin the z setzThe following columns z) don't have a corresponding argument in `z!.forward` and have been ignored: , z. If z are not expected by `z/.forward`,  you can safely ignore this message.c                    s   g | ]	}| j v r|qS r   )column_namesr   r  r   r   r         z2Trainer._remove_unused_columns.<locals>.<listcomp>zNo columns in the dataset match the model's forward method signature: ({', '.join(signature_columns)}). The following columns have been ignored: [zp]. Please check the dataset and model. You may need to set `remove_unused_columns=False` in `TrainingArguments`.1.4.0r   format_kwargs)r   columnsr  )r   remove_unused_columnsr  rL  r   r  r  r!  r  r  r   r  r  joinr	  r   r   datasetsr   Z
set_formatformatZremove_columns)rf  r  r  signature_columnsZignored_columnsZdset_descriptionr  r   r  r   _remove_unused_columns  s>   
zTrainer._remove_unused_columnsc                 C   s6   | j js|S |   | j}t||t|| jjjd}|S )z=Wrap the data collator in a callable removing unused columns.)r   r  r  r  
model_name)	r   r  r  rL  rV   r  r   r  r  )rf  r   r  r  Zremove_columns_collatorr   r   r   "_get_collator_with_removed_columns  s   z*Trainer._get_collator_with_removed_columnsc                 C   s   |d u r| j }|d u st|sd S | jjrKt r.t|tjr.| jj|j	v r+|| jj nd }nd }| j
d ur;| j
jd nd }t| jj| jj |||dS t|S )Nr   r  lengthsmodel_input_name)r   r`   r   rK  r{   r   r  r   length_column_namer  r   model_input_namesr@   rZ  gradient_accumulation_stepsr   )rf  r   r  r  r   r   r   _get_train_sampler  s(   zTrainer._get_train_samplerF
batch_size
sampler_fnis_trainingdataloader_keyc           
      C   s   | j }t rt|tjr| j||d}n| j| j |d}||| jj| jj	| jj
d}t|tjjjsW|dur<|||d< | jj|d< | jj|d< |rWtt| jj| jjd|d< t|fi |}	|durw| jj
rwt| d	rr|	| j|< n||	i| _| j|	S )
zACreate a [`~torch.utils.data.DataLoader`] from the given dataset.)r  )r  Z
collate_fnnum_workersZ
pin_memoryZpersistent_workersNsamplerZ	drop_lastZprefetch_factor)r  rankZworker_init_fn_eval_dataloaders)r   r{   r   r  r   r  r  r   Zdataloader_num_workersdataloader_pin_memorydataloader_persistent_workersr   rI  rJ  r   Zdataloader_drop_lastZdataloader_prefetch_factorr   rc   process_indexr   r"  r  r4  prepare)
rf  r  r  r  r  r  r  r   dataloader_params
dataloaderr   r   r   _get_dataloader  s2   


zTrainer._get_dataloaderc                 C   s,   | j du r	td| j| j d| j| jddS )a@  
        Returns the training [`~torch.utils.data.DataLoader`].

        Will use no sampler if `train_dataset` does not implement `__len__`, a random sampler (adapted to distributed
        training if necessary) otherwise.

        Subclass and override this method if you want to inject some custom behavior.
        Nz+Trainer: training requires a train_dataset.ZTrainingT)r  r  r  r  r  )r   r	  r  r[  r  rm  r   r   r   get_train_dataloader  s   
	zTrainer.get_train_dataloaderc                 C   s   |d u st |s
d S | jjr1t rt|t t dS t r-t|t	
 t	 | jjdS t|S | jjrgt rNt|tjrN| jj|jv rK|| jj nd }nd }| jd ur[| jjd nd }t| jj|||dS | jjdkrqt|S d S )N)num_replicasr  )r  r  r  r   r  r   )r`   r   use_legacy_prediction_loopr   rA   ra  
world_sizeZglobal_ordinalr   rP  Zdp_sizeZdp_rankZper_device_eval_batch_sizer   rK  r{   r   r  r   r  r  r   r  r@   eval_batch_size)rf  r   r  r  r   r   r   _get_eval_sampler/  sB   zTrainer._get_eval_samplerc                 C   s   |du r| j du rtdt|tr|nd}t| dr-|| jv r-| jjr-| j	| j| S t|tr7| j | n|dur=|n| j }| j
|d| jj| j|dS )a   
        Returns the evaluation [`~torch.utils.data.DataLoader`].

        Subclass and override this method if you want to inject some custom behavior.

        Args:
            eval_dataset (`str` or `torch.utils.data.Dataset`, *optional*):
                If a `str`, will use `self.eval_dataset[eval_dataset]` as the evaluation dataset. If a `Dataset`, will override `self.eval_dataset` and must implement `__len__`. If it is a [`~datasets.Dataset`], columns not accepted by the `model.forward()` method are automatically removed.
        Nz-Trainer: evaluation requires an eval_dataset.evalr  
Evaluation)r  r  r  r  r  )r   r	  r   strr"  r  r   r  r4  r  r  r  r  )rf  r   r  r   r   r   get_eval_dataloader\  s,   

zTrainer.get_eval_dataloadertest_datasetc                 C   s   | j |d| jj| jdS )a  
        Returns the test [`~torch.utils.data.DataLoader`].

        Subclass and override this method if you want to inject some custom behavior.

        Args:
            test_dataset (`torch.utils.data.Dataset`, *optional*):
                The test dataset to use. If it is a [`~datasets.Dataset`], columns not accepted by the
                `model.forward()` method are automatically removed. It must implement `__len__`.
        test)r  r  r  r  )r  r   r  r  )rf  r  r   r   r   get_test_dataloader  s   zTrainer.get_test_dataloadernum_training_stepsc                 C   s8   |    trtjjjr| jj}n| j}| j||d dS )aZ  
        Setup the optimizer and the learning rate scheduler.

        We provide a reasonable default that works well. If you want to use something else, you can pass a tuple in the
        Trainer's init through `optimizers`, or subclass and override this method (or `create_optimizer` and/or
        `create_scheduler`) in a subclass.
        )r  r9  N)create_optimizerrO  rP  rQ  rR  r   r9  create_schedulerrf  r  r9  r   r   r   create_optimizer_and_scheduler  s
   
z&Trainer.create_optimizer_and_schedulerc                 C   s   t |tg d}|S )a0  
        Get all parameter names that weight decay will be applied to.

        This function filters out parameters in two ways:
        1. By layer type (instances of layers specified in ALL_LAYERNORM_LAYERS)
        2. By parameter name patterns (containing 'bias', 'layernorm', or 'rmsnorm')
        )ZbiasZ	layernormZrmsnorm)rG   r0   )rf  r   decay_parametersr   r   r   get_decay_parameter_names  s   z!Trainer.get_decay_parameter_namesc           	         s  t  r| jn| j}| jdu r| |  fdd| D | jjd fdd| D ddg}| jdur<| j\}}n	| 	| j|\}}d|v rN|
d}d|v rW|
d}d	|v r`|
d	}||fi || _d
t|v r|dddkrddl}|jj }d}| D ]7}t|tjr|tdd | D  7 }td| d|d  d ||dddi td| d qtd|d  d t  rt| j| _| jS )a   
        Setup the optimizer.

        We provide a reasonable default that works well. If you want to use something else, you can pass a tuple in the
        Trainer's init through `optimizers`, or subclass and override this method in a subclass.
        Nc                    s"   g | ]\}}| v r|j r|qS r   requires_gradr   npr  r   r   r         z,Trainer.create_optimizer.<locals>.<listcomp>)r   weight_decayc                    s"   g | ]\}}| vr|j r|qS r   r  r  r  r   r   r     r          r   r   optimizer_dictbitsandbytes
optim_bits   r   c                 S   s   i | ]	}|  | qS r   )Zdata_ptrnumelr   r  r   r   r   
<dictcomp>  r  z,Trainer.create_optimizer.<locals>.<dictcomp>zskipped : i   zM paramsweight    zbitsandbytes: will optimize z in fp32z	skipped: )r   r3  r   r9  r  named_parametersr   r  r   get_optimizer_cls_and_kwargspopr  r^  r  optimZGlobalOptimManagerZget_instancemodulesr   r   Z	Embeddingsumr   r   r  r  Zregister_module_overridedebugrP  ZDistributedOptimizer)	rf  Z	opt_modelZoptimizer_grouped_parametersoptimizer_clsoptimizer_kwargsr  managerZskippedmoduler   r  r   r    sN   







zTrainer.create_optimizerc                 C   s   t dd | j D S )z9
        Get the number of trainable parameters.
        c                 s   s    | ]
}|j r| V  qd S r   )r  r  r  r   r   r   r         z7Trainer.get_num_trainable_parameters.<locals>.<genexpr>)r  r   r   rm  r   r   r   get_num_trainable_parameters  s   z$Trainer.get_num_trainable_parametersc                 C   s$   | j du r	tddd | j jD S )zR
        Returns the learning rate of each parameter from self.optimizer.
        NPTrainer optimizer is None, please make sure you have setup the optimizer before.c                 S      g | ]}|d  qS )lrr   r   groupr   r   r   r         z.Trainer.get_learning_rates.<locals>.<listcomp>r9  r	  r;  rm  r   r   r   get_learning_rates  s   
zTrainer.get_learning_ratesri  c                 C   sN   | j du r	td|dur| j jD ]}||d v r|  S qdd | j jD S )a  
        Returns optimizer group for a parameter if given, else returns all optimizer groups for params.

        Args:
            param (`str` or `torch.nn.parameter.Parameter`, *optional*):
                The parameter for which optimizer group needs to be returned.
        Nr  r   c                 S   r  )r   r   r  r   r   r   r     r  z/Trainer.get_optimizer_group.<locals>.<listcomp>r  )rf  ri  r  r   r   r   get_optimizer_group  s   
zTrainer.get_optimizer_groupc           #         s	  i  j r j dddD ]}|d\}}||< qd ji j jf jd}	d}dtd	ttt	f d
ttt	f dt
dtt	t	f f
 fdd} jtjkrbt}ddd |fS  jtjtjfv rddlm} |}|  jtjkrddi |fS  jtjkrzddlm} |}| W |fS  ty   tdw  jtjkrzddlm}	 |	}| W |fS  ty   tdw  jtjkrzddlm}
 |
}| W |fS  ty   tdw  jtjtj tj!tj"tj#tj$tj%tj&tj'tj(tj)tj*tj+tj,tj-fv rzddl.m}m/}m0} d}d}d}|}d jv r4d}d jv r<d}d jv rE|}nd  jv rV|}d! j jfi}ntd" jv ra|}}nid# jv rt1 r~t23t4j52d$t23d%k r~td&dd'l.m6} |}t78d( jt78d) jt78d*d+ft78d,d-t78d. jd/}d0v rt9d0 |d0< d1v rt9d1 |d1< d2|i}d" jvr||d3< | | W n ty   td4w t1 rt23t4j52d$t23d5k rt:;d6 |fS  jtj<krWz7dd7l=m>} |}| t?8d8d9t@tA8d:d;t@tA8d<d;t@tA8d=d>d? W |fS  tyV   td@w  jtjBkrftAjjB}|fS  jtjCkrutAjjD}|fS  jtjEkrtAjj0}|fS  jtjFtjGtjHtjItjJtjKfv rtL stdAddBlMmN}mO}mP} tjF|tjG|tjH|tjI|tjJ|tjK|i}t9QdCdDt9QdEdFt7QdGdHQdIdJdK}| j||\} jtjHkrddd |fS  jtjRtjSfv rNtT stdLddMlUmV} tjR|tjS|i}t9QdCdDQdNdOQdPdQt9QdEdFt7QdGdRQdIdJdS}|| | j||\}|fS  jtjWtjXfv rtY s`tdTtZdUsitdVdu rrtdWddXl[m\}m]} dY jv r|}n|}dZi |fS  jtj^krt_ std[dd\l`ma} |}t78d]d^t78d_d`t78dadbt78dcdbt78dddRde |fS  jtjbtjcfv rItd rt23t4j52dft23dgk rtdht23t4j52dit23djkrtdkt23t4j52dft23dlkr ddmlemf}mg} nddmlhmf}mg}  jtjbkr2|}n jtjckr<|}ntdn| |fS  jtjitjjtjkfv rtl s]tdotZdUsftdpddqlmmn}mo}  i }d}! jtjikrtldrstdsddtlmmp}" |"}|}d}!n jtjjkr|}|}n jtjkkr| }ntdu jq|dv< |!r jr|dw< |t78dxd`t78dydzd{ | |fS td| j )~z
        Returns the optimizer class and optimizer parameters based on the training arguments.

        Args:
            args (`transformers.training_args.TrainingArguments`):
                The training arguments for the training session.

         r  ,=r  )betasepsToptimizer_nameoptimizer_mappingoptim_kwargsis_layerwise_supportedrk  c                    sP  |   d}|rjtjkr|rtd|  d||  }jdu r*td|  dtjt	t
fs:tdj du rFtd|  d	tjt
oTjd
ddk}g  D ]0\}}tj|dd\}	}
t|tjs|	r~|
s~t| d|  d q[|	s|sq[|d  q[tdkrtd|  dj dfdd D }fdd D }| d|id|i|g}|rjdkrtd|  di  |D ]}|d|gigfi  |< q|D ]}|d|gi|gfi  |< q fdd} D ]}|jr|| qt}d i d|i |fS ) a  
            Helper function to set up low-rank optimizers like GaLore and Apollo.

            Args:
                optimizer_name (str): Name of the optimizer.
                optimizer_mapping (dict): Mapping of optimizer names to their classes.
                optim_kwargs (dict): Keyword arguments for the optimizer.
                is_layerwise_supported (bool): Whether layerwise optimization is supported.

            Returns:
                Tuple[Any, Any]: Optimizer class and updated optimizer kwargs.
            Z	layerwisezLayer-wise z" does not support DDP at this timeNz1You need to define `optim_target_modules` to use z optimizerszX`optim_target_modules` must be a list of strings, a regex string, or 'all-linear'. Got: z'You need to pass a model to initialize z optimizer._-z
all-linearT)Zreturn_is_regexz matched but ignored. z only supports linear layers.z.weightr   zNo target modules found for z ().c                    s   g | ]
\}}| v r|qS r   r   r  target_params_namesr   r   r   i      zZTrainer.get_optimizer_cls_and_kwargs.<locals>.setup_low_rank_optimizer.<locals>.<listcomp>c                    s   g | ]
\}}| vr|qS r   r   r  r  r   r   r   j  r  r   r   z
Layerwise z( does not support gradient accumulation!c                    s*   | j d ur |     |    d S d S r   )Zgradstep	zero_grad)ri  )r  r   r   optimizer_hook|  s   
z^Trainer.get_optimizer_cls_and_kwargs.<locals>.setup_low_rank_optimizer.<locals>.optimizer_hookr  )lowerendswithr*  rg   r+  NotImplementedErrorZoptim_target_modulesr	  r   r   r  replaceZnamed_modulesrZ   r   ZLinearr  r#  appendr!  r  updater  r   r  Z"register_post_accumulate_grad_hookr?   )r  r  r  r  Zis_layerwiser  Z
all_linearmodule_namer  Ztarget_module_existsZis_regexZtarget_paramsZnon_target_paramsr;  ri  r  r   r   
optim_argsr  )r  r  r   setup_low_rank_optimizer,  sl   





"
zFTrainer.get_optimizer_cls_and_kwargs.<locals>.setup_low_rank_optimizerF)Zscale_parameterZrelative_stepr   )AdamWZfusedz7Trainer failed to import syncfree AdamW from torch_xla.)NpuFusedAdamWz3Trainer failed to import FusedAdamW from torch_npu.)	FusedAdamzFTrainer tried to instantiate apex FusedAdam but apex is not installed!)r  LionRMSpropr  NZpaged8bitr  ZadamZlionr  ZrmspropZademamixr  z0.44.0z{The AdEMAMix optimizer is not supported by your current version of `bitsandbytes`. Please install `bitsandbytes` >= 0.44.0.)AdEMAMixZbeta1Zbeta2Zbeta3gH.?alphag      @r  )r  r  r  Zt_alphaZt_beta3r  is_pagedzOTrainer tried to instantiate bnb optimizer but `bitsandbytes` is not installed!z0.41.1zYou are using 8-bit optimizers with a version of `bitsandbytes` < 0.41.1. It is recommended to update your version as a major bug has been fixed in 8-bit optimizers.)AnyPrecisionAdamWuse_kahan_summationFalsemomentum_dtypefloat32variance_dtypecompensation_buffer_dtyperS  )r  r  r  r  z4Please install https://github.com/pytorch/torchdistxzYou need to install `galore_torch` in order to use GaLore optimizers install it with `pip install git+https://github.com/jiaweizzhao/GaLore`)GaLoreAdafactorGaLoreAdamWGaLoreAdamW8bitr     update_proj_gap   scaleg      ?	proj_typeZstd)r  r  r  r  zYou need to install `apollo_torch` in order to use APOLLO optimizers install it with `pip install git+https://github.com/zhuhanqing/APOLLO`)APOLLOAdamWprojrandom
scale_typeZchannel      ?)r  r  r  r  r  r  ziYou need to install `lomo_optim` in order to use LOMO optimizers install it with `pip install lomo-optim`0.30.0zGYou need to have `accelerate>=0.30.0` to be able to use LOMO optimizerszMYou need to pass a `model` in order to correctly initialize a LOMO optimizer.)AdaLomoLomoadar   z5Please install grokadamw with `pip install grokadamw`)	GrokAdamW
alpha_initg\(\?lambg       @gammag?grokking_signal_decay_rategradient_clipping)r#  r$  r%  r&  r'  Ztorchaoz0.4.0zYou need to have `torchao>=0.4.0` in order to use torch 4-bit optimizers.Install it with `pip install torchao` or follow the instructions here: https://github.com/pytorch/aor   z2.4zYou need to have `torch>2.4` in order to use torch 4-bit optimizers. Install it with `pip install --upgrade torch` it is available on pipy. Otherwise, you need to install torch nightly.z0.11.0)	AdamW4bit	AdamW8bitzInvalid optimizerzwYou need to install `schedulefree` in order to use schedulefree optimizers. Install it with `pip install schedulefree.`zOYou need to have `accelerate>=0.30.0` to be able to use schedulefree optimizers)AdamWScheduleFreeSGDScheduleFreer  zYou need to install `schedulefree>=1.4.0` in order to use RAdamScheduleFree optimizer. Install it with `pip install schedulefree.`)RAdamScheduleFreezInvalid schedulefree optimizerr  warmup_stepsweight_lr_powerrr  )r.  r/  z2Trainer cannot instantiate unsupported optimizer: T)sr  r  splitlearning_rateZ
adam_beta1Z
adam_beta2Zadam_epsilonr  dictr   booltupler  rf   Z	ADAFACTORr-   r  ZADAMW_TORCHZADAMW_TORCH_FUSEDZtorch.optimr  ZADAMW_TORCH_XLAZtorch_xla.amp.syncfreer$  r	  ZADAMW_TORCH_NPU_FUSEDZtorch_npu.optimr  ZADAMW_APEX_FUSEDZapex.optimizersr  Z	ADAMW_BNBZ
ADAMW_8BITZPAGED_ADAMWZPAGED_ADAMW_8BITZADEMAMIXZADEMAMIX_8BITZPAGED_ADEMAMIXZPAGED_ADEMAMIX_8BITZLIONZ	LION_8BITZ
PAGED_LIONZPAGED_LION_8BITZRMSPROP_BNBZRMSPROP_8BITZRMSPROP_32BITZbitsandbytes.optimr  r  rz   r   r   r   r   r  floatr^  intr  r#  ZADAMW_ANYPRECISIONZtorchdistx.optimizersr
  r   r  r   ZSGDZADAGRADZAdagradZRMSPROPZGALORE_ADAMWZGALORE_ADAMW_8BITZGALORE_ADAFACTORZGALORE_ADAMW_LAYERWISEZGALORE_ADAMW_8BIT_LAYERWISEZGALORE_ADAFACTOR_LAYERWISEr|   Zgalore_torchr  r  r  r  ZAPOLLO_ADAMWZAPOLLO_ADAMW_LAYERWISEry   Zapollo_torchr  LOMOADALOMOr   rw   Z
lomo_optimr  r   Z	GROKADAMWr}   Z	grokadamwr"  ZADAMW_TORCH_4BITZADAMW_TORCH_8BITr   Ztorchao.optimr(  r)  Ztorchao.prototype.low_bit_optimZSCHEDULE_FREE_RADAMZSCHEDULE_FREE_ADAMWZSCHEDULE_FREE_SGDr   Zschedulefreer*  r+  r,  r  r-  )#r   r   mappingkeyvalueZadam_kwargsr   r  r  r  r  r  r  r	  r  Zadditional_optim_kwargsr  Z
bnb_kwargsr
  r  r  r  r  Zgalore_optim_kwargsr  Zapollo_optim_kwargsr  r   r"  r(  r)  r*  r+  Zrequire_warmupr,  r   r  r   r    s  


	


_  >  
  7    1    )    !  





 P 
 D  7  5  3 

  



s

]U
1




z$Trainer.get_optimizer_cls_and_kwargsr9  c                 C   sH   | j du r!t| jj|du r| jn|| j||| jjd| _ d| _| j S )z
        Setup the scheduler. The optimizer of the trainer must have been set up either before this method is called or
        passed as an argument.

        Args:
            num_training_steps (int): The number of training steps to do.
        N)r9  Znum_warmup_stepsr  Zscheduler_specific_kwargsT)r:  r.   r   Zlr_scheduler_typer9  Zget_warmup_stepsZlr_scheduler_kwargsr\  r  r   r   r   r    s   

zTrainer.create_schedulerr  c              
   C   sT   z|j }t|trt|j j W S t|j W S  tttfy)   t|| jj  Y S w )z
        Helper to get number of samples in a [`~torch.utils.data.DataLoader`] by accessing its dataset. When
        dataloader.dataset does not exist or has no length, estimates as best it can
        )	r  r   r=   r!  	NameErrorAttributeError	TypeErrorr   per_device_train_batch_size)rf  r  r  r   r   r   num_examples  s   
zTrainer.num_examplestrain_dlrG  c                 C   s^   d}z| D ]}|d   }|dur||   W S ||7 }qW |S  ty.   td Y |S w )zq
        Helper to get number of tokens in a [`~torch.utils.data.DataLoader`] by enumerating dataloader.
        r   	input_idsNz%Cannot get num_tokens from dataloader)r  KeyErrorr  r#  )rB  rG  Ztrain_tokensbatchtokensr   r   r   
num_tokens  s   
zTrainer.num_tokenstrialzoptuna.Trialc                 C   s  || _ | jdu s|du rdS | jtjkr| |}n(| jtjkr)|}|dd n| jtjkr:dd |j	 D }n| jtj
krB|}|	 D ],\}}t| j|sZtd| d qFt| j|d}|durkt||}t| j|| qF| jtjkrtd|j  | jtjkrtd|j  | jtj
krtd	|  | jr| jjdu rtd
| j  ddlm} ddlm} || jj| j_| jj| j || jjd| j_t  !  | "  dS )zHP search setup codeNwandbc                 S   s(   i | ]\}}|t |trt|n|qS r   )r   r  r7  r   r   vr   r   r   r    s   ( z,Trainer._hp_search_setup.<locals>.<dictcomp>zTrying to set zY in the hyperparameter search but there is no corresponding field in `TrainingArguments`.zTrial: zSigOpt Assignments: zW&B Sweep parameters: z7For sweeps with deepspeed, `args.deepspeed` must be setr   )DeepSpeedPluginHfTrainerDeepSpeedConfig)hf_ds_config)#Z_trialrX  rS   OPTUNAhp_spaceRAYr  SIGOPTZassignmentsitemsWANDBr"  r   r  r#  r  r   setattrr  r   r)  r  r	  r4  free_memoryaccelerate.utilsrL  #transformers.integrations.deepspeedrN  hf_deepspeed_configtrainer_config_processdeepspeed_pluginr   Z_reset_stater  )rf  rH  r   r;  r<  old_attrrL  rN  r   r   r   _hp_search_setup  sN   


zTrainer._hp_search_setupr  metricsc                 C   s  | j d u s	|d u rd S | }| || _| j tjkrIdd l}t|drC|j	 sE|
| j| | rG| j| j| j| j | d S d S d S | j tjkrdd l}t )}d }| jjrk| j|d |jj|}| j|d< |jj
||d W d    d S 1 sw   Y  d S d S )Nr   study)checkpoint_dir	objective
checkpoint)rX  copycompute_objectiverb  rS   rP  optunar"  r`  Z_is_multi_objectivereportZshould_pruner=  on_train_endr   rQ  rV  ZTrialPrunedrR  	ray.traintempfileTemporaryDirectoryrC  _tune_save_checkpointtrainZ
Checkpointfrom_directory)rf  rH  r  r_  rg  rayZtemp_checkpoint_dirrd  r   r   r   _report_to_hp_search4  s0   

"zTrainer._report_to_hp_searchra  c                 C   s   t j|t d| jj }| j|dd | jjrK| j	 | jj
d< | jt j|t t| j t j|t t| j t j|t d S d S )Nr  T_internal_callr9   )rD  pathr  rO   rQ  global_step
save_modelr   rC  rV  r  save_to_jsonTRAINER_STATE_NAMEr   saver9  
state_dictOPTIMIZER_NAMEr:  SCHEDULER_NAME)rf  ra  r   r   r   r   rm  L  s    zTrainer._tune_save_checkpointc                 C   sL   t | j}|dkr|  }n|dkr| |}ntd|d u r$td|S )Nr   r   z'model_init should have 0 or 1 argument.z"model_init should not return None.)rb   r   r  )rf  rH  Zmodel_init_argcountr   r   r   r   r  V  s   

zTrainer.call_model_initc                    s  |s|d u rt d |S tt| |   zt|}|  |jdd }|r.||_	t
dd}| jj|d; t ' t trOtjj| dd}ntjj| fdd D dd}W d    n1 siw   Y  W d    n1 sxw   Y  tj|}t  |di   |di   W d    n1 sw   Y  |}d| _W |S  tttttfy } zt d	| d
 W Y d }~|S d }~ww |S )NzAfailed to use PyTorch jit mode due to current dataloader is none.Z_original_forwardF)cache_enabled)autocast_handler)Zexample_kwarg_inputsstrictc                    s   i | ]}| | qS r   r   )r   r;  Zexample_batchr   r   r  y  s    z0Trainer.torch_jit_model_eval.<locals>.<dictcomp>z'failed to use PyTorch jit mode due to: .r   )r  r#  nextiter_prepare_inputsre  r  __dict__r  r5  r   r4  autocastr   no_gradr   r3  ZjittracefreezerN  r  r?  r	  r=  
IndexError)rf  r   r  trainingZ	jit_modelZoriginal_forwardr~  er   r  r   torch_jit_model_evald  sN   




 	
zTrainer.torch_jit_model_evalc                 C   s   t  stddd l}|s+|  | js| jjrtjn|}|j	||dd| j d}|S |j
s2|  |j	||| jddd\}| _|S )NzUsing IPEX but IPEX is not installed or IPEX's version does not match current PyTorch, please refer to https://github.com/intel/intel-extension-for-pytorch.r   ZO1F)r   levelZconv_bn_foldinginplaceT)r   r9  r  r  )r   r$  Zintel_extension_for_pytorchr  r  r   r.  r   rS  optimizer  rn  r9  )rf  r   r  r   Zipexr   r   r   ipex_optimize_model  s    zTrainer.ipex_optimize_modelc              	   C   s   dddd}d}d}|  D ],\}}t||d }t||d }	|d ur:|	d ur:||	kr:|d| d| d	|	 d
7 }d}q|j}
|jtd|j }|
|krX|d|
 d	| d
7 }d}|rat| d S d S )Nlogging_steps
eval_steps
save_steps)r  r  r  FztWarning: The following arguments do not match the ones in the `trainer_state.json` within the checkpoint directory: z
	r  z (from args) != z (from trainer_state.json)Tr   z
	per_device_train_batch_size: )rT  r  r@  rZ  maxn_gpur  warning_once)rf  training_argsZtrainer_stateZattributes_mapZhas_warningZwarning_strZarg_attrZ
state_attr	arg_valueZstate_valueZtrain_bs_argsZtrain_bs_stater   r   r   #compare_trainer_and_checkpoint_args  s*   z+Trainer.compare_trainer_and_checkpoint_argsc                    sV  j jrjr
tjntj}j|||d}t r,tj	t
jjr#j	S t
j|j jdS jj|dd|ur8|S jrK|rKtj|jj jd\}_j jdkr\t|dds\t|}j jrut }|||}tt | d_|sy|S jrCzd	d
lm   d	dlm! d	dl"m#}m$} j%rd	dl&m' W n t(y   t(dw d }d }	t|dd }
j j)*d|
}j j)d d	krt+j,|j j)d d}n%|d urt- }|D ]}t.||}|d u rt/d|0| qt+j,||d}j j1}j j)d r|j2j3rt45d d|j2_3 fdd}	j%r)dd }||||	d _}n |f||	d| _}di fdd}|t6_7|S t8 rWtj9j:|t;t<=dgd }|S j j>t?j@krtA re|S i }j jBd uruj jB|d!< nt|tCr|jD |d!< nd"|d!< j jEd urj jE|d#< j jFd urj jF|d$< tGd%i |j_H|S )&N)r   )Zbackward_passes_per_stepFZkeep_torch_compile)	opt_levelr   Zis_loaded_in_8bit   r   )XlaFullyShardedDataParallel)checkpoint_module)size_based_auto_wrap_policytransformer_auto_wrap_policy)SpmdFullyShardedDataParallelzJMissing XLA FSDP related module; please make sure to use torch-xla >= 2.0.Z_no_split_modulesZtransformer_layer_cls_to_wrapmin_num_params)r  z@Could not find the transformer layer class to wrap in the model.)Ztransformer_layer_clsZxla_fsdp_grad_ckptzX`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`.c                    s(   j s n}|| g|R i |S r   )r_  )mr   kwargsZ
target_clsFSDPZFSDPv2r  rf  r   r   auto_wrapper_callable  s   z2Trainer._wrap_model.<locals>.auto_wrapper_callablec                 S   sh   ddl m} d }t| tjr| }nt| tr| d }nt| |r#| j}|d u r+tdt	||d d S )Nr   )CausalLMOutputWithPastr   zASomething went wrong, the output of the model shouldn't be `None`)r  NN)
Zmodeling_outputsr  r   r   Tensorr5  logitsr	  rb  Zmark_sharding)outputZmeshr  Zreal_outputr   r   r   shard_output  s   


z)Trainer._wrap_model.<locals>.shard_output)r  auto_wrap_policyr  )r  r  c                 S   s    | j di |}|rt  |S )Nr   )r  xm	mark_step)r9  barrierZoptimizer_argslossr   r   r   patched_optimizer_step4  s   z3Trainer._wrap_model.<locals>.patched_optimizer_stepZSMDATAPARALLEL_LOCAL_RANK)Z
device_idsZfind_unused_parametersTZbucket_cap_mbZbroadcast_buffersr   )Ir   Zuse_ipexrN  r   rS  r  r  r   r   r3  rP  r   ZDistributedModelr  r4  r*   rM  r   Z
initializer9  Zfp16_opt_levelr  r  r   ZDataParallelZjit_mode_evaltimer  roundjit_compilation_timer(  torch_xla.distributed.fsdpr  r  Ztorch_xla.distributed.fsdp.wrapr  r  r_  Z7torch_xla.experimental.spmd_fully_sharded_data_parallelr  r$  r'  r^  	functoolsr   r  rF   	ExceptionaddZxla_fsdp_configconfigZ	use_cacher  r  r  Zoptimizer_stepr   ZparallelZDistributedDataParallelr7  rD  getenvr*  rg   r+  r   Zddp_find_unused_parametersr(   Zis_gradient_checkpointingZddp_bucket_cap_mbZddp_broadcast_buffersr   Zddp_handler)rf  r   r  r  r   
start_timer  r  r  r  Z%default_transformer_cls_names_to_wrapZ"fsdp_transformer_layer_cls_to_wrapZtransformer_cls_to_wrapZlayer_classZtransformer_clsZfsdp_kwargsr  r  r  r   r  r   _wrap_model  s   



	zTrainer._wrap_modelresume_from_checkpointignore_keys_for_evalc           	      K   s  |du rd}| j   | j}d| _| jdur| | j| _|js#|jr6|j	s6| j
s6| jdu r6| | j|j d|v rE|d}tdt t|dkrZtddt|  d	| | | jj| _d}| jdur| jjrut| jjnt| jj | || _d}d
\| _| _ t!|t"r|rt#|j$}|du rt%d|j$ d|durt& s| j's| j(s| )| t*+t,j-|t.}|jdur|j| _|r| j/r| | j|j | j| _0t1| j2| j|j3}|j4rzt56  |||||dW t57  S t57  w |||||dS )a  
        Main training entry point.

        Args:
            resume_from_checkpoint (`str` or `bool`, *optional*):
                If a `str`, local path to a saved checkpoint as saved by a previous instance of [`Trainer`]. If a
                `bool` and equals `True`, load the last checkpoint in *args.output_dir* as saved by a previous instance
                of [`Trainer`]. If present, training will resume from the model/optimizer/scheduler states loaded here.
            trial (`optuna.Trial` or `Dict[str, Any]`, *optional*):
                The trial run or the hyperparameter dictionary for hyperparameter search.
            ignore_keys_for_eval (`List[str]`, *optional*)
                A list of keys in the output of your model (if it is a dictionary) that should be ignored when
                gathering predictions for evaluation during the training.
            kwargs (`Dict[str, Any]`, *optional*):
                Additional keyword arguments used to hide deprecated arguments
        FNTZ
model_pathzi`model_path` is deprecated and will be removed in a future version. Use `resume_from_checkpoint` instead.r   z*train() got unexpected keyword arguments: r  r  r   z/No valid checkpoint found in output directory ())r   r  rH  r  )8r  r  r   r  r8  rr  r   r-  r.  r/  r  r   r2  r   r  r  r  r  r!  r?  r  r   r  r^  rZ  r[  r  r]   r  rd   r  r9  r:  r   r4  r_   r   r	  r   r)  r0  _load_from_checkpointr:   load_from_jsonrD  rt  rx  r,  r3  r^   _inner_training_loopauto_find_batch_sizerA  hf_hub_utilsZdisable_progress_barsZenable_progress_bars)	rf  r  rH  r  r  r   Zmodel_reloadedrQ  Zinner_training_loopr   r   r   rn  V  s   









zTrainer.trainc           ;      C   s  | j   || _| jjrE| jj| jkr@ddlm} || j	\| _	| j
| _	| jr@| jj}| jtd| jj | j_| d || j_| j| j_td| j  |  }| jrYt|}| j|j |j }	| |||	\}
}}}}}}d }| jjr| ||r}d n|}|d ur|r||j9 }n||j9 }tj| jjv r| jjdkrtdt| j
}t p| j p| j!}| j!ot"| j jj#dddk}|rd	}| j$rd | _%d	| _$| jrt&| |d
\| _'| _%|s| j(|d
 t)dd | j*j+| j,g D d| _|d u| j_-| j| j_| j.|| |j/r| j
j0|j1d | 2| j	}|| j
u rdnd	}|r.| j!r.t3| j
dd| _
|rM|rG| 4  | j j5dkrG| j 6| j
| _
| j(|d
 |r| j
7  t8| j%dru| j9rh| j 6| j
}n1| j 6| j
| j'\}| _'n$| j 6| j
| j'| j%\}| _'| _%n| jj:t;j<t;j=fv r| j 6| j'| _'| j!r| | _
| _	|| j
ur|| _	| jr| j	| _>|d ur| jrt?| j	|t@| j
 d nt s| j!r| A|| j	 | B| | C| tDd tDd|d tDd|
d tDd| jjd | jj| jkrtDd| jd tDd|	d tDd|j  tDd|d tDdtE|ddd d| j_FtGG }d}d}d }|d urtHjIJtHjIK|tLrt)MtHjIK|tL| _| N| j| j | O  tP| jjQ| }|jRs| jjQ| }||j9 }nd}tDd tDd|  tDd | jjQ  |jRstDd!| d"| d# d$D ]}tS| j*|t"| | q|| j*_T| jU| ||
| tVjWd%|jXd&}d%| _Y| jjQ| _Z|[  d }d }| j*\|| j| j,| _,|j]r| j^||dd' t_||
D ]}|} t8| d(r| `| |jadkr d | _b|d ur)tc| n|jd|j }!| j*e|| j| j,| _,||krN|d urN|dkrN| f| d	}"d}#|dkrbtg| |} |}#d}d}"d)}$th| }%|!|j }&|&dkru|j}&d)}'|!|j tP|&|jk  }(t_|(D ]4})|'d7 }'|'|(d kr|jn|&}*| i|%|*|jX\}+},tj|+D ]\}-}.|$d7 }$|$d |j dkp|$d |!k}/| j jkl|/ | jjmr t"| j
d*d+}0|0|.vrtnd, n!|.|0 o }1tVjW|1| jjXtVjpd-}1| j jq| j r|1s t 7  _q|"r
| f| d	}"|dkr)|d8 }|d ur|ud |dkr'| f| q|d ur4|v  d }|$|j dkrG| j*w|| j| j,| _,|-tc|+d kra| j jxtyjzkrat{j|| j j}|d.nt~j}2|2  | ||.|,}3W d    n	1 szw   Y  |jrt stV|3stV|3r||d| jjQ | jZ   }n|jX|3jXkrtd/|jX d0|3jX ||3 }|  jt| |.7  _|/r| j jkld |jd ur"|jdkr"t r|jr| j'|j}4n| j9rtjt| j'|j}4n
| j | |j}4t r | j jxtyjzkr | }t8|d1r|t }n|4}| j*|| j| j,| _,| j'  | j*|| j| j,| _,|  }| j jsUt| j%tVj:j%jsU| j%  |[  | j jQd7  _Q||$d |# |!  | j_F| j*|| j| j,| _,| j||||||||d2 n| j*|| j| j,| _,| j,js| j,jrt rt   nq| j,js| j,jrt rt   nq|$dk rtnd3| jjQ d4| d5 d| j,_| j*|| j| j,| _,| j||||||||d2 tj| jjv rt r tt  ntnd6 | j,jr nq|jart8| d7rt| d7 tDd8 |jrO| jjd urOt r7td9 n|jtjkrCt  nt rKt  |   |  jY|t 7  _Yt| jjQd:}5| jY|5 }6td;||| jjd|d<}7|   | jj|7d=< |6|7d>< d	| _| j|7 | |7 | |}8| jd	|8d?}9| jjr| jjd ur| jjdkr|9D ]}:tHjI|:| jjstDd@|: dA tj|:ddB q| j*|| j| j,| _,|   | jd ur| | j
 t| jjQ|6|7S )CNr   )release_memoryr   Tz)Currently training with a batch size of: zCurrently --debug underflow_overflow is not supported under DP. Please use DDP (torchrun or torch.distributed.launch (deprecated)).Zfsdp_version   F)r  c                 S   r   r   r   r   r   r   r   r   	  r   z0Trainer._inner_training_loop.<locals>.<listcomp>)r  )gradient_checkpointing_kwargs)	recursivefp8r  Zload_module_strictz***** Running training *****  Num examples = r  z  Num Epochs = z(  Instantaneous batch size per device = zA  Training with DataParallel so batch size has been adjusted to: zE  Total train batch size (w. parallel, distributed & accumulation) = z   Gradient Accumulation steps = z  Total optimization steps = z#  Number of trainable parameters = )Ztrainable_onlyzE  Continuing training from checkpoint, will skip to saved global_stepz!  Continuing training from epoch z'  Continuing training from global step z  Will skip the first z epochs then the first z batches in the first epoch.)r   r9  r:  r  r   )skip_scheduler	set_epochmain_input_namerC  zTried to track the number of tokens seen, however the current model is not configured properly to know what item is the input. To fix this, add a `main_input_name` attribute to the model class you are using.)r   r   r   z0Calculated loss must be on the original device: z but device in use is item)r2  zXThere seems not to be a single sample in your epoch_iterator, stopping training at step zI! This is expected if you're using an IterableDataset and set num_steps (z.) higher than the number of available samples.zYou enabled PyTorch/XLA debug metrics but you don't have a TPU configured. Check your training configuration if this is unexpected._pastzU

Training completed. Do not forget to share your model on huggingface.co/models =)

r  gMbP?rn  )num_samples	num_stepsrG  
total_flos
train_loss	use_mtimer   Deleting older checkpoint [] due to args.save_total_limitignore_errors)r4  rW  r[  r   r  rQ  rZ  rX  r  r3  r   r)  r@  r  r  propagate_args_to_deepspeedr  r  r  r_  r&   r  r  set_initial_training_valuesZinclude_tokens_per_secondrG  rH  r   ZUNDERFLOW_OVERFLOWr	  r   r   r(  r0  r  fsdp_pluginr\  r:  r#   r9  r  r:   r=  r   rV  Zis_hyper_param_searchZcompute_stepsgradient_checkpointingZgradient_checkpointing_enabler  r  r*   _fsdp_qlora_plugin_updatesmixed_precisionr  rn  r"  rM  r  rf   r8  r9  r  r$   r   r  _load_optimizer_and_scheduler_load_scalerr  rE   epochr  rD  rt  isfiler  rx  r  r  _load_callback_stater7  ru  Zignore_data_skiprV  train_dataloaderZinit_training_referencesr   r  r   _total_loss_scalar_globalstep_last_loggedr  Zon_train_beginZeval_on_start	_evaluaterd  r  
past_indexr  r!  rG  Zon_epoch_begin_load_rng_stater   r  get_batch_samples	enumerategradient_stateZ_set_sync_gradientsinclude_num_input_tokens_seenr#  r  Zint64num_input_tokens_seengatherr  r  r  closeZon_step_begindistributed_typer   	DEEPSPEEDr  r   Zno_syncr   r   training_stepZlogging_nan_inf_filterr   isnanisinfrW  r6  floating_point_opsZmax_grad_normr   Zclip_master_gradsr   rI  Zclip_grad_norm_r   Zmaster_paramsr   rw   Zget_global_grad_normZon_pre_optimizer_stepr  Zon_optimizer_stepr   Zoptimizer_step_was_skippedr   ReduceLROnPlateauZon_step_end_maybe_log_save_evaluateZon_substep_endZshould_epoch_stopZshould_training_stopr  r  Zon_epoch_endTPU_METRICS_DEBUGmaster_printmetmetrics_reportdelattrr  best_model_checkpoint
rendezvousr*  rg   r+  distr  rP  _load_best_modelre   
store_flosr  r  r  r]  log_get_output_dir_sorted_checkpointsrC  save_total_limitsamefileshutilrmtreeri  _finish_current_pushr8  rt  rY   );rf  r  r   r  rH  r  r  Zoriginal_bsr  total_train_batch_sizerH  num_update_steps_per_epochrA  num_train_samplesepoch_basedlen_dataloaderrG  Znum_train_tokensZdebug_overflowZdelay_optimizer_creationZis_fsdp2r   Zuse_accelerator_preparer  Zepochs_trainedZsteps_trained_in_current_epochZsteps_trained_progress_barattrtr_loss	grad_normr2  r  Zepoch_dataloaderZsteps_in_epochZrng_to_syncZsteps_skippedr  epoch_iterator	remainderZupdate_stepZtotal_updatesr  num_batchesbatch_samplesnum_items_in_batchiinputsZdo_sync_stepr  Zinput_tokenscontextZtr_loss_stepZ
_grad_normZeffective_global_stepr  r_  run_dircheckpoints_sortedrd  r   r   r   r    s  


	


















 

 


















&zTrainer._inner_training_loopc                 C   s   | j d urW|d urW| j tjkr|j}n*| j tjkr%dd l}|j  }n| j tj	kr/|j
}n| j tjkr=dd l}|jj
}| jd urG| |nd| }tj| jj|}|S | jj}|S )Nr   zrun-)rX  rS   rP  numberrR  rj  rn  Zget_contextZget_trial_idrS  idrU  rI  runr  rD  rt  r  r   r   )rf  rH  Zrun_idrp  rI  Zrun_namer!  r   r   r   r
  
  s    zTrainer._get_output_dirc                    s  |d u r| j }tj t}tj t}tj t}tj t}tj t}tj t	}tj t
}	tj oYt fddt D pYtjtj t d}
tj rl fddt D ng }|
r{| js{td  dtdd ||||	||fD s|
s|std	  td
  d tj|rt|}|j}|d ur|tkrtd| dt d tj|stj|s|
rWt rtjtj drtj tddd d S t| jdr| jjdu rtd t  t j!|ddd}d|d< |j"|dd}~d S | jr)t#| j$j%j&| j$| fi t'  d S | jj(r>tj|r>t)j j*|dd}nt  t j!|ddd}|"|d}~| +| d S t,|rt|dsht|drt|drtj- rt|dr|j.}t/|dkrtd |d }n|j0}|r|D ]}tj |}|j1||||kd  q|2| d S |j1 |dd  d S td!t d" d S td# d S t3| t | jj(d$}t s| +| d S d S )%Nc                 3   s.    | ]}t jt j |rt|v V  qd S r   )rD  rt  isdirr  FSDP_MODEL_NAMEr   Zfolder_namer  r   r   r   
  s    
z0Trainer._load_from_checkpoint.<locals>.<genexpr>z.binc              	      sV   g | ]'}t jt j |rt jt j |ts't jt j |tr|qS r   )rD  rt  r&  r  r  rk   rj   r(  r)  r   r   r   
  s    z1Trainer._load_from_checkpoint.<locals>.<listcomp>zCheckpoint found at z* is only supported when using PyTorch FSDPc                 s   s    | ]	}t j|V  qd S r   )rD  rt  r  )r   fr   r   r   r   
  s
    

z!Can't find a valid checkpoint at zLoading model from r  z9You are resuming training from a checkpoint trained with z- of Transformers but your current version is zJ. This is not recommended and could yield to errors or unwanted behaviors.user_content.ptFrt  tagr   Zload_optimizerr   TzOEnabling FP16 and loading from smp < 1.10 checkpoint together is not supported.r   map_locationweights_only_smp_is_partialr  r  active_adapteractive_adaptersload_adapterr   zFMultiple active adapters detected will only consider the first adapterr   )r%  jThe intermediate checkpoints of PEFT may not be saved correctly, consider using a custom callback to save i in corresponding saving folders. Check some examples here: https://github.com/huggingface/peft/issues/96GCould not load adapter model, make sure to have `peft>=0.3.0` installed)r  Zprefer_safe)4r   rD  rt  r  rl   rk   rj   rp   ro   rn   rm   r&  r7  listdirr  r'  r0  r	  r  r  r   Zfrom_json_fileZtransformers_versionr   r#  r   rP  r  r"  r   r   ru   r   loadload_state_dictr   r4  rQ  r  r   save_safetensorssafetensors	load_file_issue_warnings_after_loadr   existsr4  r!  r3  r5  Zset_adapterr)   )rf  r  r   config_fileZadapter_weights_fileZadapter_safe_weights_fileZweights_fileZweights_index_fileZsafe_weights_fileZsafe_weights_index_fileZis_fsdp_ckptZadapter_subdirsr  Zcheckpoint_versionrz  load_resultr4  r3  Zsubdir_nameZpeft_idr   r)  r   r  
  s   





	


zTrainer._load_from_checkpointc              
   C   s  t d| jj d| jj d tj| jjt}tj| jjt	}tj| jjt
}tj| jjt}t r:| jn| j}| jrPt| j| jjt| j d d S | jrgt| jjj| j|| jjfi t }d S tj|stj|stj|stj|rd}t rtjtj| jjdrtj| jjtddd d S | jjrtj|rtjj|d	d
}nt   tj!|d	dd}d|d< |j"|dd}d S t|rXt#|dst#|drPt#|drPt#|dr|j$d }	t%|j$dkrt &d n|j'}	tj|stj|rDz
|(| jj|	 W n% t)y7 }
 z|j*|	 j+r2d|j*|	 j,j- d}t)||
 d }
~
ww ddl.m/} |g g }n:t &dt
 d d}n.t &d d}n&| jjrmtj|rmtjj|d	d
}nt   tj!|d	dd}|"|d}t s|r| 0| d S d S d S tjtj| jjt1stjtj| jjt2rt3|| jjt d}t s| 0| d S d S t &d| d d S )NzLoading best model from z	 (score: r  r  Tr+  Fr,  r   r  r.  r1  r2  r3  r4  r5  r   r   zCDetected multiple active adapters, will only consider the first onez0When using prompt learning PEFT methods such as z, setting load_best_model_at_end=True can lead to errors, it is recommended to set this to False and to load the model manually from the checkpoint directory using PeftModel.from_pretrained(base_model, <path>) after training has finished.)_IncompatibleKeysr6  r7  r8  z#Could not locate the best model at zi, if you are running a distributed training on multiple nodes, you should activate `--save_on_each_node`.)4r  r  rQ  r  best_metricrD  rt  r  rp   rn   rk   rj   r   r3  r   r)  r$   r   r0  r   r4  r  r   r@  r  rP  r  r   r<  r=  r   r>  ru   r:  r;  r"  r4  r!  r#  r3  r5  r  Zpeft_configZis_prompt_learningZ	peft_typer<  Ztorch.nn.modules.modulerC  r?  rm   ro   r)   )rf  Zbest_model_pathZbest_safe_model_pathZbest_adapter_model_pathZbest_safe_adapter_model_pathr   rB  Zhas_been_loadedrz  r3  excmsgrC  r   r   r   r  Z  s    







	





"
zTrainer._load_best_modelc                 C   sz   t |jdkr(| jjd urt|jt| jjkr| j  n
td|j d t |jdkr;td|j d d S d S )Nr   z8There were missing keys in the checkpoint model loaded: r  z;There were unexpected keys in the checkpoint model loaded: )	r!  Zmissing_keysr   Z_keys_to_ignore_on_saver  r{  r  r#  Zunexpected_keys)rf  rB  r   r   r   r?    s   z"Trainer._issue_warnings_after_loadc                 C   s   | j |d}| || jj| t| jtjjjrS|sS| j	j
}|ds(d| }z| j||  W |S  tyR } ztd| dt|  d| d|d }~ww |S )Nignore_keyseval_9The `metric_for_best_model` training argument is set to 'W', which is not found in the evaluation metrics. The available evaluation metrics are: zX. Please ensure that the `compute_metrics` function returns a dictionary that includes 'zM' or consider changing the `metric_for_best_model` via the TrainingArguments.)evaluaterq  rQ  ru  r   r:  r   r  r  r   r  
startswithr  rD  r   r  )rf  rH  r  r  r_  metric_to_checkrE  r   r   r   r    s,   

	
zTrainer._evaluatec	                 C   s<  | j jrf| jj| jkrft rt  i }	| |	 
 }
||8 }t|
| jj| j  d|	d< |d urAt|tjr=|
 n||	d< |d urJ||	d< n|  |	d< |  j|
7  _| jj| _|   | |	| d }| j jr| ||}| j||d}| jjtjkr|| j _| j jr| || | j| j| j| j | _ d S d S )Nr  r  r  r2  )r_  rH  )rV  Z
should_logrQ  ru  r  r   r  r  _nested_gathermeanr  r  r   r   r  r   r  r  r	  Zshould_evaluater  _determine_best_metricr   r
  rW   r  rC  _save_checkpointr=  Zon_save)rf  r  r  r   rH  r  r  r  r2  logsZtr_loss_scalarr_  is_new_best_metricr   r   r   r    s4   

z Trainer._maybe_log_save_evaluatec                 C   s|  |d u rd S | j jdkr-| j j}tj|d| d}tj|s,td| d d S ntj|d}tj|sAtd d S t	  t
|}W d    n1 sTw   Y  t|d  tj|d	  t
j|d
  t rzt|d  | j jtjk}t
j rtdt
j|| t rtdt
j|| t rtdt
j|| t rtdt
j|| t rtdt
j || d S d S )Nr   
rng_state_.pthz$Didn't find an RNG file for process zr, if you are resuming a training that wasn't launched in a distributed fashion, reproducibility is not guaranteed.rng_state.pthzDidn't find an RNG file, if you are resuming a training that was launched in a distributed fashion, reproducibility is not guaranteed.pythonnumpyr   r   CUDAZNPUZHPUZMLUZMUSA)!r   r  r  rD  rt  r  r  r  r  r   r   r:  r  setstater   Z	set_stateZset_rng_stater   r  r*  rg   r+  cudais_availablerN   r   npur   hpur   mlur   musa)rf  rd  r  Zrng_fileZcheckpoint_rng_stateZis_distributedr   r   r   r  !  sJ   

zTrainer._load_rng_statec              
   C   s   d}| j jdurt| j j}|dsd| }z|| }W n ty8 } ztd| dt|  d|d}~ww | j jr@tjntj	}| j
jdu rW| j jrQtdntd| j
_||| j
jrt|| j
_| j jtjtjfv rr| j
j| j
_d	}|S )
z
        Determine if the model should be saved based on the evaluation metrics.

        Returns:
            bool: True if a new best metric was found, else False
        FNrI  rJ  rK  zJ. Consider changing the `metric_for_best_model` via the TrainingArguments.z-infinfT)r   r  rM  rD  r   r  Zgreater_is_betterr   ZgreaterlessrQ  rD  r6  r
  rW   STEPSEPOCHru  best_global_step)rf  r_  rH  rT  rN  Zmetric_valuerE  operatorr   r   r   rQ  L  s4   


zTrainer._determine_best_metricc                 C   sz  t  d| jj }| jd u r|d u r|   | j|d}tj||}| j	|dd | j
jtjtjfv rR| jjrRt  d| jj }tj||}tj|rR|| j_| j
jse| | | | | | | j
jrdd | jj| jg D D ]#}|jj}	| }
t| jj|	 tr| jj|	 |
 qv|
| jj|	< qv| j tj|t! | j
j"r| #| | j
jr| j$d|d d S d S )Nr  )rH  Trr  c                 S   r   r   r   r   r   r   r   r     r   z,Trainer._save_checkpoint.<locals>.<listcomp>r  )%rO   rQ  ru  rX  r  r
  rD  rt  r  rv  r   r
  rW   rd  re  rf  r@  r  save_only_model_save_optimizer_and_scheduler_save_scaler_save_rng_staterC  r=  r   rV  r  r  r   r  r   r  rw  rx  rA  _push_from_checkpoint_rotate_checkpoints)rf  r   rH  checkpoint_folderr!  r   Zbest_checkpoint_folderZbest_checkpoint_dirr   Zcb_nameZcb_stater   r   r   rR  r  s<   




zTrainer._save_checkpointc              	   C   s  t  tj  tj  d}tj r+| jj	t
jkr#tjj  |d< ntjj  |d< t r4t |d< t rO| jj	t
jkrGtjj  |d< ntjj  |d< t rj| jj	t
jkrbtjj  |d< ntjj  |d< t r| jj	t
jkr}tjj  |d< ntjj  |d< t r| jj	t
jkrtj |d< ntj |d< tj|dd	 | jjd
krt|tj|d d S t|tj|d| jj d d S )N)rX  rY  r   r\  r   r^  r_  r`  ra  Tr   r   rW  rU  rV  )r  getstater   Z	get_stater   Zget_rng_stater\  r]  r   r*  rg   r+  Zget_rng_state_allr   r  r   r^  r   r_  r   r`  r   ra  rD  rE  r  ry  rt  r  r  )rf  r   Z
rng_statesr   r   r   rk    s<   
&zTrainer._save_rng_statec                 C   sv  t  rhtd | jr1| j | j d}tj|t	j
|d| jj d| jj dt dd nt| j t	j
|t tjdd	}t| j t	j
|t t| W d    n1 sbw   Y  nt r| jjdd
}t  t dkstjjjrtj|t	j
|tdtjjjd n]| jrdtt | j!j"j#$ v }|rt%| jr| j!j"|dd n<| j!"| n5| j&rt'| j(jj)| j(| j|fi t*  t+| j(jj)| j(| j| j| n| jj,rt-| j t	j
|t | jot.| jt/ }| jj,r5| jr|r9t  s7tjdd	}t-| j t	j
|t W d    n	1 s*w   Y  t| d S d S d S d S )NZsaving_optimizer_states)r9  shard_metadatar  -of-r  FZmaster_onlyTrecord)Zgather_if_shardr   )r   Zv3exclude_frozen_parameters)ru  )0r   r  r  re  r9  rz  r   get_shard_metadatary  rD  rt  r  r   r  r  r{  r  catch_warningsr:  r|  rL   r   Zlocal_state_dictrP  r  Zrdp_rankrQ  rR  Zshard_optimizer_stater)  r  r   r   r3  save_checkpointr   r  r   r0  r   r4  r  r   r   rC  r   r   r   )rf  r   Zoptmcaught_warningsZopt_state_dictZ accept_exclude_frozen_parametersZis_deepspeed_custom_schedulerr   r   r   ri    s   

z%Trainer._save_optimizer_and_schedulerc              
      s^   du rdS | j r>t| jts<tjdd}t  | jtj	t
j tdd W d   n1 s3w   Y  t| dS t rMtt
j td n)t
jt
j tpvt
jt
j tpvt
j ovt fddt
 D }| jrtt
j d| jj d	t n|}|rt
jt
j trt r| jrt  tj	t
j d
| jj d| jj d	t ddd}|d }nt  tj	t
j tddd}tjdd}t  tj	t
j tddd}W d   n1 sw   Y  t| t|| jj t|| jj | j| | j| dS t rBt
jt
j dr5 fdd}n fdd}| j !| n9| jjdkrM| jjnd}| j"rgt#| j$j%j&| j$| j| j' fi t(  nt  | jtj	t
j t|dd tjdd}t  | jtj	t
j tdd W d   n	1 sw   Y  t| dS dS dS )z3If optimizer and scheduler states exist, load them.NTrs  r0  _*c                 3   s8    | ]}t jt j |rtd d |v V  qdS )r  r   N)rD  rt  r&  r  OPTIMIZER_NAME_BINr1  r(  rc  r   r   r   0  s    
z8Trainer._load_optimizer_and_scheduler.<locals>.<genexpr>z	rank*-of-r  r  rq  r   r.  r9  r+  c                    s"   | tjtj tdd d S )NTr   )r;  rP  r:  rD  rt  r  r{  modoptrc  r   r   opt_load_hooka  s   "z<Trainer._load_optimizer_and_scheduler.<locals>.opt_load_hookc                    sJ   t r|tjtj tddd d S |tjtj tdd d S )NT)r   Zback_compatr   )rO  r;  rP  r:  rD  rt  r  r{  r}  rc  r   r   r  f  s
   "r   ))r)  r   r:  r   r  rw  ru   r;  r   r:  rD  rt  r  r|  rL   r   globr{  r  r|  r&  r7  r9  re  r   r  r   r  r  send_cpu_data_to_devicer   r9  r3  Zregister_post_step_hookr0  r   r4  rQ  r  r   r   )rf  rd  ry  checkpoint_file_existsZoptimizer_stateZlr_scheduler_stater  r/  r   rc  r   r    s   &
	z%Trainer._load_optimizer_and_schedulerc                 C   s   z| j j}W n
 ty   Y d S w |d u rd S t rHtd tjdd}t| j j	 t
j|t t| W d    n1 sCw   Y  | jjrzt s|tjdd}t| j j	 t
j|t W d    n1 sow   Y  t| d S d S d S )NZsaving_scaler_stateTrs  )r4  scalerr>  r   r  r  r  rw  ry  rz  rD  rt  r  SCALER_NAMErL   r   rC  r   )rf  r   r  ry  r   r   r   rj    s&   

 zTrainer._save_scalerc                 C   s  |du rdS t jt j|t}|rt rQtjdd}t  t	j
t j|tddd}W d   n1 s7w   Y  t| t|| jj | jj| dS tjdd}t  | jjt	j
t j|tdd W d   n1 svw   Y  t| dS dS )z If scaler state exists, load it.NTrs  r   r.  rz  )rD  rt  r  r  r  r   r  rw  ru   r   r:  rL   r  r  r   r   r4  r  r;  )rf  rd  r  ry  Zscaler_stater   r   r   r    s,   zTrainer._load_scalerc                    sN  | j jsdS g }g }| jj| jg }| jj D ]o\ }t|t	s$|g}t
 fdd|D r fdd|D }t||D ]>\}}|di }|di }	t|di |}
|	 D ]
\}}t|
|| qZt|trn|
| _n||
 | jt|
 q=td q|  qt|d	krtd
d| d |D ]}| j| qdS )zLIf callback states exist and were passed in, restore their states if enabledNc                 3   s    | ]	}|j j kV  qd S r   r  r  r   rw  Zstored_callbackr   r   r     s    z/Trainer._load_callback_state.<locals>.<genexpr>c                    s   g | ]
}|j j kr|qS r   r  r  r  r   r   r     s    z0Trainer._load_callback_state.<locals>.<listcomp>r   
attributeszPContinuing training from checkpoint, restoring any callbacks that were passed inr   zPCheckpoint included callbacks not included in current configuration. Ignoring. (r  r  r   )r   Z'restore_callback_states_from_checkpointr=  r   rV  rQ  r  rT  r   r   r7  zipr^  r   rV  r9   r  rz  r  r  r!  r#  r  r>  )rf  	not_foundZnew_callbacksZoriginal_callbacksrJ  
duplicatesrw  Zcallback_datar   r  Znew_callback	attributer<  r   r  r   r    s>   



zTrainer._load_callback_state   minimizerQ  rf  n_trials	directionbackendr  r  c           
      K   s   |du rt  }t|}t|  }|  || _| jdu r td|du r'|jn|| _|| _	|du r3t
n|| _|j| ||fi |}	d| _|	S )az  
        Launch an hyperparameter search using `optuna` or `Ray Tune` or `SigOpt`. The optimized quantity is determined
        by `compute_objective`, which defaults to a function returning the evaluation loss when no metric is provided,
        the sum of all metrics otherwise.

        <Tip warning={true}>

        To use this method, you need to have provided a `model_init` when initializing your [`Trainer`]: we need to
        reinitialize the model at each new run. This is incompatible with the `optimizers` argument, so you need to
        subclass [`Trainer`] and override the method [`~Trainer.create_optimizer_and_scheduler`] for custom
        optimizer/scheduler.

        </Tip>

        Args:
            hp_space (`Callable[["optuna.Trial"], Dict[str, float]]`, *optional*):
                A function that defines the hyperparameter search space. Will default to
                [`~trainer_utils.default_hp_space_optuna`] or [`~trainer_utils.default_hp_space_ray`] or
                [`~trainer_utils.default_hp_space_sigopt`] depending on your backend.
            compute_objective (`Callable[[Dict[str, float]], float]`, *optional*):
                A function computing the objective to minimize or maximize from the metrics returned by the `evaluate`
                method. Will default to [`~trainer_utils.default_compute_objective`].
            n_trials (`int`, *optional*, defaults to 100):
                The number of trial runs to test.
            direction (`str` or `List[str]`, *optional*, defaults to `"minimize"`):
                If it's single objective optimization, direction is `str`, can be `"minimize"` or `"maximize"`, you
                should pick `"minimize"` when optimizing the validation loss, `"maximize"` when optimizing one or
                several metrics. If it's multi objectives optimization, direction is `List[str]`, can be List of
                `"minimize"` and `"maximize"`, you should pick `"minimize"` when optimizing the validation loss,
                `"maximize"` when optimizing one or several metrics.
            backend (`str` or [`~training_utils.HPSearchBackend`], *optional*):
                The backend to use for hyperparameter search. Will default to optuna or Ray Tune or SigOpt, depending
                on which one is installed. If all are installed, will default to optuna.
            hp_name (`Callable[["optuna.Trial"], str]]`, *optional*):
                A function that defines the trial/run name. Will default to None.
            kwargs (`Dict[str, Any]`, *optional*):
                Additional keyword arguments for each backend:

                - `optuna`: parameters from
                  [optuna.study.create_study](https://optuna.readthedocs.io/en/stable/reference/generated/optuna.study.create_study.html)
                  and also the parameters `timeout`, `n_jobs` and `gc_after_trial` from
                  [optuna.study.Study.optimize](https://optuna.readthedocs.io/en/stable/reference/generated/optuna.study.Study.html#optuna.study.Study.optimize)
                - `ray`: parameters from [tune.run](https://docs.ray.io/en/latest/tune/api_docs/execution.html#tune-run).
                  If `resources_per_trial` is not set in the `kwargs`, it defaults to 1 CPU core and 1 GPU (if available).
                  If `progress_reporter` is not set in the `kwargs`,
                  [ray.tune.CLIReporter](https://docs.ray.io/en/latest/tune/api/doc/ray.tune.CLIReporter.html) is used.
                - `sigopt`: the parameter `proxies` from
                  [sigopt.Connection.set_proxies](https://docs.sigopt.com/support/faq#how-do-i-use-sigopt-with-a-proxy).

        Returns:
            [`trainer_utils.BestRun` or `List[trainer_utils.BestRun]`]: All the information about the best run or best
            runs for multi-objective optimization. Experiment summary can be found in `run_summary` attribute for Ray
            backend.
        NzXTo use hyperparameter search, you need to pass your model through a model_init function.)r!   rS   r    Zensure_availablerX  r   r  Zdefault_hp_spacerQ  r  r[   rf  r%  )
rf  rQ  rf  r  r  r  r  r  Zbackend_objZbest_runr   r   r   hyperparameter_search  s    @

zTrainer.hyperparameter_searchrS  r  c                 C   s   | j jdur| j j|d< | jjr&| j j|d< |dur&|td|| j jd i |d| j ji}| j j	| | j
| j| j | j|| _dS )a8  
        Log `logs` on the various objects watching training.

        Subclass and override this method to inject custom behavior.

        Args:
            logs (`Dict[str, float]`):
                The values to log.
            start_time (`Optional[float]`):
                The start of training.
        Nr  r  rn  )rG  r  )rQ  r  r   r  r  r  re   ru  Zlog_historyr  r=  Zon_logrV  )rf  rS  r  r  r   r   r   r	  5  s   zTrainer.logrJ  c                    s   t |trt| fdd| D S t |ttfr(t| fdd|D S t |tjrVd jj	i} j
rNt|sAt|rN|d jjjj i |jdi |S |S )	z|
        Prepares one `data` before feeding it to the model, be it a tensor or a nested list/dictionary of tensors.
        c                    s   i | ]
\}}|  |qS r   _prepare_inputrJ  rm  r   r   r  Q  r  z*Trainer._prepare_input.<locals>.<dictcomp>c                 3       | ]}  |V  qd S r   r  )r   rK  rm  r   r   r   S      z)Trainer._prepare_input.<locals>.<genexpr>r   r   Nr   )r   r   r   rT  r5  r   r   r  r   r   r)  is_floating_pointZ
is_complexr  r4  rQ  r\  rO  r   r|  )rf  rJ  r  r   rm  r   r  L  s   
zTrainer._prepare_inputr  c                 C   sR   |  |}t|dkrtdd| j d| jjdkr'| jdur'| j|d< |S )z
        Prepare `inputs` before feeding them to the model, converting them to tensors if they are not already and
        handling potential state.
        r   zThe batch received was empty, your model won't be able to train on it. Double-check that your training dataset contains keys expected by the model: r  r  NZmems)r  r!  r	  r  rL  r   r  r  rf  r  r   r   r   r  ^  s   


zTrainer._prepare_inputsc                 C   s   |   S )zF
        A helper wrapper to group together context managers.
        )autocast_smart_context_managerrm  r   r   r   compute_loss_context_managern  s   z$Trainer.compute_loss_context_managerr}  c                 C   s*   | j rtjjj|| jd}|S t }|S )z
        A helper wrapper that creates an appropriate context manager for `autocast` while feeding it the desired
        arguments, depending on the situation.
        )r}  r   )rN  r   r   r   r  rT  r   r   )rf  r}  Zctx_managerr   r   r   r  t  s
   z&Trainer.autocast_smart_context_managerc                 C   s  |   t| jdrt| jj r| j   | |}t r0t||| jj}|	 
 | jjS |   | j|||d}W d   n1 sGw   Y  ~| jjdur| jj| jj dkrt rftj  n2t rotj  n)t rxtj  n t rtj  nt rtj  nt rt d ntj!  i }| jj"t#j$t#j%fv r| & |d< | jj'dkr|( }| j)rt*+|| j}|,  W d   dS 1 sw   Y  dS | j-s| j.du r|| jj }| j/j0t1j2krd|d	< | j/j,|fi | |
 S )
aq  
        Perform a training step on a batch of inputs.

        Subclass and override to inject custom behavior.

        Args:
            model (`nn.Module`):
                The model to train.
            inputs (`Dict[str, Union[torch.Tensor, Any]]`):
                The inputs and targets of the model.

                The dictionary will be unpacked before being fed to the model. Most models expect the targets under the
                argument `labels`. Check your model's documentation for all accepted arguments.

        Return:
            `torch.Tensor`: The tensor with training loss on this batch.
        rn  r  Nr   zW`torch_empty_cache_steps` is set but HPU device/backend does not support empty_cache().r2  r   FZscale_wrt_gas)3rn  r"  r9  rF  r  r   r   r   r  reduce_meandetachr|  r   r  compute_lossZtorch_empty_cache_stepsrQ  ru  r   r   Zxpuempty_cacher   r`  r   ra  r   r^  r   Zmpsr   r  r#  r\  r  rf   r8  r9  r   r  rP  rM  r   Z
scale_lossZbackwardr6  r   r4  r  r   r  )rf  r   r  r  loss_mbr  r  Zscaled_lossr   r   r   r    sV   




"zTrainer.training_stepc                 C   s  | j dus
| jdurd|v r|d}nd}| jr)i }|dur#||d< i ||}|di |}| jjdkr=|| jj | _|durz| j|}t	|rR|j
j }	n| }	| jdurd| j|||d}
nA|	t v rs| j ||dd}
n2|  ||}
n+t|trd|vrtd	d
|  dd
|  dt|tr|d n|d }
| jjr| js| jr|dur|
| jj9 }
|r|
|fS |
S )z
        How the loss is computed by Trainer. By default, all models return the loss in the first element.

        Subclass and override for custom behavior.
        Nlabelsr  r   r  T)Zshift_labelsr  zJThe model did not return a loss from the inputs, only the following keys: r  z,. For reference, the inputs it received are r  r   )rU  r   r  r6  r   r  r  r4  r*   r   rn  r   Z	_get_namer+   r   r   r3  r	  r  r  average_tokens_across_devicesZnum_processes)rf  r   r  return_outputsr  r  Zloss_kwargsoutputsrh  r  r  r   r   r   r    sN   
zTrainer.compute_lossc                 C   s   | j jdkS )z
        Whether or not this process is the local (e.g., on one machine if training in a distributed fashion on several
        machines) main process.
        r   )r   Zlocal_process_indexrm  r   r   r   r     s   zTrainer.is_local_process_zeroc                 C   s   t  r	t dkS | jjdkS )z
        Whether or not this process is the global main process (when training in a distributed fashion on several
        machines, this is only going to be `True` for one process).
        r   )r   rP  r  r   r  rm  r   r   r   r    s   zTrainer.is_world_process_zeror   rs  c                 C   s~  |du r| j j}t r| | nt r9tj|dd | j }| j j	r+| j
||d tr8ttj|d  nt| jrcdt| jjjjv rbtttdkrb| j| j}| j j	rb| j
||d nJ| jrz| j| j}| j j	ry| j
||d W n2 ty   td | j j	r| j
|i d t | j j	|t!t"g | j#| Y n
w | j j	r| 
| | j j$r|s| j$d	d
 dS dS dS )z
        Will save the model, so you can reload it using `from_pretrained()`.

        Will only save from the main process.
        NTr   )rz  r+  ZFULL_STATE_DICTz0.24.1z| stage3_gather_16bit_weights_on_model_save=false. Saving the full checkpoint instead, use zero_to_fp32.py to recover weightsz
Model save)commit_message)%r   r   r   	_save_tpur   rD  rE  r3  rz  rC  _saverO  r   rt  r  touchr0  r  r4  rQ  r  state_dict_typer   r   accelerate_versionZget_state_dictr   r)  r  r	  r  r#  rM   rp   rn   rx  rA  )rf  r   rs  rz  r   r   r   rv    sN   

zTrainer.save_modelc              	   C   s2  |d ur|n| j j}td|  | j}t  tjddr2tj	|dd t
| j tj|t tf}td | jr| | d}tj|d| j j d	| j j d
t }tj||dd td | j jrddlm} |tj|ddt dd\}}|jj}| j|}	t|	|r|	j||tj| j jd ndtd t|tj|t nSt||st| j||r| j|j|| j jt | tj| j jd n,td t | }
t|
tj|t n|j|| j jtj| j jt | d | j!d ur| j jr| j!| d S d S d S )NSaving model checkpoint to F)localTr   Zsaving_checkpoint)r   rp  r  rq  r  rr  Zsave_full_checkpointsr   )%consolidate_sharded_model_checkpointsr  zrank*-of-*-)Zckpt_prefixZckpt_suffixrv  )rz  save_functionsafe_serializationETrainer.model is not a `PreTrainedModel`, only saving its state dict.)is_main_processrz  r  r  )r  r  r  rz  )"r   r   r  r  r   r  r  Zis_master_ordinalrD  rE  r   ry  rt  r  TRAINING_ARGS_NAMErs   r  re  rz  rv  r  r  rp   rC  r  r  r  r4  r*   r   save_pretrainedr<  Z_maybe_convert_to_cpur   )rf  r   r   supported_classesZckptZ	ckpt_pathr  Zfull_state_dictr  rh  rz  r   r   r   r  M  sv   






zTrainer._save_tpuc                 C   sp  |d ur|n| j j}tj|dd td|  t stfnttf}t	| j
|sv|d u r2| j
 }t	| jj| j
dd|rP| jj| j
ddj||| j jd n1td | j jrjtjj|tj|tdd	id
 nt|tj|t n| j
j||| j jd | jd ur| j| n| jd urt| jdr| jjd urtd | jj| t| j tj|t d S )NTr   r  Fr  )rz  r  r  r  pt)r   r   zWSaving Trainer.data_collator.tokenizer by default as Trainer.processing_class is `None`)r   r   rD  rE  r  r  r   r(   r   r   r   rz  r4  r*   r  r<  r=  r   Z	save_filert  r  rn   ry  rp   r   r   r"  r   r  )rf  r   rz  r  r   r   r   r    s:   







zTrainer._savec                 C   s\   | j jtjkr | j jt| jg| j jd	 
 7  _d| _d S | j j| j7  _d| _d S )Nr  r   )r   r*  rg   r+  rQ  r  rB   rW  r   r  r  rm  r   r   r   r    s   

zTrainer.store_flosc                 C   s^  g }dd t || dD }|D ]1}|r#|tj||f qtd| d|}|d urD| d urD|t	| d |f qt
|}|rnt|dkrn|d d |d d  }	|	d	k rntd
 | jd||dS dd |D }| jjd urtt | jj|v r|tt | jj}
t|
t|d D ]}||d  || ||< ||d < q|S )Nc                 S   s    g | ]}t j|rt|qS r   )rD  rt  r&  r  )r   xr   r   r   r     s     z/Trainer._sorted_checkpoints.<locals>.<listcomp>-*z.*z	-([0-9]+)r   r   r  r  zPmtime may not be reliable on this filesystem, falling back to numerical orderingF)r  r   checkpoint_prefixc                 S   r  )r   r   )r   rd  r   r   r   r     r  r  )r   r  r  rD  rt  getmtimerematchgroupsr7  sortedr!  r  r  r  rQ  r  r  indexrd  )rf  r   r  r  Zordering_and_checkpoint_pathZglob_checkpointsrt  Zregex_matchr"  Z
mtime_diffZbest_model_indexr  r   r   r   r    s0   
$zTrainer._sorted_checkpointsc                 C   s   | j jd u s| j jdkrd S | j||d}t|| j jkrd S | j j}| jjd ur9| j jdkr9|d | jjkr9d}tdt|| }|d | }|D ]}td| d t	j
|dd	 qJd S )
Nr   r  r   r  r  r  r  Tr  )r   r  r  r!  rQ  r  r  r  r  r  r  )rf  r  r   r"  r  Znumber_of_checkpoints_to_deleteZcheckpoints_to_be_deletedrd  r   r   r   rm    s    zTrainer._rotate_checkpointsr  rH  metric_key_prefixc              
   C   s  |du}|r|n| j }t|tr3i }| D ]\}}| j|r |n||| d| d}|| q|S | j  | |}	| j	rDt
|	}	t }
| jjrO| jn| j}||	d| jdu r\dnd||d}| jj| jj }| d|jv r||
|j| d 7 }
| d|jv r|
|j| d 7 }
|jt||
|jt|j| d	 | |j tj| jjv rtt  | j !| j| j"| j#|j| _#| j$|j |jS )
a  
        Run evaluation and returns metrics.

        The calling script will be responsible for providing a method to compute metrics, as they are task-dependent
        (pass it to the init `compute_metrics` argument).

        You can also subclass and override this method to inject custom behavior.

        Args:
            eval_dataset (Union[`Dataset`, Dict[str, `Dataset`]), *optional*):
                Pass a dataset if you wish to override `self.eval_dataset`. If it is a [`~datasets.Dataset`], columns
                not accepted by the `model.forward()` method are automatically removed. If it is a dictionary, it will
                evaluate on each dataset, prepending the dictionary key to the metric name. Datasets must implement the
                `__len__` method.

                <Tip>

                If you pass a dictionary with names of datasets as keys and datasets as values, evaluate will run
                separate evaluations on each dataset. This can be useful to monitor how training affects other
                datasets or simply to get a more fine-grained evaluation.
                When used with `load_best_model_at_end`, make sure `metric_for_best_model` references exactly one
                of the datasets. If you, for example, pass in `{"data1": data1, "data2": data2}` for two datasets
                `data1` and `data2`, you could specify `metric_for_best_model="eval_data1_loss"` for using the
                loss on `data1` and `metric_for_best_model="eval_data2_loss"` for the loss on `data2`.

                </Tip>

            ignore_keys (`List[str]`, *optional*):
                A list of keys in the output of your model (if it is a dictionary) that should be ignored when
                gathering predictions.
            metric_key_prefix (`str`, *optional*, defaults to `"eval"`):
                An optional prefix to be used as the metrics key prefix. For example the metrics "bleu" will be named
                "eval_bleu" if the prefix is "eval" (default)

        Returns:
            A dictionary containing the evaluation loss and the potential metrics computed from the predictions. The
            dictionary also contains the epoch number which comes from the training state.
        Nr  )r   rH  r  r  T)r  prediction_loss_onlyrH  r  _jit_compilation_time_model_preparation_timer  r  )%r   r   r3  rT  rL  r  r  r  r  r_  r&   r  r   r  prediction_loopevaluation_loopr   r  r  r_  re   r  mathceilr	  r   r  r  r  r   r  r  r=  Zon_evaluaterQ  rV  r]  )rf  r   rH  r  overrider_  Zeval_dataset_nameZ_eval_datasetZdataset_metricseval_dataloaderr  	eval_loopr  total_batch_sizer   r   r   rL  	  sX   -




	zTrainer.evaluater  c           	   
   C   s   | j   | |}t }| jjr| jn| j}||d||d}| jj| jj	 }| d|j
v r:||j
| d 7 }| d|j
v rL||j
| d 7 }|j
t|||jt|j| d | j| j| j| j|j
| _| j |j
 t|j|j|j
dS )a  
        Run prediction and returns predictions and potential metrics.

        Depending on the dataset and your use case, your test dataset may contain labels. In that case, this method
        will also return metrics, like in `evaluate()`.

        Args:
            test_dataset (`Dataset`):
                Dataset to run the predictions on. If it is an `datasets.Dataset`, columns not accepted by the
                `model.forward()` method are automatically removed. Has to implement the method `__len__`
            ignore_keys (`List[str]`, *optional*):
                A list of keys in the output of your model (if it is a dictionary) that should be ignored when
                gathering predictions.
            metric_key_prefix (`str`, *optional*, defaults to `"test"`):
                An optional prefix to be used as the metrics key prefix. For example the metrics "bleu" will be named
                "test_bleu" if the prefix is "test" (default)

        <Tip>

        If your predictions or labels have different sequence length (for instance because you're doing dynamic padding
        in a token classification task) the predictions will be padded (on the right) to allow for concatenation into
        one array. The padding index is -100.

        </Tip>

        Returns: *NamedTuple* A namedtuple with the following keys:

            - predictions (`np.ndarray`): The predictions on `test_dataset`.
            - label_ids (`np.ndarray`, *optional*): The labels (if the dataset contained some).
            - metrics (`Dict[str, float]`, *optional*): The potential dictionary of metrics (if the dataset contained
              labels).
        
Prediction)r  rH  r  r  r  r  )predictionsr~  r_  )r  r  r  r  r   r  r  r  r  r  r_  r  re   r  r  r  r=  Z
on_predictrQ  rV  r]  rU   r  r~  )	rf  r  rH  r  Ztest_dataloaderr  r  r  r  r   r   r   predictq  s.   
$
	zTrainer.predictr  c                 C   s  | j }|dur	|n|j}| jr| jdu rt| ddd\}}| j| jd|d}t| jj	dkrt|| ju rtt

 }	| jsG| jrM| jjdkrM| j jsM| j|n| jj|dd}tt

 |	 d	| _| jre|| _|| jurm|| _| jrt| j| _| js|jr|jtj|jd
}n|jr|jtj|jd
}| j j}
td| d t|rtd| |  ntd td|
  |   t!| j"drt#| j"j r| j"   || j$_%t&|dd}|j'dkrd| _(t)| j j*dd}t)| j j*dd}t)| j j*dd}t)| j j*dd}d}i }d}t+|D ]N\}}t,|}|dur$||7 }|
du r$|}
| j-||||d\}}}t&| jdd}d|j.v rD| /|| nd}t0 rNt12  |dur`| 3|4|
}|5| |dur| jj6|ddd}| 3|}| j j7r}|dkr|5| |dur| jj6|ddd}|dur| jj6|ddd}| j8dur| 8||}| 3|}| j j7r|dkr|5| |dur| 3|}| j j7r|dkr|5| | j$9|| j:| j;| _;| j j7r0| j<dur%|dur%|dur%| jj=j>}i }d|j.v r|nd|d< d|j.v r|nd|d< | j<t?d(||d||d}~~~~tj@A  q|jBdurY|d |jB dkrY|C  |C  |C  |C  ~~~~tj@A  q| jjD| _3|j'rot!| drotE| d |F }|F }|F }|F }t|rt|}n tG|tHrt&|d ddkr|j}nt|r| |}n|}|dkr|dkr|}| j<dur|dur|dur| j j7sd|j.v r|nd|d< d|j.v r|nd|d< | <t?d(||d|}n|du ri }tI|}tG|tJr|rtKL|M N || d!< ntG|tKjOr%|M N || d!< t!| d"r3| jP|| d#< t!| d$rA| j|| d%< tJ|Q D ]}|R| d&s^|S||| d&| < qGtT||||d'S ))
        Prediction/evaluation loop, shared by `Trainer.evaluate()` and `Trainer.predict()`.

        Works both with or without labels.
        Nr   Tr  Z	inferenceFr  r  r  Zevaluation_moder  r   r   
***** Running  *****r  z  Num examples: Unknown  Batch size = r  r  Zpadding_indexrG  r  rC  r  r   )dimZ	pad_indexr  r  lossesr  r~  r   r  rA  _lossr  r  model_preparation_timer  r  r  r~  r_  r  r   )Ur   r  r)  r  r#   r  r   r!  r4  _modelsr  r0  r  Ztorch_compiler  prepare_modelr  r  r3  r  r-  r|  r   float16r   r.  rS  r  r  r  r`   rA  r  r"  r9  rF  r=  r  r  r  r  r<   Zeval_do_concat_batchesr  rD   prediction_stepinclude_for_metricsr  r   r  r  gather_functionrepeatr  Zpad_across_processesr  r   on_prediction_steprQ  rV  r   r  end_of_dataloaderrR   r\  r  eval_accumulation_stepsZto_cpu_and_numpygather_for_metricsr  Z
get_arraysr   r=   r\   r   r   ZconcatenaterP  r  r   r  r  rM  r  rQ   )rf  r  r  r  rH  r  r   r  r   r  r  r   Z
all_lossesZ	all_predsZ
all_labelsZ
all_inputsr_  eval_set_kwargsZobserved_num_examplesr  r  Zobserved_batch_sizer  r  r  r  inputs_decodeis_last_stepbatch_kwargsr  r;  r   r   r   r    s  


















 
 








zTrainer.evaluation_loopc                 C   s|   |du rdS t  r|du rd}t||}|S t rt|}|S | jjdur,| jjjdks8| jjdu r<| jjdkr<t|}|S )
        Gather value of `tensors` (tensor or list/tuple of nested tensors) and convert them to numpy before
        concatenating them to `gathered`
        NZnested_gatherNOr  )	r   rK   r   r   r   Zdistributed_stater  Z
local_rankrC   rf  Ztensorsnamer   r   r   rO    s   
zTrainer._nested_gatherc              	      s  t | jdkr	dntfdd| jD }dd}|du r"| j}t | jdkr-|r-dnd}|  du rJt| jdrHt| jj	d	d
g ng  |sN|rgt
tfdd| jD }t |dkrf|d }nd}t  t rt|}	|sz|rt|	tr|	d }
t fdd|	 D }n
|	d }
|	dd }|
   }t|}nd}t|	trt fdd|	 D }n|	}t|}n|s|r|   | j|dd\}}W d   n1 sw   Y  |  }t|trt fdd| D }nK|dd }nDd}|   |di }W d   n	1 s!w   Y  t|tr:t fdd| D }n|}| jjdkrL|| jjd  | _W d   n	1 sWw   Y  |rd|ddfS t
|}t |dkrs|d }|||fS )a  
        Perform an evaluation step on `model` using `inputs`.

        Subclass and override to inject custom behavior.

        Args:
            model (`nn.Module`):
                The model to evaluate.
            inputs (`Dict[str, Union[torch.Tensor, Any]]`):
                The inputs and targets of the model.

                The dictionary will be unpacked before being fed to the model. Most models expect the targets under the
                argument `labels`. Check your model's documentation for all accepted arguments.
            prediction_loss_only (`bool`):
                Whether or not to return the loss only.
            ignore_keys (`List[str]`, *optional*):
                A list of keys in the output of your model (if it is a dictionary) that should be ignored when
                gathering predictions.

        Return:
            Tuple[Optional[torch.Tensor], Optional[torch.Tensor], Optional[torch.Tensor]]: A tuple with the loss,
            logits and labels (each being optional).
        r   Fc                 3   s    | ]
}  |d uV  qd S r   r^  r   r  r   r   r     r  z*Trainer.prediction_step.<locals>.<genexpr>return_lossNTr  Zkeys_to_ignore_at_inferenceZpast_key_valuesc                 3   r  r   r  )r   r  r  r   r   r     r  r   r  c                 3   &    | ]\}}| d g vr|V  qdS r  Nr   rJ  rG  r   r   r        $ c                 3        | ]\}}| vr|V  qd S r   r   rJ  rG  r   r   r         )r  c                 3   r  r  r   rJ  rG  r   r   r     r  c                 3   r  r   r   rJ  rG  r   r   r     r  r   )r!  rY  allr^  rt   r  r"  r   r  r  rI   r5  r   r  r   r   r   r3  rT  r  r  r   r   r  r  rP  r   r  r  )rf  r   r  r  rH  Z
has_labelsr  Zloss_without_labelsr  Zraw_outputsr  Z	logits_mbr  r  r  r   )rH  r  r   r    sr   *









*

zTrainer.prediction_stepc                 C   s   t | jdr| j|S dS )a  
        For models that inherit from [`PreTrainedModel`], uses that method to compute the number of floating point
        operations for every backward + forward pass. If using another model, either implement such a method in the
        model or subclass and override this method.

        Args:
            inputs (`Dict[str, Union[torch.Tensor, Any]]`):
                The inputs and targets of the model.

        Returns:
            `int`: The number of floating-point operations.
        r  r   )r"  r   r  r  r   r   r   r    s   zTrainer.floating_point_opstokenc                 C   sn   |   sdS | jjdu rt| jj j}n| jj}|dur |n| jj}t||| jj	dd}|j
| _d| _dS )zE
        Initializes a git repo in `self.args.hub_model_id`.
        NT)r  privater   )r  r   r@  r   r   absoluter  	hub_tokenr   Zhub_private_reporepo_idpush_in_progress)rf  r  	repo_namerepo_urlr   r   r   rB    s   
zTrainer.init_hf_repolanguagelicensetagsr  finetuned_fromtasksdataset_tagsdataset_argsc
                 C   s  |   sdS tj| jjd}
d}tj|
rIt|
j	
d}|dk}t|
j	j}|durI|durIt|tr;|g}|D ]}||vrH|| q=tj| |||||||||	d
}| }t|
d}|| W d   n1 sqw   Y  |r| j| j| jj dS dS )a  
        Creates a draft of a model card using the information available to the `Trainer`.

        Args:
            language (`str`, *optional*):
                The language of the model (if applicable)
            license (`str`, *optional*):
                The license of the model. Will default to the license of the pretrained model used, if the original
                model given to the `Trainer` comes from a repo on the Hub.
            tags (`str` or `List[str]`, *optional*):
                Some tags to be included in the metadata of the model card.
            model_name (`str`, *optional*):
                The name of the model.
            finetuned_from (`str`, *optional*):
                The name of the model used to fine-tune this one (if applicable). Will default to the name of the repo
                of the original model given to the `Trainer` (if it comes from the Hub).
            tasks (`str` or `List[str]`, *optional*):
                One or several task identifiers, to be included in the metadata of the model card.
            dataset_tags (`str` or `List[str]`, *optional*):
                One or several dataset tags, to be included in the metadata of the model card.
            dataset (`str` or `List[str]`, *optional*):
                One or several dataset identifiers, to be included in the metadata of the model card.
            dataset_args (`str` or `List[str]`, *optional*):
               One or several dataset arguments, to be included in the metadata of the model card.
        Nz	README.mdFlibrary_namer   )	r  r  r  r  r  r	  r
  r  r  w)r  rD  rt  r  r   r   r@  r   r:  rJ  r^  r  r   r  r  r'   Zfrom_trainerZto_model_cardopenwriter4  r*   r   Zcreate_or_update_model_card)rf  r  r  r  r  r  r	  r
  r  r  Zmodel_card_filepathZis_peft_libraryr  Zexisting_tagsr-  Ztraining_summaryZ
model_cardr*  r   r   r   create_model_card(  sD   %

zTrainer.create_model_cardc              	   C   sB  |   r| jjtjkrd S | jjs| jd ur| j sd S | jj}t	t
tg}ttfD ]>}tj||}tj|rh|| t|}t| }W d    n1 sTw   Y  tt|d  }|| q*t rt|tttg |D ]}	tjtj||	rt tj||	tj||	 qv| j!d ur| j!"| t#$| jtj|t% | jj&t'j(krd| j)j* }
n	dt+| j)j, }
t-| j.||
| jj/ddt0 dgd}|g}| jjtj1tj2fv r| jjtj1krdnt3|j4}t-| j.|||
d	 | jj/dd
}|| | jd u s| j rt5|| _d S | jj6| d S )NZ
weight_mapzTraining in progress, step zTraining in progress, epoch Tr{  r  )r  folder_pathr  r  run_as_futureignore_patternszlast-checkpointz, checkpoint)r  r  path_in_repor  r  r  )7r  r   Zhub_strategyrT   ZENDZhub_always_pushr  is_doner   rl   rp   rn   ro   rm   rD  rt  r  r  r  r  jsonloadsreadr   r  r   extendr   ri   rk   rj   r  re  r   r  r   ry  r  r
  rW   rd  rQ  ru  r7  r  r   r@  r   rO   
CHECKPOINTZALL_CHECKPOINTSr   r  rr   jobs)rf  rn  r   Zmodeling_filesZ
index_fileZ
index_pathr*  r  Zshard_filesZmodeling_filer  Zmodel_push_jobZ	push_jobsr  Zcheckpoint_pushr   r   r   rl  r  sh   



 
	
zTrainer._push_from_checkpointc                 C   sB   t | dsd S | jd ur| j std | j  d S d S d S )Nr  z\Waiting for the current checkpoint push to be finished, this might take a couple of minutes.)r"  r  r  r  r  Zwait_until_donerm  r   r   r   r    s   

zTrainer._finish_current_pushEnd of trainingr  blockingrevisionc              	   K   s6  | dd}|du r%| jjr%| jjdu rt| jjj}n	| jjdd }|dur+|n| jj}| jdu r:| j	|d | j
dd |  sFdS t| jddduryd	|vrWg |d	< t|d	 tre|d	 g|d	< | jjD ]}||d	 vrx|d	 | qi| jdd|i| |   t| j| jj||| d
t dg|dS )u  
        Upload `self.model` and `self.processing_class` to the 🤗 model hub on the repo `self.args.hub_model_id`.

        Parameters:
            commit_message (`str`, *optional*, defaults to `"End of training"`):
                Message to commit while pushing.
            blocking (`bool`, *optional*, defaults to `True`):
                Whether the function should return only when the `git push` has finished.
            token (`str`, *optional*, defaults to `None`):
                Token with write permission to overwrite Trainer's original args.
            revision (`str`, *optional*):
                The git revision to commit from. Defaults to the head of the "main" branch.
            kwargs (`Dict[str, Any]`, *optional*):
                Additional keyword arguments passed along to [`~Trainer.create_model_card`].

        Returns:
            The URL of the repository where the model was pushed if `blocking=False`, or a `Future` object tracking the
            progress of the commit if `blocking=True`.
        r  N/r  )r  Trr  
model_tagsr  r{  r  )r  r  r  r  r  r  r  r   )r  r   rC  r@  r   r   r  r1  r   rB  rv  r  r  r   r   r  r   r  r  r  r   rO   )rf  r  r  r  r  r  r  Z	model_tagr   r   r   rA    s>   
zTrainer.push_to_hubc           &      C   s  | j }t|std|dur|n|j}| jr%| jdu r%t| ddd\}}| j| jd|d}t	| j
jdkrd|| ju rd| jsA| jrG| j
|n| j
j|dd}| jrU|| _|| jur]|| _| jrd| j| _| js|jrt|jtj|jd	}n|jr|jtj|jd	}t|d
dr|jn|j}	|	du rtd| |}
td| d td|
  td|	  d}d}d}d}d}i }td|j}t ||
|	d}|sd}t!|drt"|j#t$r|j#j}t ||
|d}t ||
|d}t ||
|d}|%  t!| j&dr
t'| j&j%r
| j&%  |j(dkrd| _)|| j*_+t,|D ]\}}| j-||||d\}}}t| jdd}d|j.v r@| /|| nd}|dur\|0|	}|du rS|ntj1||fdd}|duro|du rh|nt2||dd}|dur|du r{|nt2||dd}|dur|du r|nt2||dd}| j*3|| j4| j5| _5| j j6r| j7dur|dur|dur| j
j8j9}i } d|j.v r|nd| d< d|j.v r|nd| d< | j7t:d(||d| |d}| j j6s|j;dur.|d |j; dkr.|<| =|d |s|<| =|d  |<| =|d! |<| =|d" ~~~~tj>?  d#\}}}}q|j(r?t!| d$r?t@| d$ |<| =|d |sf|<| =|d  |<| =|d! |<| =|d" |A }!|sq|A nd}"|sz|A nd}#|s|A nd}$| j7dur|"dur|#dur| j j6sd|j.v r|!nd|d< d|j.v r|$nd|d< | 7t:d(|"|#d|}n|du ri }tB|}|!dur|!C D || d%< tE|F D ]}%|%G| d&s|H|%|| d&|% < qtI|"|#||
d'S ))r  z+dataloader must implement a working __len__Nr   Tr  Fr  r  r  Z_is_accelerate_preparedz\Batch size cannot be None. Ensure the dataloader has a valid batch_size or total_batch_size.r  r  r  r  r   )make_multiple_ofr  r  rG  r  rC  r  )r  r  r  r  r  r  r  Zeval_lossesZ
eval_predsZeval_label_idsZeval_inputs_ids)NNNNr  r  r  r  r   )Jr   r`   r	  r  r)  r  r#   r  r   r!  r4  r  r0  r  r  r3  r  r-  r|  r   r  r   r.  rS  r  r  r  rA  r  r  r  r  r;   r"  r   r  rA   r  r9  rF  r  r  r=  r  r  r  r  r  r  catrH   r  rQ  rV  r  r   r  r  rR   r  Z
add_arrays_gather_and_numpifyr\  r  r  finalizer\   rP  r  r   r  rM  r  rQ   )&rf  r  r  r  rH  r  r   r  r   r  rA  Zlosses_hostZ
preds_hostZlabels_hostZinputs_hostr_  r  r  Zeval_losses_gathererr!  Zpreds_gathererZlabels_gathererZinputs_gathererr  r  r  r  r  r  r  r  r  r  Z	eval_losspredsr~  Z
inputs_idsr;  r   r   r   r    s   





 




 

 





zTrainer.prediction_loopc                 C   sX   |du rdS t  rt||}t	|S t rt|}t	|S | jjtjkr(t|}t	|S )r  N)
r   rK   r   r   r   r*  rg   r+  rC   rJ   r  r   r   r   r#    s   
zTrainer._gather_and_numpifyc                 C   sB  |   sdS ddg}tjtj| jjdr7ttj| jjd}| }W d   n1 s1w   Y  nd}|}|D ]}||vrT|	drM||7 }q=|d| 7 }q=||krttj| jjdd}t
d|  || W d   n1 s}w   Y  | jd td	 | j s| jd
 | j  dS dS )z8Add SageMaker Checkpointing patterns to .gitignore file.Nz*.sagemaker-uploadingz*.sagemaker-uploadedz
.gitignorer  
r  z"Writing .gitignore file. Content: g      ?z'Add *.sagemaker patterns to .gitignore.)r  rD  rt  r@  r  repoZ	local_dirr  r  r  r  r  r  Zgit_addr  sleepZis_repo_cleanZ
git_commitZgit_push)rf  patternsr*  Zcurrent_contentcontentpatternr   r   r   _add_sm_patterns_to_gitignore  s6   




z%Trainer._add_sm_patterns_to_gitignorec           	   
      s  i }t dr| jjjd ur| jjj}d|v r&| jjdkr td|d | j_| jj  t drKg d}td#i  fdd|D }t drK| jj|_ 	d	}t d
s[|rZt
dn|rf| jjsftd ||_ 	d d| jji}t dr}||d< n|  t| jdr| jjd ur| jjdkrd| _tttdkrt| jjd|d< ntdtd#i || _| jj| _dt| jj  v rt!j"| j| jj#d| _t$| jj%dd d u| _&t$| jj%dd d u| _'t$| jj%dd d u| _| j'r| jj%j(}dD ]}t)||| jj*+|t$|| q|j,r| jj-rtd| j&r/t$| jdd d u r/| .  | jj/rP| j&s<| j'rP| jj0rP| j&rGdnd}t| d| j&rf| jj%jj1dkrf| jj2rftd | jj/r~| j'rd!t3| jj%j(j4v rtd"d S d S d S )$Nr   r  r   zThe `AcceleratorConfig`'s `num_steps` is set but `gradient_accumulation_steps` is greater than 1 in the passed `TrainingArguments`If using the passed `AcceleratorConfig` is desired, do not set the `TrainingArguments` `gradient_accumulation_steps`.)Zsplit_batchesZdispatch_batchesZeven_batchesZuse_seedable_samplerc                    s   i | ]}|  |qS r   )r  )r   ri  accelerator_configr   r   r    r   z>Trainer.create_accelerator_and_postprocess.<locals>.<dictcomp>z1.1.0non_blockingr  zp`non_blocking` is only supported in accelerate v0.30.0 and above. Please upgrade accelerate to use this feature.zx`non_blocking` is enabled but `dataloader_pin_memory` is not. For the best performance, it's recommended to enable both.gradient_accumulation_kwargsr\  dataloader_configtp_sizeTr   )r2  Ztorch_tp_pluginz4Requires accelerate>1.3.0 to use Tensor Parallelism.use_gather_object)r3  r  )Zlimit_all_gathersactivation_checkpointingzThe activation_checkpointing in FSDP config and the gradient_checkpointing in training arg can't be set to True simultaneously. Please use FSDP's activation_checkpointing logic when using FSDP.rZ  Z	DeepSpeedr  zJ can't be used with `save_only_model` along with `load_best_model_at_end`.   zo`auto_find_batch_size` isn't supported yet with DeepSpeed Zero-3. Please consider using Zero-2, Zero-1, or FSDPZSHARDED_STATE_DICTzWsave_only_model option is not compatible with FSDP state dict type 'SHARDED_STATE_DICT'r   )5rw   r   r.  r0  r  r	  to_dictr   Z	data_seedr  r$  r  r  r#  r/  r\  r  r"  r   r2  Zis_tp_enabledr   r   r  r   r   r4  r  r  r   r   r   r  r  r   Zeval_use_gather_objectr  rQ  r)  r0  r  rV  r'  r^  r4  r  r  rh  r  Z
zero_stager  r  r  )	rf  Zgrad_acc_kwargsr  r1  r/  r   r  ri  wrapperr   r-  r   r    s   





$


 z*Trainer.create_accelerator_and_postprocessc                 C   sB   ddl m} | jjj}||jj|_|jj|_|j| j	| dS )zO
        Sets values in the deepspeed plugin based on the Trainer args
        r   rM  N)
rY  rN  r4  rQ  r\  rO  r  Zdeepspeed_configr[  r   )rf  r  rN  Z	ds_pluginr   r   r   r  m  s
   

z#Trainer.propagate_args_to_deepspeedc                 C   s   | j rOt| jrQddlm} ddlm} t| jj|r$|| j| j	j
j_t| jdd tjkrS| jjjjjrUtttdkrW| j	j
jj| jjjjdd d S d S d S d S d S d S )Nr   )
PeftConfig)fsdp_auto_wrap_policyr   z0.27.0T)r  )r0  r   r   r   r8  Zpeft.utils.otherr9  r   Zactive_peft_configr4  rQ  r  r  r  r   r1  r   r&  Zbnb_4bit_quant_storager  r   r   r  Zset_mixed_precision)rf  r8  r9  r   r   r   r  y  s   

z"Trainer._fsdp_qlora_plugin_updatesc              	   C   s   g }d }t |D ]}z	|t| W q ty   Y  nw t|dko1d|d v o1| jp1| jd u}|rKztdd |D }W n tt	fyJ   Y nw |d urv| j
jr[| j| }t|rv||}| j
jdkrv| dkrv|d}||fS )Nr   r  c                 S   s   g | ]}|d   d qS )r  r  )ner  )r   rE  r   r   r   r     s    z-Trainer.get_batch_samples.<locals>.<listcomp>r   )rd  r  r  StopIterationr!  r6  r   r  r?  r>  r   r  r4  r  r   Z	is_tensorr|  r  r  Z	unsqueeze)rf  r  r  r   r  r  r  Zcount_num_items_in_batchr   r   r   r    s8   



zTrainer.get_batch_samplesr  c                 C   s  |j }|dk }t|rt|nd}|dur0t||j t||j dk d}|r0t|j| }|r\| 	|}|j dkrM|| t|| dk }	|| }
n,t|j}	| 	||j }
n|j dkrqt
j}	|}||j  }|j | }
ntd|j  |	|||
|||fS )a  
        Calculates and returns the following values:
        - `num_train_epochs`
        - `num_update_steps_per_epoch`
        - `num_examples`
        - `num_train_samples`
        - `epoch_based`
        - `len_dataloader`
        - `max_steps`
        r   Nr   zYargs.max_steps must be set to a positive value if dataloader does not have a length, was )rG  r`   r!  r  r  r7  r  r  rH  rA  sysmaxsizer	  )rf  r   r  r  rG  r  r  r  rA  rH  r  r   r   r   r    sN   





z#Trainer.set_initial_training_values)NNNNNNNNNNr   NN)rk  Nr   )NFN)F)TN)NNN)NNNNN)NNr  r  NNr0  )FN)NF)NNr  )Nr  )	NNNNNNNNN)r  TNN)r  
__module____qualname____doc__trainer_pt_utilsr   r   r   r   r   r   r
   r(   r   Modulerh   r	   r   r   r   r3  r  r2   r"   r   r/   r   rR   r   r8   r5  r   r  Z	Optimizerr:  ZLambdaLRr   r   r  rj  propertyr   setterrr  rt  r>  ry  rz  r2  r  r  r  rI  rJ  ZSamplerr  r7  r4  r   r  r  r  r  r  r  r  r  r  r  Z	parameterr   r  staticmethodr  r  rA  rG  r^  r6  rq  rm  r  r  r  r  r  r  rn  r  r
  r  r  r?  r  r  r  rQ  rR  rk  ri  r  rj  r  r  rS   rP   r  r	  r  r  r  r  r  r  r   r  rv  r  r  r  rO   r  rm  rL  rU   r  rQ   r  rO  r  r  rB  r  rl  r  rA  r  r#  r,  r  r  r  r  r  r   r   r   r   r   >  s   _

   !
$"
/"-'C$
   < .8
# 

r   h |)+&32As)	
&T&6
P43H+
%


i

D

 X

 i	

JB	
T

 ;'q-r   (  r@  r   re  r  r  importlib.metadatar   r   r  r  rD  r  r  r  r<  rk  r  r  collections.abcr   r   pathlibr   typingr   r   r   r	   r
   Zintegrationsr   Zhuggingface_hub.utilsrI  r  rY  r   r   Ztorch.distributeddistributedr  Zhuggingface_hubr   r   r   	packagingr   r   Ztorch.utils.datar   r   r   r   r   r  r   Zconfiguration_utilsr   Zdata.data_collatorr   r   r   Zdebug_utilsr   r   Z!feature_extraction_sequence_utilsr   Zfeature_extraction_utilsr   r  r    r!   Zimage_processing_utilsr"   Zintegrations.deepspeedr#   r$   r%   Zintegrations.tpur&   Z	modelcardr'   Zmodeling_utilsr(   r)   r*   Zmodels.auto.modeling_autor+   r,   optimizationr-   r.   Zprocessing_utilsr/   Zpytorch_utilsr0   r1   Ztokenization_utils_baser2   Ztrainer_callbackr3   r4   r5   r6   r7   r8   r9   r:   rA  r;   r<   r=   r>   r?   r@   rA   rB   rC   rD   rE   rF   rG   rH   rI   rJ   rK   rL   rM   rN   Ztrainer_utilsrO   rP   rQ   rR   rS   rT   rU   rV   rW   rX   rY   rZ   r[   r\   r]   r^   r_   r`   ra   rb   rc   rd   re   r  rf   rg   rh   ri   rj   rk   rl   rm   rn   ro   rp   rq   rr   rs   rt   ru   rv   rw   rx   ry   rz   r{   r|   r}   r~   r   r   r   r   r   r   r   r   r   r   r   r   r   r   r   r   r   r   r   Zutils.deprecationr   Zutils.import_utilsr   Zutils.quantization_configr   r<  r?  Zutils.notebookr   r   r   r  Ztorch_xla.core.xla_modelr   Z	xla_modelr  Ztorch_xla.debug.metricsr  r_  r  Ztorch_xla.runtimeZruntimera  Z	torch_xlaZXLA_VERSIONr   r`  Ztorch_xla.distributed.spmdZspmdrb  Z!smdistributed.modelparallel.torchZmodelparallelrP  Zsmdistributed.modelparallelZSMP_VERSIONrO  r   r   r   r   Zsafetensors.torchr=  r   r   r   r   r   r  Zaccelerate.stater   rX  r   r   r   r   r   r   r   ZDATA_SAMPLERSr   Zaccelerate.data_loaderr   r   r   r   r   r   rg  Z
get_loggerr  r  r  rx  r{  r  r|  r|  r'  r   r   r   r   r   <module>   s   (X
d+$




