
    bi3                     X    d dl Z d dlmZmZ d dlmZmZ d dlmZ e G d de             Z	y)    N)	dataclassfield)AnyOptional)TrainingArgumentsc                   j    e Zd ZU dZej
                  dgz   Z edddi      Zee	d<    eddd	i      Z
ee	d
<    edddi      Zee   e	d<    edddi      Zee	d<    edddi      Zeeeef      e	d<    edddi      Zee   e	d<    edddi      Zee	d<    edddi      Zeeeef      e	d<    edddi      Zee   e	d<    edddi      Zee   e	d<    edddi      Zee   e	d<    eddd i      Zee   e	d!<    ed"dd#i      Zee	d$<    ed%dd&i      Zee	d'<    ed"dd(i      Zee	d)<    eddd*i      Zee   e	d+<    eddd,i      Zee   e	d-<    eddd.i      Z ee   e	d/<    ed"dd0i      Z!ee	d1<    ed"dd2i      Z"ee	d3<    eddd4i      Z#ee   e	d5<    fd6Z$ xZ%S )7	SFTConfigaC  
    Configuration class for the [`SFTTrainer`].

    This class includes only the parameters that are specific to SFT training. For a full list of training arguments,
    please refer to the [`~transformers.TrainingArguments`] documentation. Note that default values in this class may
    differ from those in [`~transformers.TrainingArguments`].

    Using [`~transformers.HfArgumentParser`] we can turn this class into
    [argparse](https://docs.python.org/3/library/argparse#module-argparse) arguments that can be specified on the
    command line.
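
    For example, a minimal sketch of parsing an `SFTConfig` from the command line (the script name and flag values
    below are illustrative only):

    ```python
    from transformers import HfArgumentParser

    from trl import SFTConfig

    # e.g. invoked as: python train.py --output_dir out --packing True --max_length 2048
    parser = HfArgumentParser(SFTConfig)
    (training_args,) = parser.parse_args_into_dataclasses()
    ```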

    Parameters:
        > Parameters that control the model

        model_init_kwargs (`dict[str, Any]` or `None`, *optional*, defaults to `None`):
            Keyword arguments for [`~transformers.AutoModelForCausalLM.from_pretrained`], used when the `model`
            argument of the [`SFTTrainer`] is provided as a string.
        chat_template_path (`str` or `None`, *optional*, defaults to `None`):
            If specified, sets the model's chat template. This can either be the path to a tokenizer (local directory
            or Hugging Face Hub model) or a direct path to a Jinja template file. When using a Jinja file, you must
            ensure that any special tokens referenced in the template are added to the tokenizer and that the model's
            embedding layer is resized accordingly.

        > Parameters that control the data preprocessing

        dataset_text_field (`str`, *optional*, defaults to `"text"`):
            Name of the column that contains text data in the dataset.
        dataset_kwargs (`dict[str, Any]` or `None`, *optional*, defaults to `None`):
            Dictionary of optional keyword arguments for the dataset preparation. The only supported key is
            `skip_prepare_dataset`.
        dataset_num_proc (`int` or `None`, *optional*, defaults to `None`):
            Number of processes to use for processing the dataset.
        eos_token (`str` or `None`, *optional*, defaults to `None`):
            Token used to indicate the end of a turn or sequence. If `None`, it defaults to
            `processing_class.eos_token`.
        pad_token (`str` or `None`, *optional*, defaults to `None`):
            Token used for padding. If `None`, it defaults to `processing_class.pad_token`, or if that is also `None`,
            it falls back to `processing_class.eos_token`.
        max_length (`int` or `None`, *optional*, defaults to `1024`):
            Maximum length of the tokenized sequence. Sequences longer than `max_length` are truncated from the right.
            If `None`, no truncation is applied. When packing is enabled, this value sets the sequence length.
        packing (`bool`, *optional*, defaults to `False`):
            Whether to group multiple sequences into fixed-length blocks to improve computational efficiency and reduce
            padding. Uses `max_length` to define sequence length.
        packing_strategy (`str`, *optional*, defaults to `"ffd"`):
            Strategy for packing sequences. Can be either `"ffd"` (first-fit decreasing, default), or `"wrapped"`.
        padding_free (`bool`, *optional*, defaults to `False`):
            Whether to perform forward passes without padding by flattening all sequences in the batch into a single
            continuous sequence. This reduces memory usage by eliminating padding overhead. Currently, this is only
            supported with the `flash_attention_2` attention implementation, which can efficiently handle the flattened
            batch structure. When packing is enabled with strategy `"ffd"`, padding-free is enabled, regardless of the
            value of this parameter.
        pad_to_multiple_of (`int` or `None`, *optional*, defaults to `None`):
            If set, the sequences will be padded to a multiple of this value.
        eval_packing (`bool` or `None`, *optional*, defaults to `None`):
            Whether to pack the eval dataset. If `None`, uses the same value as `packing`.

        > Parameters that control the training

        completion_only_loss (`bool` or `None`, *optional*, defaults to `None`):
            Whether to compute loss only on the completion part of the sequence. If set to `True`, loss is computed
            only on the completion, which is supported only for [prompt-completion](#prompt-completion) datasets. If
            `False`, loss is computed on the entire sequence. If `None` (default), the behavior depends on the dataset:
            loss is computed on the completion for [prompt-completion](#prompt-completion) datasets, and on the full
            sequence for [language modeling](#language-modeling) datasets.
        assistant_only_loss (`bool`, *optional*, defaults to `False`):
            Whether to compute loss only on the assistant part of the sequence. If set to `True`, loss is computed
            only on the assistant responses, which is supported only for [conversational](#conversational) datasets. If `False`,
            loss is computed on the entire sequence.
        activation_offloading (`bool`, *optional*, defaults to `False`):
            Whether to offload the activations to the CPU.
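
    Example (an illustrative configuration; the output directory is a placeholder and the values shown are not
    recommendations):

    ```python
    from trl import SFTConfig

    training_args = SFTConfig(
        output_dir="sft-output",    # placeholder output directory
        max_length=1024,            # truncate tokenized sequences from the right
        packing=True,               # pack sequences into fixed-length blocks of `max_length`
        completion_only_loss=None,  # let the dataset format determine where the loss is computed
    )
    ```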
    """

    _VALID_DICT_FIELDS = TrainingArguments._VALID_DICT_FIELDS + ["model_init_kwargs"]

    # Parameters whose default values are overridden from TrainingArguments
    learning_rate: float = field(
        default=2e-5,
        metadata={"help": "The initial learning rate for AdamW."},
    )
    logging_steps: float = field(
        default=10,
        metadata={
            "help": "Log every X updates steps. Should be an integer or a float in range `[0,1)`. If smaller than 1, "
            "will be interpreted as ratio of total training steps."
        },
    )
    bf16: Optional[bool] = field(
        default=None,
        metadata={
            "help": "Whether to use bf16 (mixed) precision instead of 32-bit. Requires Ampere or higher NVIDIA "
            "architecture or Intel XPU or using CPU (use_cpu) or Ascend NPU. If not set, it defaults to `True` if "
            "`fp16` is not set."
        },
    )
    average_tokens_across_devices: bool = field(
        default=True,
        metadata={
            "help": "Whether or not to average tokens across devices. If enabled, will use all_reduce to synchronize "
            "num_tokens_in_batch for precise loss calculation. Reference: "
            "https://github.com/huggingface/transformers/issues/34242"
        },
    )

    # Parameters that control the model
    model_init_kwargs: Optional[dict[str, Any]] = field(
        default=None,
        metadata={
            "help": "Keyword arguments for `AutoModelForCausalLM.from_pretrained`, used when the `model` argument of "
            "the `SFTTrainer` is provided as a string."
        },
    )
    chat_template_path: Optional[str] = field(
        default=None,
        metadata={
            "help": "If specified, sets the model's chat template. This can either be the path to a tokenizer (local "
            "directory or Hugging Face Hub model) or a direct path to a Jinja template file. When using a Jinja file, "
            "you must ensure that any special tokens referenced in the template are added to the tokenizer and that "
            "the model's embedding layer is resized accordingly."
        },
    )

    # Parameters that control the data preprocessing
    dataset_text_field: str = field(
        default="text",
        metadata={"help": "Name of the column that contains text data in the dataset."},
    )
    dataset_kwargs: Optional[dict[str, Any]] = field(
        default=None,
        metadata={
            "help": "Dictionary of optional keyword arguments for the dataset preparation. The only supported key is "
            "`skip_prepare_dataset`."
        },
    )
    dataset_num_proc: Optional[int] = field(
        default=None,
        metadata={"help": "Number of processes to use for processing the dataset."},
    )
    eos_token: Optional[str] = field(
        default=None,
        metadata={
            "help": "Token used to indicate the end of a turn or sequence. If `None`, it defaults to "
            "`processing_class.eos_token`."
        },
    )
    pad_token: Optional[str] = field(
        default=None,
        metadata={
            "help": "Token used for padding. If `None`, it defaults to `processing_class.pad_token`, or if that is "
            "also `None`, it falls back to `processing_class.eos_token`."
        },
    )
    max_length: Optional[int] = field(
        default=1024,
        metadata={
            "help": "Maximum length of the tokenized sequence. Sequences longer than `max_length` are truncated from "
            "the right. If `None`, no truncation is applied. When packing is enabled, this value sets the sequence "
            "length."
        },
    )
    packing: bool = field(
        default=False,
        metadata={
            "help": "Whether to group multiple sequences into fixed-length blocks to improve computational efficiency "
            "and reduce padding. Uses `max_length` to define sequence length."
        },
    )
    packing_strategy: str = field(
        default="ffd",
        metadata={
            "help": "Strategy for packing sequences. Can be either `'ffd'` (first-fit decreasing, default), or "
            "`'wrapped'`."
        },
    )
    padding_free: bool = field(
        default=False,
        metadata={
            "help": "Whether to perform forward passes without padding by flattening all sequences in the batch into "
            "a single continuous sequence. This reduces memory usage by eliminating padding overhead. Currently, this "
            "is only supported with the `flash_attention_2` attention implementation, which can efficiently handle "
            "the flattened batch structure. When packing is enabled with strategy `'ffd'`, padding-free is enabled, "
            "regardless of the value of this parameter."
        },
    )
    pad_to_multiple_of: Optional[int] = field(
        default=None,
        metadata={"help": "If set, the sequences will be padded to a multiple of this value."},
    )
    eval_packing: Optional[bool] = field(
        default=None,
        metadata={"help": "Whether to pack the eval dataset. If `None`, uses the same value as `packing`."},
    )

    # Parameters that control the training
    completion_only_loss: Optional[bool] = field(
        default=None,
        metadata={
            "help": "Whether to compute loss only on the completion part of the sequence. If set to `True`, loss is "
            "computed only on the completion, which is supported only for prompt-completion datasets. If `False`, "
            "loss is computed on the entire sequence. If `None` (default), the behavior depends on the dataset: loss "
            "is computed on the completion for prompt-completion datasets, and on the full sequence for language "
            "modeling datasets."
        },
    )
    assistant_only_loss: bool = field(
        default=False,
        metadata={
            "help": "Whether to compute loss only on the assistant part of the sequence. If set to `True`, loss is "
            "computed only on the assistant responses, which is supported only for conversational datasets. If "
            "`False`, loss is computed on the entire sequence."
        },
    )
    activation_offloading: bool = field(
        default=False,
        metadata={"help": "Whether to offload the activations to the CPU."},
    )

    # Deprecated parameters
    max_seq_length: Optional[int] = field(
        default=None,
        metadata={
            "help": "This parameter is deprecated and will be removed in version 0.20.0. Use `max_length` instead."
        },
    )

    def __post_init__(self):
        # If not explicitly set, `bf16` defaults to the opposite of `fp16` (see the `bf16` help text).
        self.bf16 = not (self.fp16) if self.bf16 is None else self.bf16

        super().__post_init__()

        if self.max_seq_length is not None:
            warnings.warn(
                "`max_seq_length` is deprecated and will be removed in version 0.20.0. Use `max_length` instead.",
                DeprecationWarning,
            )
            self.max_length = self.max_seq_length