from dataclasses import dataclass, field
from typing import Any, Optional

from transformers import TrainingArguments


@dataclass
class IterativeSFTConfig(TrainingArguments):
    r"""
    Configuration class for the [`IterativeSFTTrainer`].

    This class includes only the parameters that are specific to Iterative SFT training. For a full list of training
    arguments, please refer to the [`~transformers.TrainingArguments`] documentation. Note that default values in this
    class may differ from those in [`~transformers.TrainingArguments`].

    Using [`~transformers.HfArgumentParser`] we can turn this class into
    [argparse](https://docs.python.org/3/library/argparse#module-argparse) arguments that can be specified on the
    command line.
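
    For instance (a minimal sketch assuming the standard [`~transformers.HfArgumentParser`] workflow):

    ```python
    from transformers import HfArgumentParser

    from trl import IterativeSFTConfig

    parser = HfArgumentParser(IterativeSFTConfig)
    # Parses CLI flags such as `--max_length 512` into a config instance.
    (config,) = parser.parse_args_into_dataclasses()
    ```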

    Parameters:
        > Parameters that control the model

        model_init_kwargs (`dict[str, Any]` or `None`, *optional*, defaults to `None`):
            Keyword arguments for [`~transformers.AutoModelForCausalLM.from_pretrained`], used when the `model`
            argument of the [`IterativeSFTTrainer`] is provided as a string.

        > Parameters that control the data preprocessing

        max_length (`int` or `None`, *optional*, defaults to `None`):
            Maximum length of the tokenized sequence. Sequences longer than `max_length` are truncated.
        truncation_mode (`str`, *optional*, defaults to `"keep_end"`):
            The truncation mode to use, either `"keep_end"` or `"keep_start"`.
        optimize_device_cache (`bool`, *optional*, defaults to `False`):
            Whether to optimize accelerator cache for slightly more memory-efficient training.
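
    Example (an illustrative construction; the values below are placeholders, and `output_dir` is inherited from
    [`~transformers.TrainingArguments`]):

    ```python
    from trl import IterativeSFTConfig

    # Placeholder values for illustration; any `TrainingArguments` field is also accepted.
    config = IterativeSFTConfig(
        output_dir="iterative-sft-output",
        max_length=512,
        truncation_mode="keep_end",
    )
    ```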
    """

    _VALID_DICT_FIELDS = TrainingArguments._VALID_DICT_FIELDS + ["model_init_kwargs"]

    # Parameters whose default values are overridden from TrainingArguments
    logging_steps: float = field(
        default=10,
        metadata={
            "help": "Log every X updates steps. Should be an integer or a float in range `[0,1)`. If smaller than 1, "
            "will be interpreted as ratio of total training steps."
        },
    )
    bf16: Optional[bool] = field(
        default=None,
        metadata={
            "help": "Whether to use bf16 (mixed) precision instead of 32-bit. Requires Ampere or higher NVIDIA "
            "architecture or Intel XPU or using CPU (use_cpu) or Ascend NPU. If not set, it defaults to `True` if "
            "`fp16` is not set."
        },
    )

    # Parameters that control the model
    model_init_kwargs: Optional[dict[str, Any]] = field(
        default=None,
        metadata={
            "help": "Keyword arguments for `AutoModelForCausalLM.from_pretrained`, used when the `model` argument of "
            "the `IterativeSFTTrainer` is provided as a string."
        },
    )

    # Parameters that control the data preprocessing
    max_length: Optional[int] = field(
        default=None,
        metadata={
            "help": "Maximum length of the tokenized sequence. Sequences longer than `max_length` are truncated."
        },
    )
    truncation_mode: str = field(
        default="keep_end",
        metadata={"help": "The truncation mode to use, either 'keep_end' or 'keep_start'."},
    )
    optimize_device_cache: bool = field(
        default=False,
        metadata={"help": "Whether to optimize accelerator cache for slightly more memory-efficient training."},
    )

    def __post_init__(self):
        # Default bf16 to the opposite of fp16 when the user did not set it explicitly.
        self.bf16 = not self.fp16 if self.bf16 is None else self.bf16

        super().__post_init__()

        if self.truncation_mode not in ["keep_end", "keep_start"]:
            raise ValueError(f"truncation_mode must be either 'keep_end' or 'keep_start', got {self.truncation_mode}")