from dataclasses import dataclass, field
from typing import Any, Optional

from transformers import TrainingArguments


@dataclass
class BCOConfig(TrainingArguments):
    r"""
    Configuration class for the [`BCOTrainer`].

    This class includes only the parameters that are specific to BCO training. For a full list of training arguments,
    please refer to the [`~transformers.TrainingArguments`] documentation. Note that default values in this class may
    differ from those in [`~transformers.TrainingArguments`].

    Using [`~transformers.HfArgumentParser`] we can turn this class into
    [argparse](https://docs.python.org/3/library/argparse#module-argparse) arguments that can be specified on the
    command line.
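
    Example (a minimal sketch of turning this config into command-line arguments; the `--output_dir` value is
    illustrative):

    ```python
    from transformers import HfArgumentParser

    from trl import BCOConfig

    parser = HfArgumentParser(BCOConfig)
    # Parse a small, illustrative argument list instead of `sys.argv`.
    (bco_config,) = parser.parse_args_into_dataclasses(args=["--output_dir", "bco-model"])
    print(bco_config.beta)  # 0.1, the default defined below
    ```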

    Parameters:
        max_length (`int` or `None`, *optional*, defaults to `1024`):
            Maximum length of the sequences (prompt + completion) in the batch. This argument is required if you want
            to use the default data collator.
        max_prompt_length (`int` or `None`, *optional*, defaults to `512`):
            Maximum length of the prompt. This argument is required if you want to use the default data collator.
        max_completion_length (`int` or `None`, *optional*, defaults to `None`):
            Maximum length of the completion. This argument is required if you want to use the default data collator
            and your model is an encoder-decoder.
        beta (`float`, *optional*, defaults to `0.1`):
            Parameter controlling the deviation from the reference model. Higher β means less deviation from the
            reference model.
        label_pad_token_id (`int`, *optional*, defaults to `-100`):
            Label pad token id. This argument is required if you want to use the default data collator.
        padding_value (`int` or `None`, *optional*, defaults to `None`):
            Padding value to use. If `None`, the padding value of the tokenizer is used.
        truncation_mode (`str`, *optional*, defaults to `"keep_end"`):
            Truncation mode to use when the prompt is too long. Possible values are `"keep_end"` or `"keep_start"`.
            This argument is required if you want to use the default data collator.
        disable_dropout (`bool`, *optional*, defaults to `True`):
            Whether to disable dropout in the model and reference model.
        generate_during_eval (`bool`, *optional*, defaults to `False`):
            If `True`, generates and logs completions from both the model and the reference model to W&B or Comet
            during evaluation.
        is_encoder_decoder (`bool` or `None`, *optional*, defaults to `None`):
            When using the `model_init` argument (callable) to instantiate the model instead of the `model` argument,
            you need to specify if the model returned by the callable is an encoder-decoder model.
        precompute_ref_log_probs (`bool`, *optional*, defaults to `False`):
            Whether to precompute reference model log probabilities for training and evaluation datasets. This is
            useful when training without the reference model to reduce the total GPU memory needed.
        model_init_kwargs (`dict[str, Any]` or `None`, *optional*, defaults to `None`):
            Keyword arguments to pass to `AutoModelForCausalLM.from_pretrained` when instantiating the model from a
            string.
        ref_model_init_kwargs (`dict[str, Any]` or `None`, *optional*, defaults to `None`):
            Keyword arguments to pass to `AutoModelForCausalLM.from_pretrained` when instantiating the reference model
            from a string.
        dataset_num_proc (`int` or `None`, *optional*, defaults to `None`):
            Number of processes to use for processing the dataset.
        prompt_sample_size (`int`, *optional*, defaults to `1024`):
            Number of prompts that are fed to density ratio classifier.
        min_density_ratio (`float`, *optional*, defaults to `0.5`):
            Minimum value of the density ratio. The estimated density ratio is clamped to this value.
        max_density_ratio (`float`, *optional*, defaults to `10.0`):
            Maximum value of the density ratio. The estimated density ratio is clamped to this value.
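
    Example (a minimal instantiation sketch; the values shown are illustrative, not tuned recommendations):

    ```python
    from trl import BCOConfig

    training_args = BCOConfig(
        output_dir="bco-model",         # inherited from `TrainingArguments`
        max_length=1024,                # prompt + completion length for the default data collator
        max_prompt_length=512,
        beta=0.1,                       # higher values keep the policy closer to the reference model
        precompute_ref_log_probs=True,  # trade a preprocessing pass for lower GPU memory during training
    )
    ```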
    Úmodel_init_kwargsÚref_model_init_kwargsé
   Úhelpz•Log every X updates steps. Should be an integer or a float in range `[0,1)`. If smaller than 1, will be interpreted as ratio of total training steps.)ÚdefaultÚmetadataÚlogging_stepsNzÑWhether to use bf16 (mixed) precision instead of 32-bit. Requires Ampere or higher NVIDIA architecture or Intel XPU or using CPU (use_cpu) or Ascend NPU. If not set, it defaults to `True` if `fp16` is not set.Úbf16i   z‹Maximum length of the sequences (prompt + completion) in the batch. This argument is required if you want to use the default data collator.Ú
max_lengthi   zeMaximum length of the prompt. This argument is required if you want to use the default data collator.Úmax_prompt_lengthzŽMaximum length of the completion. This argument is required if you want to use the default data collator and your model is an encoder-decoder.Úmax_completion_lengthgš™™™™™¹?uv   Parameter controlling the deviation from the reference model. Higher Î² means less deviation from the reference model.Úbetaiœÿÿÿz[Label pad token id. This argument is required if you want to use the default data collator.Úlabel_pad_token_idzLPadding value to use. If `None`, the padding value of the tokenizer is used.Úpadding_valueÚkeep_endz«Truncation mode to use when the prompt is too long. Possible values are `keep_end` or `keep_start`. This argument is required if you want to use the default data collator.Útruncation_modeTz<Whether to disable dropout in the model and reference model.Údisable_dropoutFzoIf `True`, generates and logs completions from both the model and the reference model to W&B during evaluation.Úgenerate_during_evalzÀWhen using the `model_init` argument (callable) to instantiate the model instead of the `model` argument, you need to specify if the model returned by the callable is an encoder-decoder model.Úis_encoder_decoderz½Whether to precompute reference model log probabilities for training and evaluation datasets. This is useful when training without the reference model to reduce the total GPU memory needed.Úprecompute_ref_log_probszoKeyword arguments to pass to `AutoModelForCausalLM.from_pretrained` when instantiating the model from a string.zyKeyword arguments to pass to `AutoModelForCausalLM.from_pretrained` when instantiating the reference model from a string.z6Number of processes to use for processing the dataset.Údataset_num_procz;Number of prompts that are fed to density ratio classifier.Úprompt_sample_sizeg      à?zYMinimum value of the density ratio. The estimated density ratio is clamped to this value.Úmin_density_ratiog      $@zYMaximum value of the density ratio. The estimated density ratio is clamped to this value.Úmax_density_ratioc                 óv   •— | j                   €| j                   n| j                   | _         t        ‰|   «        y )N)r   Úfp16ÚsuperÚ__post_init__)ÚselfÚ	__class__s    €úQ/home/cdr/jupyterlab/.venv/lib/python3.12/site-packages/trl/trainer/bco_config.pyr%   zBCOConfig.__post_init__Ë   s*   ø€ Ø'+§y¡yÐ'8˜Ÿ™‘O¸d¿i¹iˆŒ	ä‰ÑÕó    )$Ú__name__Ú
__module__Ú__qualname__Ú__doc__r   Ú_VALID_DICT_FIELDSr   r   ÚfloatÚ__annotations__r   r   Úboolr   Úintr   r   r   r   r   r   Ústrr   r   r   r   r
   Údictr   r   r   r   r    r!   r%   Ú__classcell__)r'   s   @r(   r	   r	      só  ø… ñ7ðr +×=Ñ=ÐATÐVmÐ@nÑnÐñ !Øàð Dð
ô€M5ó ñ !Øàð !ð
ô€Dˆ(4‰.ó ñ !&Øàð Vð
ô!€J˜‘ó ñ (-Øàð Vð
ô(Ðx ‘}ó ñ ,1Øàð Jð
ô,Ð˜8 C™=ó ñ Øàð Gð
ô€Dˆ%ó ñ $ØàÐqð
ôÐ˜ó ñ $)ØØÐhÐiô$€M8˜C‘=ó ñ !Øàð %ð
ô€OSó ñ "ØØÐXÐYô€OTó ñ "'Øàð (ð
ô"Ð˜$ó ñ */Øàð %ð
ô*Ð˜ ™ó ñ &+Øàð ð
ô&Ð˜dó ñ 38Øàð #ð
ô3Ðx  S¨# X¡Ñ/ó ñ 7<Øàð -ð
ô7Ð˜8 D¨¨c¨¡NÑ3ó ñ ',ØØÐRÐSô'Ðh˜s‘mó ñ $ØØÐWÐXôÐ˜ó ñ  %ØØÐuÐvô Ðuó ñ  %ØØÐuÐvô Ðuó ÷
 ð  r)   r	   N)	Údataclassesr   r   Útypingr   r   Útransformersr   r	   © r)   r(   ú<module>r:      s/   ð÷ )ß  å *ð ôx Ð!ó x ó ñx r)   