
from __future__ import annotations

from dataclasses import dataclass, field
from typing import Optional, Union

from peft.config import PeftConfig
from peft.utils import PeftType


@dataclass
class BOFTConfig(PeftConfig):
    """
    This is the configuration class to store the configuration of a [`BOFTModel`].

    Args:
        boft_block_size (`int`): BOFT block size across different layers.
        boft_block_num (`int`): Number of BOFT blocks per injected layer.
        boft_n_butterfly_factor (`int`): Number of butterfly factors across different layers.
        target_modules (`Union[List[str],str]`): The names of the modules to apply the adapter to.
        exclude_modules (`Optional[Union[List[str], str]]`):
            The names of the modules not to apply the adapter to. When passing a string, a regex match will be performed.
            When passing a list of strings, either an exact match will be performed or it is checked if the name of the
            module ends with any of the passed strings.
        boft_dropout (`float`):
            The multiplicative dropout probability: blocks of the OFT rotation are randomly set to the identity matrix
            during training, similar to the dropout layer in LoRA.
        fan_in_fan_out (`bool`): Set this to True if the layer to replace stores weight like (fan_in, fan_out).
            For example, gpt-2 uses `Conv1D` which stores weights like (fan_in, fan_out) and hence this should be set
            to `True`.
        bias (`str`): Bias type for BOFT. Can be 'none', 'all' or 'boft_only'. If 'all' or 'boft_only', the
            corresponding biases will be updated during training. Be aware that this means that, even when disabling
            the adapters, the model will not produce the same output as the base model would have without adaptation.
        modules_to_save (`List[str]`): List of modules apart from BOFT layers to be set as trainable
            and saved in the final checkpoint.
        init_weights (`bool`):
            Whether to initialize the weights of the BOFT layers with their default initialization. Don't change this
            setting, except if you know exactly what you're doing.
        layers_to_transform (`Union[List[int],int]`):
            The layer indexes to transform, if this argument is specified, it will apply the BOFT transformations on
            the layer indexes that are specified in this list. If a single integer is passed, it will apply the BOFT
            transformations on the layer at this index.
        layers_pattern (`Optional[Union[List[str], str]]`):
            The layer pattern name, used only if `layers_to_transform` is different from `None` and if the layer
            pattern is not in the common layers pattern. This should target the `nn.ModuleList` of the model, which is
            often called `'layers'` or `'h'`.
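
    Example (a minimal usage sketch, not from the upstream docstring; the checkpoint name and the `q_proj`/`v_proj`
    target modules below are illustrative assumptions):

        ```py
        >>> from transformers import AutoModelForCausalLM
        >>> from peft import BOFTConfig, get_peft_model

        >>> config = BOFTConfig(
        ...     boft_block_size=4,
        ...     boft_n_butterfly_factor=2,
        ...     target_modules=["q_proj", "v_proj"],
        ...     boft_dropout=0.1,
        ...     bias="boft_only",
        ... )
        >>> model = AutoModelForCausalLM.from_pretrained("facebook/opt-125m")
        >>> model = get_peft_model(model, config)
        ```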
    """

    boft_block_size: int = field(
        default=4,
        metadata={
            "help": "BOFT block size across different layers.",
            "note": (
                "You can only specify either boft_block_size or boft_block_num, but not both simultaneously, "
                "because boft_block_size x boft_block_num = layer dimension."
            ),
        },
    )
    boft_block_num: int = field(
        default=0,
        metadata={
            "help": "Number of BOFT blocks per injected layer.",
            "note": (
                "You can only specify either boft_block_size or boft_block_num, but not both simultaneously, "
                "because boft_block_size x boft_block_num = layer dimension."
            ),
        },
    )
    boft_n_butterfly_factor: int = field(
        default=1,
        metadata={
            "help": (
                "Number of butterfly factors. For example, with boft_n_butterfly_factor=2 the effective block size "
                "of OFT becomes twice as big and the number of blocks becomes half. Note: for "
                "boft_n_butterfly_factor=1, BOFT is the same as vanilla OFT."
            ),
        },
    )
    target_modules: Optional[Union[list[str], str]] = field(
        default=None,
        metadata={
            "help": "List of module names or regex expression of the module names to replace with BOFT.",
            "example": "For example, ['q', 'v'] or '.*decoder.*(SelfAttention|EncDecAttention).*(q|v)$' ",
        },
    )
    exclude_modules: Optional[Union[list[str], str]] = field(
        default=None,
        metadata={"help": "List of module names or regex expression of the module names to exclude from BOFT."},
    )
    boft_dropout: float = field(
        default=0.0,
        metadata={
            "help": (
                "BOFT multiplicative dropout, randomly setting blocks of OFT to be the identity matrix, similar to "
                "the dropout layer in LoRA."
            ),
        },
    )
    fan_in_fan_out: bool = field(
        default=False,
        metadata={"help": "Set this to True if the layer to replace stores weight like (fan_in, fan_out)"},
    )
    bias: str = field(default="none", metadata={"help": "Bias type for BOFT. Can be 'none', 'all' or 'boft_only'"})
    modules_to_save: Optional[list[str]] = field(
        default=None,
        metadata={
            "help": "List of modules apart from BOFT layers to be set as trainable and saved in the final checkpoint.",
            "note": (
                "For example, in Sequence Classification or Token Classification tasks, the final layer "
                "`classifier/score` is randomly initialized and as such needs to be trainable and saved."
            ),
        },
    )
    init_weights: bool = field(
        default=True,
        metadata={
            "help": (
                "Whether to initialize the weights of the BOFT layers with their default initialization. Don't change "
                "this setting, except if you know exactly what you're doing."
            ),
        },
    )
    layers_to_transform: Optional[Union[list[int], int]] = field(
        default=None,
        metadata={
            "help": (
                "The layer indexes to transform. If this argument is specified, PEFT will transform only the layer "
                "indexes that are specified inside this list. If a single integer is passed, PEFT will transform only "
                "the layer at this index."
            ),
        },
    )
    layers_pattern: Optional[Union[list[str], str]] = field(
        default=None,
        metadata={
            "help": (
                "The layer pattern name, used only if `layers_to_transform` is different from `None` and if the layer "
                "pattern is not in the common layers pattern. This should target the `nn.ModuleList` of the model, "
                "which is often called `'layers'` or `'h'`."
            ),
        },
    )

    def __post_init__(self):
        super().__post_init__()
        self.peft_type = PeftType.BOFT
        self.target_modules = (
            set(self.target_modules) if isinstance(self.target_modules, list) else self.target_modules
        )
        self.exclude_modules = (
            set(self.exclude_modules) if isinstance(self.exclude_modules, list) else self.exclude_modules
        )
        if self.layers_pattern and not self.layers_to_transform:
            raise ValueError("When `layers_pattern` is specified, `layers_to_transform` must also be specified.")
        if self.boft_block_size == 0 and self.boft_block_num == 0:
            raise ValueError(
                f"Either `boft_block_size` or `boft_block_num` must be non-zero. Currently, "
                f"boft_block_size = {self.boft_block_size} and boft_block_num = {self.boft_block_num}."
            )
        if not (self.boft_block_size != 0) ^ (self.boft_block_num != 0):
            raise ValueError(
                f"You can only specify either boft_block_size ({self.boft_block_size}) or "
                f"boft_block_num ({self.boft_block_num}), but not both simultaneously, because "
                f"boft_block_size x boft_block_num == in_features."
            )