from __future__ import annotations

from dataclasses import dataclass, field
from typing import Literal, Optional, Union

from peft.config import PeftConfig
from peft.utils import PeftType


@dataclass
class C3AConfig(PeftConfig):
    """
    This is the configuration class to store the configuration of a [`C3AModel`].

    Args:
        block_size (`int`):
            block size for C3A, must be divisible by both the input size and the output size of the target layer. If
            you have no idea what block_size you should use, set it to the greatest common divisor of all input &
            output sizes of your target layers. Increasing this would result in fewer parameters.
        target_modules (`Union[list[str],str]`): The names of the modules to apply C3A to.
        bias (`str`): Bias type for C3A. Can be 'none', 'all' or 'c3a_only'. If 'all' or 'c3a_only', the
            corresponding biases will be updated during training. Be aware that this means that, even when disabling
            the adapters, the model will not produce the same output as the base model would have without adaptation.
        modules_to_save (`list[str]`): list of modules apart from C3A layers to be set as trainable
            and saved in the final checkpoint.
        layers_to_transform (`Union[list[int],int]`):
            The layer indexes to transform, if this argument is specified, it will apply C3A on the layer indexes that
            are specified in this list. If a single integer is passed, it will apply C3A on the layer at this index.
        layers_pattern (`str`):
            The layer pattern name, used only if `layers_to_transform` is different from `None` and if the layer
            pattern is not in the common layers pattern.
        block_size_pattern (`dict`):
            The mapping from layer names or regexp expressions to block_size values which are different from the
            default specified. For example, `{"model.decoder.layers.0.encoder_attn.k_proj": 1280}`.
        init_weights (`Union[bool, Literal["gaussian", "kaiming_uniform", "xavier_uniform"]]`):
            The initialization of the C3A weights. Set this to False if the weights should be initialized to a commonly
            used distribution. Set this to True if the weights should be initialized to zeros.
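
    Example:

        A minimal sketch (the checkpoint name and sizes below are illustrative
        assumptions, not values mandated by C3A):

        ```py
        >>> import math
        >>> from transformers import AutoModelForCausalLM
        >>> from peft import C3AConfig, get_peft_model

        >>> model = AutoModelForCausalLM.from_pretrained("facebook/opt-125m")
        >>> # opt-125m's q_proj/v_proj weights are 768x768, so any divisor of
        >>> # their GCD works as block_size; here we take the GCD itself.
        >>> block = math.gcd(768, 768)
        >>> config = C3AConfig(block_size=block, target_modules=["q_proj", "v_proj"])
        >>> peft_model = get_peft_model(model, config)
        ```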
       helpa#  block size for C3A, must be divisible by both the input size and the output size of the target layer. If you have no idea what block_size you should use, set it to the greatest common divisor of all input & output sizes of your target layers. Increasing this would result in less parameters.)defaultmetadataint
block_sizeNzlist of module names or regex expression of the module names to replace with C3A. For example, ['q', 'v'] or '.*decoder.*(SelfAttention|EncDecAttention).*(q|v)$' zOptional[Union[list[str], str]]target_modulesnonez5Bias type for C3A. Can be 'none', 'all' or 'c3a_only'strbiasa  list of modules apart from C3A layers to be set as trainable and saved in the final checkpoint. For example, in Sequence Classification or Token Classification tasks, the final layer `classifier/score` are randomly initialized and as such need to be trainable and saved.zOptional[list[str]]modules_to_savea  The layer indexes to transform, is this argument is specified, PEFT will transform only the layers indexes that are specified inside this list. If a single integer is passed, PEFT will transform only the layer at this index. This only works when target_modules is a list of str.zOptional[Union[list[int], int]]layers_to_transformzThe layer pattern name, used only if `layers_to_transform` is different to None and if the layer pattern is not in the common layers pattern. This only works when target_modules is a list of str.layers_patternzThe mapping from layer names or regexp expression to block_size which are different from the default specified. For example, `{model.decoder.layers.0.encoder_attn.k_proj: 1280`})default_factoryr   zOptional[dict]block_size_patternxavier_uniformzThe initialization of the C3A weights. Leave it as default or set it to False if the weights should be initialized with Xavier uniform, which is experimentally suitable for C3A. Set this to True if the weights should be initialized to zeros.zOOptional[Union[bool, Literal['gaussian', 'kaiming_uniform', 'xavier_uniform']]]init_weightsc                t   t         j                  | _        t        | j                  t
              rt        | j                        n| j                  | _        t        | j                  t              r| j                  t        d      t        | j                  t              r| j                  t        d      y y )NzD`layers_to_transform` cannot be used when `target_modules` is a str.z?`layers_pattern` cannot be used when `target_modules` is a str.)r
   C3A	peft_type
isinstancer   listsetr   r   
ValueErrorr   )selfs    Q/home/cdr/jupyterlab/.venv/lib/python3.12/site-packages/peft/tuners/c3a/config.py__post_init__zC3AConfig.__post_init__   s    !(243F3F(MC##$SWSfSf 	 d))3/D4L4L4Xcdd d))3/D4G4G4S^__ 5T/    )__name__