import torch


class PrefixEncoder(torch.nn.Module):
    r"""
    The `torch.nn` model to encode the prefix.

    Args:
        config ([`PrefixTuningConfig`]): The configuration of the prefix encoder.

    Example:

    ```py
    >>> from peft import PrefixEncoder, PrefixTuningConfig

    >>> config = PrefixTuningConfig(
    ...     peft_type="PREFIX_TUNING",
    ...     task_type="SEQ_2_SEQ_LM",
    ...     num_virtual_tokens=20,
    ...     token_dim=768,
    ...     num_transformer_submodules=1,
    ...     num_attention_heads=12,
    ...     num_layers=12,
    ...     encoder_hidden_size=768,
    ... )
    >>> prefix_encoder = PrefixEncoder(config)
    ```

    **Attributes**:
        - **embedding** (`torch.nn.Embedding`) -- The embedding layer of the prefix encoder.
        - **transform** (`torch.nn.Sequential`) -- The two-layer MLP to transform the prefix embeddings if
          `prefix_projection` is `True`.
        - **prefix_projection** (`bool`) -- Whether to project the prefix embeddings.

    Input shape: (`batch_size`, `num_virtual_tokens`)

    Output shape: (`batch_size`, `num_virtual_tokens`, `2*layers*hidden`)
    """

    def __init__(self, config):
        super().__init__()
        self.prefix_projection = config.prefix_projection
        token_dim = config.token_dim
        num_layers = config.num_layers
        encoder_hidden_size = config.encoder_hidden_size
        num_virtual_tokens = config.num_virtual_tokens
        if self.prefix_projection and not config.inference_mode:
            # Use a two-layer MLP to encode the prefix
            self.embedding = torch.nn.Embedding(num_virtual_tokens, token_dim)
            self.transform = torch.nn.Sequential(
                torch.nn.Linear(token_dim, encoder_hidden_size),
                torch.nn.Tanh(),
                torch.nn.Linear(encoder_hidden_size, num_layers * 2 * token_dim),
            )
        else:
            # Without projection, the embedding directly stores the flattened
            # past key values: one vector of size 2 * num_layers * token_dim
            # per virtual token.
            self.embedding = torch.nn.Embedding(num_virtual_tokens, num_layers * 2 * token_dim)

    def forward(self, prefix: torch.Tensor):
        if self.prefix_projection:
            prefix_tokens = self.embedding(prefix)
            past_key_values = self.transform(prefix_tokens)
        else:
            past_key_values = self.embedding(prefix)
        return past_key_values
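

# A minimal usage sketch, not part of the original module: it exercises the
# encoder with the exact `PrefixTuningConfig` shown in the class docstring and
# checks the documented output shape
# (`batch_size`, `num_virtual_tokens`, `2 * num_layers * token_dim`).
if __name__ == "__main__":
    from peft import PrefixTuningConfig

    config = PrefixTuningConfig(
        peft_type="PREFIX_TUNING",
        task_type="SEQ_2_SEQ_LM",
        num_virtual_tokens=20,
        token_dim=768,
        num_transformer_submodules=1,
        num_attention_heads=12,
        num_layers=12,
        encoder_hidden_size=768,
    )
    encoder = PrefixEncoder(config)
    # Prefix token ids of shape (batch_size, num_virtual_tokens).
    prefix = torch.arange(config.num_virtual_tokens).unsqueeze(0)
    past_key_values = encoder(prefix)
    # With prefix_projection left at its default (False), this prints
    # torch.Size([1, 20, 18432]), i.e. (1, 20, 2 * 12 * 768).
    print(past_key_values.shape)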