
import warnings

import torch

from .config import PromptEncoderConfig, PromptEncoderReparameterizationType


class PromptEncoder(torch.nn.Module):
    """
    The prompt encoder network that is used to generate the virtual token embeddings for p-tuning.

    Args:
        config ([`PromptEncoderConfig`]): The configuration of the prompt encoder.

    Example:

    ```py
    >>> from peft import PromptEncoder, PromptEncoderConfig

    >>> config = PromptEncoderConfig(
    ...     peft_type="P_TUNING",
    ...     task_type="SEQ_2_SEQ_LM",
    ...     num_virtual_tokens=20,
    ...     token_dim=768,
    ...     num_transformer_submodules=1,
    ...     num_attention_heads=12,
    ...     num_layers=12,
    ...     encoder_reparameterization_type="MLP",
    ...     encoder_hidden_size=768,
    ... )

    >>> prompt_encoder = PromptEncoder(config)
    ```

    **Attributes**:
        - **embedding** (`torch.nn.Embedding`) -- The embedding layer of the prompt encoder.
        - **mlp_head** (`torch.nn.Sequential`) -- The MLP head of the prompt encoder if `inference_mode=False`.
        - **lstm_head** (`torch.nn.LSTM`) -- The LSTM head of the prompt encoder if `inference_mode=False` and
          `encoder_reparameterization_type="LSTM"`.
        - **token_dim** (`int`) -- The hidden embedding dimension of the base transformer model.
        - **input_size** (`int`) -- The input size of the prompt encoder.
        - **output_size** (`int`) -- The output size of the prompt encoder.
        - **hidden_size** (`int`) -- The hidden size of the prompt encoder.
        - **total_virtual_tokens** (`int`) -- The total number of virtual tokens of the prompt encoder.
        - **encoder_type** (Union[[`PromptEncoderReparameterizationType`], `str`]) -- The encoder type of the prompt
          encoder.
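
    For instance, continuing the example above (a sketch: with
    `encoder_reparameterization_type="MLP"` the encoder holds an embedding table
    with `total_virtual_tokens = num_virtual_tokens * num_transformer_submodules`
    rows plus an MLP head; the reprs assume PyTorch defaults):

    ```py
    >>> prompt_encoder.embedding
    Embedding(20, 768)
    >>> type(prompt_encoder.mlp_head).__name__
    'Sequential'
    ```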


    Input shape: (`batch_size`, `total_virtual_tokens`)

    Output shape: (`batch_size`, `total_virtual_tokens`, `token_dim`)
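
    A minimal sketch of the forward pass, continuing the example above (the index
    tensor here is hypothetical; the shapes follow the description):

    ```py
    >>> import torch

    >>> indices = torch.arange(20).unsqueeze(0)  # (batch_size=1, total_virtual_tokens=20)
    >>> prompt_embeddings = prompt_encoder(indices)
    >>> tuple(prompt_embeddings.shape)  # (batch_size, total_virtual_tokens, token_dim)
    (1, 20, 768)
    ```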
    """

    def __init__(self, config):
        super().__init__()
        self.token_dim = config.token_dim
        self.input_size = self.token_dim
        self.output_size = self.token_dim
        self.hidden_size = config.encoder_hidden_size
        self.total_virtual_tokens = config.num_virtual_tokens * config.num_transformer_submodules
        self.encoder_type = config.encoder_reparameterization_type

        # embedding table holding the virtual token embeddings
        self.embedding = torch.nn.Embedding(self.total_virtual_tokens, self.token_dim)
        if not config.inference_mode:
            if self.encoder_type == PromptEncoderReparameterizationType.LSTM:
                lstm_dropout = config.encoder_dropout
                num_layers = config.encoder_num_layers
                # bidirectional LSTM followed by a two-layer MLP head
                self.lstm_head = torch.nn.LSTM(
                    input_size=self.input_size,
                    hidden_size=self.hidden_size,
                    num_layers=num_layers,
                    dropout=lstm_dropout,
                    bidirectional=True,
                    batch_first=True,
                )

                self.mlp_head = torch.nn.Sequential(
                    torch.nn.Linear(self.hidden_size * 2, self.hidden_size * 2),
                    torch.nn.ReLU(),
                    torch.nn.Linear(self.hidden_size * 2, self.output_size),
                )

            elif self.encoder_type == PromptEncoderReparameterizationType.MLP:
                encoder_num_layers_default = PromptEncoderConfig.encoder_num_layers
                if config.encoder_num_layers != encoder_num_layers_default:
                    warnings.warn(
                        f"for {self.encoder_type.value}, the argument `encoder_num_layers` is ignored. "
                        f"Exactly {encoder_num_layers_default} MLP layers are used."
                    )
                layers = [
                    torch.nn.Linear(self.input_size, self.hidden_size),
                    torch.nn.ReLU(),
                    torch.nn.Linear(self.hidden_size, self.hidden_size),
                    torch.nn.ReLU(),
                    torch.nn.Linear(self.hidden_size, self.output_size),
                ]
                self.mlp_head = torch.nn.Sequential(*layers)

            else:
                raise ValueError("Prompt encoder type not recognized. Please use one of MLP (recommended) or LSTM.")

    def forward(self, indices):
        input_embeds = self.embedding(indices)
        if self.encoder_type == PromptEncoderReparameterizationType.LSTM:
            output_embeds = self.mlp_head(self.lstm_head(input_embeds)[0])
        elif self.encoder_type == PromptEncoderReparameterizationType.MLP:
            output_embeds = self.mlp_head(input_embeds)
        else:
            raise ValueError("Prompt encoder type not recognized. Please use one of MLP (recommended) or LSTM.")

        return output_embeds