
import math

import torch

from peft.utils.integrations import gather_params_ctx

from .config import PromptTuningInit


class PromptEmbedding(torch.nn.Module):
    """
    The model to encode virtual tokens into prompt embeddings.

    Args:
        config ([`PromptTuningConfig`]): The configuration of the prompt embedding.
        word_embeddings (`torch.nn.Module`): The word embeddings of the base transformer model.

    **Attributes**:
        - **embedding** (`torch.nn.Embedding`) -- The embedding layer of the prompt embedding.

    Example:

    ```py
    >>> from peft import PromptEmbedding, PromptTuningConfig

    >>> config = PromptTuningConfig(
    ...     peft_type="PROMPT_TUNING",
    ...     task_type="SEQ_2_SEQ_LM",
    ...     num_virtual_tokens=20,
    ...     token_dim=768,
    ...     num_transformer_submodules=1,
    ...     num_attention_heads=12,
    ...     num_layers=12,
    ...     prompt_tuning_init="TEXT",
    ...     prompt_tuning_init_text="Predict if sentiment of this review is positive, negative or neutral",
    ...     tokenizer_name_or_path="t5-base",
    ... )

    >>> # t5_model.shared is the word embeddings of the base model
    >>> prompt_embedding = PromptEmbedding(config, t5_model.shared)
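    >>> # a minimal forward-pass sketch (illustrative, not from the original example):
    >>> # with the config above, total_virtual_tokens = 20 * 1 and token_dim = 768
    >>> import torch
    >>> indices = torch.arange(20).unsqueeze(0)  # (batch_size=1, total_virtual_tokens=20)
    >>> prompt_embedding(indices).shape
    torch.Size([1, 20, 768])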
    ```

    Input Shape: (`batch_size`, `total_virtual_tokens`)

    Output Shape: (`batch_size`, `total_virtual_tokens`, `token_dim`)
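
    When `prompt_tuning_init` is `TEXT` (and not in inference mode), the embedding weights are
    initialized from the base model's word embeddings of the tokenized `prompt_tuning_init_text`,
    truncating or repeating the token ids as needed to fill `total_virtual_tokens`; otherwise the
    embedding keeps the default random initialization.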
    """

    def __init__(self, config, word_embeddings):
        super().__init__()

        total_virtual_tokens = config.num_virtual_tokens * config.num_transformer_submodules
        self.embedding = torch.nn.Embedding(total_virtual_tokens, config.token_dim)
        if config.prompt_tuning_init == PromptTuningInit.TEXT and not config.inference_mode:
            from transformers import AutoTokenizer

            tokenizer_kwargs = config.tokenizer_kwargs or {}
            tokenizer = AutoTokenizer.from_pretrained(config.tokenizer_name_or_path, **tokenizer_kwargs)
            init_text = config.prompt_tuning_init_text
            init_token_ids = tokenizer(init_text)["input_ids"]
            # Trim or repeat the init token ids until they cover total_virtual_tokens
            num_text_tokens = len(init_token_ids)
            if num_text_tokens > total_virtual_tokens:
                init_token_ids = init_token_ids[:total_virtual_tokens]
            elif num_text_tokens < total_virtual_tokens:
                num_reps = math.ceil(total_virtual_tokens / num_text_tokens)
                init_token_ids = init_token_ids * num_reps
            init_token_ids = init_token_ids[:total_virtual_tokens]
            init_token_ids = torch.LongTensor(init_token_ids).to(word_embeddings.weight.device)
            # Gather the (possibly sharded) word embedding weights before indexing into them
            with gather_params_ctx(word_embeddings.parameters()):
                word_embedding_weights = word_embeddings(init_token_ids).detach().clone()
            word_embedding_weights = word_embedding_weights.to(torch.float32)
            self.embedding.weight = torch.nn.Parameter(word_embedding_weights)

    def forward(self, indices):
        # Just look up the learned embeddings for the given virtual token indices
        prompt_embeddings = self.embedding(indices)
        return prompt_embeddings