
import inspect
import json
import os
from functools import partial
from pathlib import Path
from typing import Dict, List, Literal, Optional, Union

import safetensors
import torch

from ..utils import (
    MIN_PEFT_VERSION,
    USE_PEFT_BACKEND,
    check_peft_version,
    convert_unet_state_dict_to_peft,
    delete_adapter_layers,
    get_adapter_name,
    is_peft_available,
    is_peft_version,
    logging,
    set_adapter_layers,
    set_weights_and_activate_adapters,
)
from ..utils.peft_utils import _create_lora_config, _maybe_warn_for_unhandled_keys
from .lora_base import _fetch_state_dict, _func_optionally_disable_offloading
from .unet_loader_utils import _maybe_expand_lora_scales


logger = logging.get_logger(__name__)

# Models whose `set_adapters()` weights need expanding use `_maybe_expand_lora_scales`;
# all other models pass the weights through unchanged.
_SET_ADAPTER_SCALE_FN_MAPPING = {
    "UNet2DConditionModel": _maybe_expand_lora_scales,
    "UNetMotionModel": _maybe_expand_lora_scales,
    "SD3Transformer2DModel": lambda model_cls, weights: weights,
    "FluxTransformer2DModel": lambda model_cls, weights: weights,
    "CogVideoXTransformer3DModel": lambda model_cls, weights: weights,
    "ConsisIDTransformer3DModel": lambda model_cls, weights: weights,
    "MochiTransformer3DModel": lambda model_cls, weights: weights,
    "HunyuanVideoTransformer3DModel": lambda model_cls, weights: weights,
    "LTXVideoTransformer3DModel": lambda model_cls, weights: weights,
    "SanaTransformer2DModel": lambda model_cls, weights: weights,
    "AuraFlowTransformer2DModel": lambda model_cls, weights: weights,
    "Lumina2Transformer2DModel": lambda model_cls, weights: weights,
    "WanTransformer3DModel": lambda model_cls, weights: weights,
    "CogView4Transformer2DModel": lambda model_cls, weights: weights,
    "HiDreamImageTransformer2DModel": lambda model_cls, weights: weights,
    "HunyuanVideoFramepackTransformer3DModel": lambda model_cls, weights: weights,
    "WanVACETransformer3DModel": lambda model_cls, weights: weights,
    "ChromaTransformer2DModel": lambda model_cls, weights: weights,
    "QwenImageTransformer2DModel": lambda model_cls, weights: weights,
}


class PeftAdapterMixin:
    r"""
    A class containing all functions for loading and using adapter weights that are supported in the PEFT
    library. For more details about adapters and injecting them into a base model, check out the PEFT
    [documentation](https://huggingface.co/docs/peft/index).

    Install the latest version of PEFT, and use this mixin to:

    - Attach new adapters in the model.
    - Attach multiple adapters and iteratively activate/deactivate them.
    - Activate/deactivate all adapters from the model.
    - Get a list of the active adapters.
    """

    _hf_peft_config_loaded = False
    # kwargs for `prepare_model_for_compiled_hotswap`, if required
    _prepare_lora_hotswap_kwargs: Optional[dict] = None

    @classmethod
    def _optionally_disable_offloading(cls, _pipeline):
        return _func_optionally_disable_offloading(_pipeline=_pipeline)

    def load_lora_adapter(
        self, pretrained_model_name_or_path_or_dict, prefix="transformer", hotswap: bool = False, **kwargs
    ):
        r"""
        Loads a LoRA adapter into the underlying model.

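        Example (a minimal sketch; the checkpoint and file name reuse the ones from the examples further below, and
        `prefix="unet"` assumes the LoRA file namespaces its keys with `unet.`):

        ```py
        from diffusers import AutoPipelineForText2Image
        import torch

        pipeline = AutoPipelineForText2Image.from_pretrained(
            "stabilityai/stable-diffusion-xl-base-1.0", torch_dtype=torch.float16
        ).to("cuda")
        pipeline.unet.load_lora_adapter(
            "jbilcke-hf/sdxl-cinematic-1",
            weight_name="pytorch_lora_weights.safetensors",
            prefix="unet",
            adapter_name="cinematic",
        )
        ```
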
        Parameters:
            pretrained_model_name_or_path_or_dict (`str` or `os.PathLike` or `dict`):
                Can be either:

                    - A string, the *model id* (for example `google/ddpm-celebahq-256`) of a pretrained model hosted on
                      the Hub.
                    - A path to a *directory* (for example `./my_model_directory`) containing the model weights saved
                      with [`ModelMixin.save_pretrained`].
                    - A [torch state
                      dict](https://pytorch.org/tutorials/beginner/saving_loading_models.html#what-is-a-state-dict).

            prefix (`str`, *optional*): Prefix to filter the state dict.

            cache_dir (`Union[str, os.PathLike]`, *optional*):
                Path to a directory where a downloaded pretrained model configuration is cached if the standard cache
                is not used.
            force_download (`bool`, *optional*, defaults to `False`):
                Whether or not to force the (re-)download of the model weights and configuration files, overriding the
                cached versions if they exist.
            proxies (`Dict[str, str]`, *optional*):
                A dictionary of proxy servers to use by protocol or endpoint, for example, `{'http': 'foo.bar:3128',
                'http://hostname': 'foo.bar:4012'}`. The proxies are used on each request.
            local_files_only (`bool`, *optional*, defaults to `False`):
                Whether to only load local model weights and configuration files or not. If set to `True`, the model
                won't be downloaded from the Hub.
            token (`str` or *bool*, *optional*):
                The token to use as HTTP bearer authorization for remote files. If `True`, the token generated from
                `diffusers-cli login` (stored in `~/.huggingface`) is used.
            revision (`str`, *optional*, defaults to `"main"`):
                The specific model version to use. It can be a branch name, a tag name, a commit id, or any identifier
                allowed by Git.
            subfolder (`str`, *optional*, defaults to `""`):
                The subfolder location of a model file within a larger model repository on the Hub or locally.
            network_alphas (`Dict[str, float]`):
                The value of the network alpha used for stable learning and preventing underflow. This value has the
                same meaning as the `--network_alpha` option in the kohya-ss trainer script. Refer to [this
                link](https://github.com/darkstorm2150/sd-scripts/blob/main/docs/train_network_README-en.md#execute-learning).
            low_cpu_mem_usage (`bool`, *optional*):
                Speed up model loading by only loading the pretrained LoRA weights and not initializing the random
                weights.
            hotswap (`bool`, *optional*, defaults to `False`):
                Whether to substitute an existing (LoRA) adapter with the newly loaded adapter
                in-place. This means that, instead of loading an additional adapter, this will take the existing
                adapter weights and replace them with the weights of the new adapter. This can be faster and more
                memory efficient. However, the main advantage of hotswapping is that when the model is compiled with
                torch.compile, loading the new adapter does not require recompilation of the model. When using
                hotswapping, the passed `adapter_name` should be the name of an already loaded adapter.

                If the new adapter and the old adapter have different ranks and/or LoRA alphas (i.e. scaling), you need
                to call an additional method before loading the adapter:

                ```py
                pipeline = ...  # load diffusers pipeline
                max_rank = ...  # the highest rank among all LoRAs that you want to load
                # call *before* compiling and loading the LoRA adapter
                pipeline.enable_lora_hotswap(target_rank=max_rank)
                pipeline.load_lora_weights(file_name)
                # optionally compile the model now
                ```

                Note that hotswapping adapters of the text encoder is not yet supported. There are some further
                limitations to this technique, which are documented here:
                https://huggingface.co/docs/peft/main/en/package_reference/hotswap
            metadata:
                LoRA adapter metadata. When supplied, the metadata inferred through the state dict isn't used to
                initialize `LoraConfig`.
        """
        from peft import inject_adapter_in_model, set_peft_model_state_dict
        from peft.tuners.tuners_utils import BaseTunerLayer

        from ..hooks.group_offloading import _maybe_remove_and_reapply_group_offloading

        cache_dir = kwargs.pop("cache_dir", None)
        force_download = kwargs.pop("force_download", False)
        proxies = kwargs.pop("proxies", None)
        local_files_only = kwargs.pop("local_files_only", None)
        token = kwargs.pop("token", None)
        revision = kwargs.pop("revision", None)
        subfolder = kwargs.pop("subfolder", None)
        weight_name = kwargs.pop("weight_name", None)
        use_safetensors = kwargs.pop("use_safetensors", None)
        adapter_name = kwargs.pop("adapter_name", None)
        network_alphas = kwargs.pop("network_alphas", None)
        _pipeline = kwargs.pop("_pipeline", None)
        low_cpu_mem_usage = kwargs.pop("low_cpu_mem_usage", False)
        metadata = kwargs.pop("metadata", None)
        allow_pickle = False

        if low_cpu_mem_usage and is_peft_version("<=", "0.13.0"):
            raise ValueError(
                "`low_cpu_mem_usage=True` is not compatible with this `peft` version. Please update it with `pip install -U peft`."
            )

        user_agent = {"file_type": "attn_procs_weights", "framework": "pytorch"}

        state_dict, metadata = _fetch_state_dict(
            pretrained_model_name_or_path_or_dict=pretrained_model_name_or_path_or_dict,
            weight_name=weight_name,
            use_safetensors=use_safetensors,
            local_files_only=local_files_only,
            cache_dir=cache_dir,
            force_download=force_download,
            proxies=proxies,
            token=token,
            revision=revision,
            subfolder=subfolder,
            user_agent=user_agent,
            allow_pickle=allow_pickle,
            metadata=metadata,
        )
        if network_alphas is not None and prefix is None:
            raise ValueError("`network_alphas` cannot be None when `prefix` is None.")
        if network_alphas and metadata:
            raise ValueError("Both `network_alphas` and `metadata` cannot be specified.")

        if prefix is not None:
            state_dict = {k.removeprefix(f"{prefix}."): v for k, v in state_dict.items() if k.startswith(f"{prefix}.")}
            if metadata is not None:
                metadata = {k.removeprefix(f"{prefix}."): v for k, v in metadata.items() if k.startswith(f"{prefix}.")}

        if len(state_dict) > 0:
            if adapter_name in getattr(self, "peft_config", {}) and not hotswap:
                raise ValueError(
                    f"Adapter name {adapter_name} already in use in the model - please select a new adapter name."
                )
            elif adapter_name not in getattr(self, "peft_config", {}) and hotswap:
                raise ValueError(
                    f"Trying to hotswap LoRA adapter '{adapter_name}' but there is no existing adapter by that name. "
                    "Please choose an existing adapter name or set `hotswap=False` to prevent hotswapping."
                )

            # Check with the first key whether the state dict is already in PEFT format.
            first_key = next(iter(state_dict.keys()))
            if "lora_A" not in first_key:
                state_dict = convert_unet_state_dict_to_peft(state_dict)

            rank = {}
            for key, val in state_dict.items():
                # Cannot figure out the rank from LoRA layers that don't have at least 2 dimensions;
                # bias layers in LoRA only have a single dimension.
                if "lora_B" in key and val.ndim > 1:
                    # The `^` anchor disambiguates module names that share a common suffix
                    # (e.g. `proj_out` and `blocks.1.proj_out`) but have different LoRA ranks.
                    rank[f"^{key}"] = val.shape[1]

            if network_alphas is not None and len(network_alphas) >= 1:
                alpha_keys = [k for k in network_alphas.keys() if k.startswith(f"{prefix}.")]
                network_alphas = {
                    k.removeprefix(f"{prefix}."): v for k, v in network_alphas.items() if k in alpha_keys
                }

            lora_config = _create_lora_config(state_dict, network_alphas, metadata, rank)

            if adapter_name is None:
                adapter_name = get_adapter_name(self)

            # In case the pipeline has already been offloaded to CPU - temporarily remove the hooks,
            # otherwise the device map won't work properly.
            is_model_cpu_offload, is_sequential_cpu_offload, is_group_offload = self._optionally_disable_offloading(
                _pipeline
            )

            peft_kwargs = {}
            if is_peft_version(">=", "0.13.1"):
                peft_kwargs["low_cpu_mem_usage"] = low_cpu_mem_usage

            # If setting the state dict fails, also delete the `peft_config` associated with the `adapter_name`.
            try:
                if hotswap or (self._prepare_lora_hotswap_kwargs is not None):
                    if is_peft_version(">", "0.14.0"):
                        from peft.utils.hotswap import (
                            check_hotswap_configs_compatible,
                            hotswap_adapter_from_state_dict,
                            prepare_model_for_compiled_hotswap,
                        )
                    else:
                        msg = (
                            "Hotswapping requires PEFT > v0.14. Please upgrade PEFT to a higher version or install "
                            "it from source."
                        )
                        raise ImportError(msg)

                if hotswap:

                    def map_state_dict_for_hotswap(sd):
                        # For hotswapping, the adapter name needs to be present in the state dict keys.
                        new_sd = {}
                        for k, v in sd.items():
                            if k.endswith("lora_A.weight") or k.endswith("lora_B.weight"):
                                k = k[: -len(".weight")] + f".{adapter_name}.weight"
                            elif k.endswith("lora_B.bias"):  # lora_bias=True option
                                k = k[: -len(".bias")] + f".{adapter_name}.bias"
                            new_sd[k] = v
                        return new_sd

                    state_dict = map_state_dict_for_hotswap(state_dict)
                    check_hotswap_configs_compatible(self.peft_config[adapter_name], lora_config)
                    try:
                        hotswap_adapter_from_state_dict(
                            model=self,
                            state_dict=state_dict,
                            adapter_name=adapter_name,
                            config=lora_config,
                        )
                    except Exception as e:
                        logger.error(f"Hotswapping {adapter_name} was unsuccessful with the following error: \n{e}")
                        raise
                    # The hotswap function raises if there are incompatible keys, so none are left to report here.
                    incompatible_keys = None
                else:
                    inject_adapter_in_model(lora_config, self, adapter_name=adapter_name, **peft_kwargs)
                    incompatible_keys = set_peft_model_state_dict(self, state_dict, adapter_name, **peft_kwargs)

                    if self._prepare_lora_hotswap_kwargs is not None:
                        # For hotswapping of compiled models or adapters with different ranks. This must run after
                        # the first adapter was loaded and before the model is compiled and the second adapter is
                        # hotswapped in, and it only needs to run once.
                        prepare_model_for_compiled_hotswap(
                            self, config=lora_config, **self._prepare_lora_hotswap_kwargs
                        )
                        self._prepare_lora_hotswap_kwargs = None

                if not self._hf_peft_config_loaded:
                    self._hf_peft_config_loaded = True
            except Exception as e:
                # In case `inject_adapter_in_model()` was unsuccessful even before injecting the `peft_config`.
                if hasattr(self, "peft_config"):
                    for module in self.modules():
                        if isinstance(module, BaseTunerLayer):
                            active_adapters = module.active_adapters
                            for active_adapter in active_adapters:
                                if adapter_name in active_adapter:
                                    module.delete_adapter(adapter_name)

                    self.peft_config.pop(adapter_name)
                logger.error(f"Loading {adapter_name} was unsuccessful with the following error: \n{e}")
                raise

            _maybe_warn_for_unhandled_keys(incompatible_keys, adapter_name)

            # Offload back.
            if is_model_cpu_offload:
                _pipeline.enable_model_cpu_offload()
            elif is_sequential_cpu_offload:
                _pipeline.enable_sequential_cpu_offload()
            elif is_group_offload:
                for component in _pipeline.components.values():
                    if isinstance(component, torch.nn.Module):
                        _maybe_remove_and_reapply_group_offloading(component)

        if prefix is not None and not state_dict:
            model_class_name = self.__class__.__name__
            logger.warning(
                f"No LoRA keys associated to {model_class_name} found with the {prefix=}. "
                "This is safe to ignore if LoRA state dict didn't originally have any "
                f"{model_class_name} related params. You can also try specifying `prefix=None` to resolve the "
                "warning. Otherwise, open an issue if you think it's unexpected: "
                "https://github.com/huggingface/diffusers/issues/new"
            )

    def save_lora_adapter(
        self,
        save_directory,
        adapter_name: str = "default",
        upcast_before_saving: bool = False,
        safe_serialization: bool = True,
        weight_name: Optional[str] = None,
    ):
        r"""
        Save the LoRA parameters corresponding to the underlying model.

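        Example (a sketch; the target directory and adapter name are illustrative and assume a LoRA adapter named
        "cinematic" was loaded into the model beforehand):

        ```py
        pipeline.unet.save_lora_adapter("./cinematic-lora", adapter_name="cinematic")
        ```
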
        Arguments:
            save_directory (`str` or `os.PathLike`):
                Directory to save LoRA parameters to. Will be created if it doesn't exist.
            adapter_name (`str`, defaults to `"default"`): The name of the adapter to serialize. Useful when the
                underlying model has multiple adapters loaded.
            upcast_before_saving (`bool`, defaults to `False`):
                Whether to cast the underlying model to `torch.float32` before serialization.
            safe_serialization (`bool`, *optional*, defaults to `True`):
                Whether to save the model using `safetensors` or the traditional PyTorch way with `pickle`.
            weight_name (`str`, *optional*, defaults to `None`): Name of the file to serialize the state dict with.
        """
        from peft.utils import get_peft_model_state_dict

        from .lora_base import LORA_ADAPTER_METADATA_KEY, LORA_WEIGHT_NAME, LORA_WEIGHT_NAME_SAFE

        if adapter_name is None:
            adapter_name = get_adapter_name(self)

        if adapter_name not in getattr(self, "peft_config", {}):
            raise ValueError(f"Adapter name {adapter_name} not found in the model.")

        lora_adapter_metadata = self.peft_config[adapter_name].to_dict()

        lora_layers_to_save = get_peft_model_state_dict(
            self.to(dtype=torch.float32 if upcast_before_saving else None), adapter_name=adapter_name
        )
        if os.path.isfile(save_directory):
            raise ValueError(f"Provided path ({save_directory}) should be a directory, not a file")

        if safe_serialization:

            def save_function(weights, filename):
                # Embed the adapter metadata in the safetensors header so the `LoraConfig`
                # can be restored when the file is loaded again.
                metadata = {"format": "pt"}
                if lora_adapter_metadata is not None:
                    for key, value in lora_adapter_metadata.items():
                        if isinstance(value, set):
                            lora_adapter_metadata[key] = list(value)
                    metadata[LORA_ADAPTER_METADATA_KEY] = json.dumps(lora_adapter_metadata, indent=2, sort_keys=True)

                return safetensors.torch.save_file(weights, filename, metadata=metadata)

        else:
            save_function = torch.save

        os.makedirs(save_directory, exist_ok=True)

        if weight_name is None:
            if safe_serialization:
                weight_name = LORA_WEIGHT_NAME_SAFE
            else:
                weight_name = LORA_WEIGHT_NAME

        save_path = Path(save_directory, weight_name).as_posix()
        save_function(lora_layers_to_save, save_path)
        logger.info(f"Model weights saved in {save_path}")

    def set_adapters(
        self,
        adapter_names: Union[List[str], str],
        weights: Optional[Union[float, Dict, List[float], List[Dict], List[None]]] = None,
    ):
        r"""
        Set the currently active adapters for use in the diffusion network (e.g. unet, transformer, etc.).

        Args:
            adapter_names (`List[str]` or `str`):
                The names of the adapters to use.
            adapter_weights (`Union[List[float], float]`, *optional*):
                The adapter(s) weights to use with the UNet. If `None`, the weights are set to `1.0` for all the
                adapters.

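        For UNet-based models, each entry in `adapter_weights` may also be a dict of per-block scales that is
        expanded through `_SET_ADAPTER_SCALE_FN_MAPPING`; a sketch (the block names follow the SDXL UNet layout):

        ```py
        # hypothetical illustration: scale the down/mid/up blocks of one adapter differently
        pipeline.unet.set_adapters(["cinematic"], adapter_weights=[{"down": 0.6, "mid": 1.0, "up": 0.8}])
        ```
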
        Example:

        ```py
        from diffusers import AutoPipelineForText2Image
        import torch

        pipeline = AutoPipelineForText2Image.from_pretrained(
            "stabilityai/stable-diffusion-xl-base-1.0", torch_dtype=torch.float16
        ).to("cuda")
        pipeline.load_lora_weights(
            "jbilcke-hf/sdxl-cinematic-1", weight_name="pytorch_lora_weights.safetensors", adapter_name="cinematic"
        )
        pipeline.load_lora_weights("nerijs/pixel-art-xl", weight_name="pixel-art-xl.safetensors", adapter_name="pixel")
        pipeline.unet.set_adapters(["cinematic", "pixel"], adapter_weights=[0.5, 0.5])
        ```
        """
        if not USE_PEFT_BACKEND:
            raise ValueError("PEFT backend is required for `set_adapters()`.")

        adapter_names = [adapter_names] if isinstance(adapter_names, str) else adapter_names

        # Expand a single weight entry so that there is one entry per adapter.
        if not isinstance(weights, list):
            weights = [weights] * len(adapter_names)

        if len(adapter_names) != len(weights):
            raise ValueError(
                f"Length of adapter names {len(adapter_names)} is not equal to the length of their weights {len(weights)}."
            )

        # Set missing weights to the default of 1.0.
        weights = [w if w is not None else 1.0 for w in weights]

        # Expand per-block scale dictionaries where the model supports it (e.g. UNets).
        scale_expansion_fn = _SET_ADAPTER_SCALE_FN_MAPPING[self.__class__.__name__]
        weights = scale_expansion_fn(self, weights)

        set_weights_and_activate_adapters(self, adapter_names, weights)

    def add_adapter(self, adapter_config, adapter_name: str = "default") -> None:
        r"""
        Adds a new adapter to the current model for training. If no adapter name is passed, a default name is assigned
        to the adapter to follow the convention of the PEFT library.

        If you are not familiar with adapters and PEFT methods, we invite you to read more about them in the PEFT
        [documentation](https://huggingface.co/docs/peft).

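        Example (a minimal sketch; the `LoraConfig` values and target module names are illustrative):

        ```py
        from peft import LoraConfig

        config = LoraConfig(r=8, lora_alpha=8, target_modules=["to_q", "to_k", "to_v"], lora_dropout=0.0)
        pipeline.unet.add_adapter(config, adapter_name="my_adapter")
        ```
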
        Args:
            adapter_config (`[~peft.PeftConfig]`):
                The configuration of the adapter to add; supported adapters are non-prefix tuning and adaption prompt
                methods.
            adapter_name (`str`, *optional*, defaults to `"default"`):
                The name of the adapter to add. If no name is passed, a default name is assigned to the adapter.
        """
        check_peft_version(min_version=MIN_PEFT_VERSION)

        if not is_peft_available():
            raise ImportError("PEFT is not available. Please install PEFT to use this function: `pip install peft`.")

        from peft import PeftConfig, inject_adapter_in_model

        if not self._hf_peft_config_loaded:
            self._hf_peft_config_loaded = True
        elif adapter_name in self.peft_config:
            raise ValueError(f"Adapter with name {adapter_name} already exists. Please use a different name.")

        if not isinstance(adapter_config, PeftConfig):
            raise ValueError(
                f"adapter_config should be an instance of PeftConfig. Got {type(adapter_config)} instead."
            )

        # Unlike transformers, diffusers handles the model retrieval itself, so
        # `base_model_name_or_path` is deliberately left unset.
        adapter_config.base_model_name_or_path = None
        inject_adapter_in_model(adapter_config, self, adapter_name)
        self.set_adapter(adapter_name)

    def set_adapter(self, adapter_name: Union[str, List[str]]) -> None:
        r"""
        Sets a specific adapter by forcing the model to only use that adapter and disables the other adapters.

        If you are not familiar with adapters and PEFT methods, we invite you to read more about them on the PEFT
        [documentation](https://huggingface.co/docs/peft).

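        Example (a sketch, assuming the adapters "cinematic" and "pixel" were loaded beforehand):

        ```py
        pipeline.unet.set_adapter(["cinematic", "pixel"])  # activate both adapters
        pipeline.unet.set_adapter("cinematic")  # or force a single adapter
        ```
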
        Args:
            adapter_name (`Union[str, List[str]]`):
                The list of adapters to set or the adapter name in the case of a single adapter.
        """
        check_peft_version(min_version=MIN_PEFT_VERSION)

        if not self._hf_peft_config_loaded:
            raise ValueError("No adapter loaded. Please load an adapter first.")

        if isinstance(adapter_name, str):
            adapter_name = [adapter_name]

        missing = set(adapter_name) - set(self.peft_config)
        if len(missing) > 0:
            raise ValueError(
                f"Following adapter(s) could not be found: {', '.join(missing)}. Make sure you are passing the "
                f"correct adapter name(s). Currently loaded adapters are: {list(self.peft_config.keys())}"
            )

        from peft.tuners.tuners_utils import BaseTunerLayer

        _adapters_has_been_set = False

        for _, module in self.named_modules():
            if isinstance(module, BaseTunerLayer):
                if hasattr(module, "set_adapter"):
                    module.set_adapter(adapter_name)
                # Previous versions of PEFT do not support multi-adapter inference.
                elif not hasattr(module, "set_adapter") and len(adapter_name) != 1:
                    raise ValueError(
                        "You are trying to set multiple adapters and you have a PEFT version that does not support "
                        "multi-adapter inference. Please upgrade to the latest version of PEFT. "
                        "`pip install -U peft` or `pip install -U git+https://github.com/huggingface/peft.git`"
                    )
                else:
                    module.active_adapter = adapter_name
                _adapters_has_been_set = True

        if not _adapters_has_been_set:
            raise ValueError(
                "Did not succeed in setting the adapter. Please make sure you are using a model that supports adapters."
            )

    def disable_adapters(self) -> None:
        r"""
        Disable all adapters attached to the model and fallback to inference with the base model only.

        If you are not familiar with adapters and PEFT methods, we invite you to read more about them on the PEFT
        [documentation](https://huggingface.co/docs/peft).
        """
        check_peft_version(min_version=MIN_PEFT_VERSION)

        if not self._hf_peft_config_loaded:
            raise ValueError("No adapter loaded. Please load an adapter first.")

        from peft.tuners.tuners_utils import BaseTunerLayer

        for _, module in self.named_modules():
            if isinstance(module, BaseTunerLayer):
                if hasattr(module, "enable_adapters"):
                    module.enable_adapters(enabled=False)
                else:
                    # support for older PEFT versions
                    module.disable_adapters = True

    def enable_adapters(self) -> None:
        r"""
        Enable adapters that are attached to the model. The model uses `self.active_adapters()` to retrieve the list of
        adapters to enable.

        If you are not familiar with adapters and PEFT methods, we invite you to read more about them on the PEFT
        [documentation](https://huggingface.co/docs/peft).
        """
        check_peft_version(min_version=MIN_PEFT_VERSION)

        if not self._hf_peft_config_loaded:
            raise ValueError("No adapter loaded. Please load an adapter first.")

        from peft.tuners.tuners_utils import BaseTunerLayer

        for _, module in self.named_modules():
            if isinstance(module, BaseTunerLayer):
                if hasattr(module, "enable_adapters"):
                    module.enable_adapters(enabled=True)
                else:
                    # support for older PEFT versions
                    module.disable_adapters = False

    def active_adapters(self) -> List[str]:
        r"""
        Gets the current list of active adapters of the model.

        If you are not familiar with adapters and PEFT methods, we invite you to read more about them on the PEFT
        [documentation](https://huggingface.co/docs/peft).
        """
        check_peft_version(min_version=MIN_PEFT_VERSION)

        if not is_peft_available():
            raise ImportError("PEFT is not available. Please install PEFT to use this function: `pip install peft`.")

        if not self._hf_peft_config_loaded:
            raise ValueError("No adapter loaded. Please load an adapter first.")

        from peft.tuners.tuners_utils import BaseTunerLayer

        for _, module in self.named_modules():
            if isinstance(module, BaseTunerLayer):
                return module.active_adapters

    def fuse_lora(self, lora_scale=1.0, safe_fusing=False, adapter_names=None):
        if not USE_PEFT_BACKEND:
            raise ValueError("PEFT backend is required for `fuse_lora()`.")

        self.lora_scale = lora_scale
        self._safe_fusing = safe_fusing
        self.apply(partial(self._fuse_lora_apply, adapter_names=adapter_names))

    def _fuse_lora_apply(self, module, adapter_names=None):
        from peft.tuners.tuners_utils import BaseTunerLayer

        merge_kwargs = {"safe_merge": self._safe_fusing}

        if isinstance(module, BaseTunerLayer):
            if self.lora_scale != 1.0:
                module.scale_layer(self.lora_scale)

            # For backwards compatibility with previous PEFT versions, check whether the
            # `merge` method supports the `adapter_names` argument before passing it along.
            supported_merge_kwargs = list(inspect.signature(module.merge).parameters)
            if "adapter_names" in supported_merge_kwargs:
                merge_kwargs["adapter_names"] = adapter_names
            elif "adapter_names" not in supported_merge_kwargs and adapter_names is not None:
                raise ValueError(
                    "The `adapter_names` argument is not supported with your PEFT version. Please upgrade "
                    "to the latest version of PEFT. `pip install -U peft`"
                )

            module.merge(**merge_kwargs)

    def unfuse_lora(self):
        if not USE_PEFT_BACKEND:
            raise ValueError("PEFT backend is required for `unfuse_lora()`.")
        self.apply(self._unfuse_lora_apply)

    def _unfuse_lora_apply(self, module):
        from peft.tuners.tuners_utils import BaseTunerLayer

        if isinstance(module, BaseTunerLayer):
            module.unmerge()

    def unload_lora(self):
        if not USE_PEFT_BACKEND:
            raise ValueError("PEFT backend is required for `unload_lora()`.")

        from ..hooks.group_offloading import _maybe_remove_and_reapply_group_offloading
        from ..utils import recurse_remove_peft_layers

        recurse_remove_peft_layers(self)
        if hasattr(self, "peft_config"):
            del self.peft_config
        if hasattr(self, "_hf_peft_config_loaded"):
            self._hf_peft_config_loaded = None

        # Removing the PEFT layers invalidates any group offloading hooks, so re-apply them.
        _maybe_remove_and_reapply_group_offloading(self)

    def disable_lora(self):
        r"""
        Disables the active LoRA layers of the underlying model.

        Example:

        ```py
        from diffusers import AutoPipelineForText2Image
        import torch

        pipeline = AutoPipelineForText2Image.from_pretrained(
            "stabilityai/stable-diffusion-xl-base-1.0", torch_dtype=torch.float16
        ).to("cuda")
        pipeline.load_lora_weights(
            "jbilcke-hf/sdxl-cinematic-1", weight_name="pytorch_lora_weights.safetensors", adapter_name="cinematic"
        )
        pipeline.unet.disable_lora()
        ```
        """
        if not USE_PEFT_BACKEND:
            raise ValueError("PEFT backend is required for this method.")
        set_adapter_layers(self, enabled=False)

    def enable_lora(self):
        r"""
        Enables the active LoRA layers of the underlying model.

        Example:

        ```py
        from diffusers import AutoPipelineForText2Image
        import torch

        pipeline = AutoPipelineForText2Image.from_pretrained(
            "stabilityai/stable-diffusion-xl-base-1.0", torch_dtype=torch.float16
        ).to("cuda")
        pipeline.load_lora_weights(
            "jbilcke-hf/sdxl-cinematic-1", weight_name="pytorch_lora_weights.safetensors", adapter_name="cinematic"
        )
        pipeline.unet.enable_lora()
        ```
        """
        if not USE_PEFT_BACKEND:
            raise ValueError("PEFT backend is required for this method.")
        set_adapter_layers(self, enabled=True)

    def delete_adapters(self, adapter_names: Union[List[str], str]):
        r"""
        Delete an adapter's LoRA layers from the underlying model.

        Args:
            adapter_names (`Union[List[str], str]`):
                The names (single string or list of strings) of the adapter to delete.

        Example:

        ```py
        from diffusers import AutoPipelineForText2Image
        import torch

        pipeline = AutoPipelineForText2Image.from_pretrained(
            "stabilityai/stable-diffusion-xl-base-1.0", torch_dtype=torch.float16
        ).to("cuda")
        pipeline.load_lora_weights(
            "jbilcke-hf/sdxl-cinematic-1", weight_name="pytorch_lora_weights.safetensors", adapter_names="cinematic"
        )
        pipeline.unet.delete_adapters("cinematic")
        ```
        """
        if not USE_PEFT_BACKEND:
            raise ValueError("PEFT backend is required for this method.")

        if isinstance(adapter_names, str):
            adapter_names = [adapter_names]

        for adapter_name in adapter_names:
            delete_adapter_layers(self, adapter_name)

            # Also pop the corresponding adapter from the config.
            if hasattr(self, "peft_config"):
                self.peft_config.pop(adapter_name, None)

    def enable_lora_hotswap(
        self, target_rank: int = 128, check_compiled: Literal["error", "warn", "ignore"] = "error"
    ) -> None:
        """Enables the possibility to hotswap LoRA adapters.

        Calling this method is only required when hotswapping adapters and if the model is compiled or if the ranks of
        the loaded adapters differ.

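        Example (a sketch of the intended call order; file and adapter names are illustrative):

        ```py
        pipeline.unet.enable_lora_hotswap(target_rank=64)
        pipeline.load_lora_weights("first_lora.safetensors", adapter_name="default")
        pipeline.unet = torch.compile(pipeline.unet)
        # replaces the weights of "default" in place, without recompilation
        pipeline.load_lora_weights("second_lora.safetensors", adapter_name="default", hotswap=True)
        ```
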
        Args:
            target_rank (`int`, *optional*, defaults to `128`):
                The highest rank among all the adapters that will be loaded.

            check_compiled (`str`, *optional*, defaults to `"error"`):
                How to handle the case when the model is already compiled, which should generally be avoided. The
                options are:
                  - "error" (default): raise an error
                  - "warn": issue a warning
                  - "ignore": do nothing
        rr   r   z<Call `enable_lora_hotswap` before loading the first adapter.r.  zhIt is recommended to call `enable_lora_hotswap` before loading the first adapter to avoid recompilation.r/  zCcheck_compiles should be one of 'error', 'warn', or 'ignore', got 'z
' instead.)r,  r-  N)r   RuntimeErrorr   r   r   rP   )r   r,  r-  s      r%   enable_lora_hotswapz$PeftAdapterMixin.enable_lora_hotswap  sr    & 4+("#abb6)~  8+ YZhYiist  =H[i,j)r(   )transformerF)defaultFTNr    )r4  )r   N)r   FN)   r   )$r   
__module____qualname____doc__r   rP   r   dict__annotations__classmethodrT   boolr   r   r   r	   r   floatr   r   r   r   r  r  r   r  r  r  r  r#  r'  r)  r+  intr   r2  r!   r(   r%   rO   rO   D   s    #37 (4.7H H \a_TX_H	 &%*#'%)B; B; #	B;
 !B; c]B;N VZ6HT#Y^,6H %tT%[$t*d4j PQR6Hp$' $'D $'L/c49n(= /$ /b3,4.-c -*P),,
90./."9U49c>-B "9J \ckk6=>W6Xk	kr(   rO   )(r  r   r   	functoolsr   pathlibr   typingr   r   r   r   r	   r   r   r"  r   r   r   r   r   r   r   r   r   r   r   utils.peft_utilsr   r   r   r   r   unet_loader_utilsr   
get_loggerr   r   r   rO   r!   r(   r%   <module>rE     sS     	   7 7      S M 8 
		H	%!5!0! ?! @	!
 "#E! !"D! A! %&H! !"D! @! !"D!  !C! ?! !"D! %&H!  ./Q!!"  !C#!$ !C#E'! .vk vkr(   