
import os
from typing import Callable, Dict, List, Optional, Union

import torch
from huggingface_hub.utils import validate_hf_hub_args

from ..utils import (
    USE_PEFT_BACKEND,
    deprecate,
    get_submodule_by_name,
    is_bitsandbytes_available,
    is_gguf_available,
    is_peft_available,
    is_peft_version,
    is_torch_version,
    is_transformers_available,
    is_transformers_version,
    logging,
)
from .lora_base import (
    LORA_WEIGHT_NAME,
    LORA_WEIGHT_NAME_SAFE,
    LoraBaseMixin,
    _fetch_state_dict,
    _load_lora_into_text_encoder,
    _pack_dict_with_prefix,
)
from .lora_conversion_utils import (
    _convert_bfl_flux_control_lora_to_diffusers,
    _convert_fal_kontext_lora_to_diffusers,
    _convert_hunyuan_video_lora_to_diffusers,
    _convert_kohya_flux_lora_to_diffusers,
    _convert_musubi_wan_lora_to_diffusers,
    _convert_non_diffusers_hidream_lora_to_diffusers,
    _convert_non_diffusers_lora_to_diffusers,
    _convert_non_diffusers_ltxv_lora_to_diffusers,
    _convert_non_diffusers_lumina2_lora_to_diffusers,
    _convert_non_diffusers_qwen_lora_to_diffusers,
    _convert_non_diffusers_wan_lora_to_diffusers,
    _convert_xlabs_flux_lora_to_diffusers,
    _maybe_map_sgm_blocks_to_diffusers,
)


_LOW_CPU_MEM_USAGE_DEFAULT_LORA = False
if (
    is_torch_version(">=", "1.9.0")
    and is_peft_available()
    and is_peft_version(">=", "0.13.1")
    and is_transformers_available()
    and is_transformers_version(">", "4.45.2")
):
    _LOW_CPU_MEM_USAGE_DEFAULT_LORA = True


logger = logging.get_logger(__name__)

TEXT_ENCODER_NAME = "text_encoder"
UNET_NAME = "unet"
TRANSFORMER_NAME = "transformer"

_MODULE_NAME_TO_ATTRIBUTE_MAP_FLUX = {"x_embedder": "in_channels"}


def _maybe_dequantize_weight_for_expanded_lora(model, module):
    if is_bitsandbytes_available():
        from ..quantizers.bitsandbytes import dequantize_bnb_weight

    if is_gguf_available():
        from ..quantizers.gguf.utils import dequantize_gguf_tensor

    is_bnb_4bit_quantized = module.weight.__class__.__name__ == "Params4bit"
    is_bnb_8bit_quantized = module.weight.__class__.__name__ == "Int8Params"
    is_gguf_quantized = module.weight.__class__.__name__ == "GGUFParameter"

    if is_bnb_4bit_quantized and not is_bitsandbytes_available():
        raise ValueError(
            "The checkpoint seems to have been quantized with `bitsandbytes` (4bits). Install `bitsandbytes` to load quantized checkpoints."
        )
    if is_bnb_8bit_quantized and not is_bitsandbytes_available():
        raise ValueError(
            "The checkpoint seems to have been quantized with `bitsandbytes` (8bits). Install `bitsandbytes` to load quantized checkpoints."
        )
    if is_gguf_quantized and not is_gguf_available():
        raise ValueError(
            "The checkpoint seems to have been quantized with `gguf`. Install `gguf` to load quantized checkpoints."
        )

    weight_on_cpu = False
    if module.weight.device.type == "cpu":
        weight_on_cpu = True

    device = torch.accelerator.current_accelerator().type if hasattr(torch, "accelerator") else "cuda"
    if is_bnb_4bit_quantized:
        module_weight = dequantize_bnb_weight(
            module.weight.to(device) if weight_on_cpu else module.weight,
            state=module.weight.quant_state,
            dtype=model.dtype,
        ).data
    elif is_gguf_quantized:
        module_weight = dequantize_gguf_tensor(
            module.weight.to(device) if weight_on_cpu else module.weight,
        )
        module_weight = module_weight.to(model.dtype)
    else:
        module_weight = module.weight.data

    if weight_on_cpu:
        module_weight = module_weight.cpu()
    return module_weight


class StableDiffusionLoraLoaderMixin(LoraBaseMixin):
    r"""
    Load LoRA layers into Stable Diffusion [`UNet2DConditionModel`] and
    [`CLIPTextModel`](https://huggingface.co/docs/transformers/model_doc/clip#transformers.CLIPTextModel).
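
    Example (a minimal sketch; the base checkpoint and the LoRA repository/file below are illustrative
    placeholders rather than assets tied to this class):

    ```py
    import torch

    from diffusers import StableDiffusionPipeline

    pipeline = StableDiffusionPipeline.from_pretrained(
        "stable-diffusion-v1-5/stable-diffusion-v1-5", torch_dtype=torch.float16
    ).to("cuda")
    # `load_lora_weights` is provided by this mixin; the repo id and weight name are hypothetical.
    pipeline.load_lora_weights("some-user/some-sd15-lora", weight_name="pytorch_lora_weights.safetensors")
    image = pipeline("a photo of an astronaut riding a horse").images[0]
    ```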
    """

    _lora_loadable_modules = ["unet", "text_encoder"]
    unet_name = UNET_NAME
    text_encoder_name = TEXT_ENCODER_NAME

    def load_lora_weights(
        self,
        pretrained_model_name_or_path_or_dict: Union[str, Dict[str, torch.Tensor]],
        adapter_name: Optional[str] = None,
        hotswap: bool = False,
        **kwargs,
    ):
        """Load LoRA weights specified in `pretrained_model_name_or_path_or_dict` into `self.unet` and
        `self.text_encoder`.

        All kwargs are forwarded to `self.lora_state_dict`.

        See [`~loaders.StableDiffusionLoraLoaderMixin.lora_state_dict`] for more details on how the state dict is
        loaded.

        See [`~loaders.StableDiffusionLoraLoaderMixin.load_lora_into_unet`] for more details on how the state dict is
        loaded into `self.unet`.

        See [`~loaders.StableDiffusionLoraLoaderMixin.load_lora_into_text_encoder`] for more details on how the state
        dict is loaded into `self.text_encoder`.

        Parameters:
            pretrained_model_name_or_path_or_dict (`str` or `os.PathLike` or `dict`):
                See [`~loaders.StableDiffusionLoraLoaderMixin.lora_state_dict`].
            adapter_name (`str`, *optional*):
                Adapter name to be used for referencing the loaded adapter model. If not specified, it will use
                `default_{i}` where i is the total number of adapters being loaded.
            low_cpu_mem_usage (`bool`, *optional*):
                Speed up model loading by only loading the pretrained LoRA weights and not initializing the random
                weights.
            hotswap (`bool`, *optional*):
                Defaults to `False`. Whether to substitute an existing (LoRA) adapter with the newly loaded adapter
                in-place. This means that, instead of loading an additional adapter, this will take the existing
                adapter weights and replace them with the weights of the new adapter. This can be faster and more
                memory efficient. However, the main advantage of hotswapping is that when the model is compiled with
                torch.compile, loading the new adapter does not require recompilation of the model. When using
                hotswapping, the passed `adapter_name` should be the name of an already loaded adapter.

                If the new adapter and the old adapter have different ranks and/or LoRA alphas (i.e. scaling), you need
                to call an additional method before loading the adapter:

                ```py
                pipeline = ...  # load diffusers pipeline
                max_rank = ...  # the highest rank among all LoRAs that you want to load
                # call *before* compiling and loading the LoRA adapter
                pipeline.enable_lora_hotswap(target_rank=max_rank)
                pipeline.load_lora_weights(file_name)
                # optionally compile the model now
                ```

                Note that hotswapping adapters of the text encoder is not yet supported. There are some further
                limitations to this technique, which are documented here:
                https://huggingface.co/docs/peft/main/en/package_reference/hotswap
            kwargs (`dict`, *optional*):
                See [`~loaders.StableDiffusionLoraLoaderMixin.lora_state_dict`].
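
        Example (illustrative only; the LoRA repository, file name, and adapter name are placeholders):

        ```py
        import torch

        from diffusers import StableDiffusionPipeline

        pipeline = StableDiffusionPipeline.from_pretrained(
            "stable-diffusion-v1-5/stable-diffusion-v1-5", torch_dtype=torch.float16
        ).to("cuda")
        # load a LoRA under an explicit adapter name so it can be re-weighted or disabled later
        pipeline.load_lora_weights(
            "some-user/some-sd15-lora", weight_name="pytorch_lora_weights.safetensors", adapter_name="my_lora"
        )
        pipeline.set_adapters(["my_lora"], adapter_weights=[0.8])
        ```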
        """
        if not USE_PEFT_BACKEND:
            raise ValueError("PEFT backend is required for this method.")

        low_cpu_mem_usage = kwargs.pop("low_cpu_mem_usage", _LOW_CPU_MEM_USAGE_DEFAULT_LORA)
        if low_cpu_mem_usage and not is_peft_version(">=", "0.13.1"):
            raise ValueError(
                "`low_cpu_mem_usage=True` is not compatible with this `peft` version. Please update it with `pip install -U peft`."
            )

        # if a dict is passed, copy it instead of modifying it in place
        if isinstance(pretrained_model_name_or_path_or_dict, dict):
            pretrained_model_name_or_path_or_dict = pretrained_model_name_or_path_or_dict.copy()

        # First, ensure that the checkpoint is a compatible one and can be successfully loaded.
        kwargs["return_lora_metadata"] = True
        state_dict, network_alphas, metadata = self.lora_state_dict(pretrained_model_name_or_path_or_dict, **kwargs)

        is_correct_format = all("lora" in key for key in state_dict.keys())
        if not is_correct_format:
            raise ValueError("Invalid LoRA checkpoint.")

        self.load_lora_into_unet(
            state_dict,
            network_alphas=network_alphas,
            unet=getattr(self, self.unet_name) if not hasattr(self, "unet") else self.unet,
            adapter_name=adapter_name,
            metadata=metadata,
            _pipeline=self,
            low_cpu_mem_usage=low_cpu_mem_usage,
            hotswap=hotswap,
        )
        self.load_lora_into_text_encoder(
            state_dict,
            network_alphas=network_alphas,
            text_encoder=getattr(self, self.text_encoder_name)
            if not hasattr(self, "text_encoder")
            else self.text_encoder,
            lora_scale=self.lora_scale,
            adapter_name=adapter_name,
            metadata=metadata,
            _pipeline=self,
            low_cpu_mem_usage=low_cpu_mem_usage,
            hotswap=hotswap,
        )

    @classmethod
    @validate_hf_hub_args
    def lora_state_dict(
        cls,
        pretrained_model_name_or_path_or_dict: Union[str, Dict[str, torch.Tensor]],
        **kwargs,
    ):
        r"""
        Return state dict for lora weights and the network alphas.

        <Tip warning={true}>

        We support loading A1111 formatted LoRA checkpoints in a limited capacity.

        This function is experimental and might change in the future.

        </Tip>

        Parameters:
            pretrained_model_name_or_path_or_dict (`str` or `os.PathLike` or `dict`):
                Can be either:

                    - A string, the *model id* (for example `google/ddpm-celebahq-256`) of a pretrained model hosted on
                      the Hub.
                    - A path to a *directory* (for example `./my_model_directory`) containing the model weights saved
                      with [`ModelMixin.save_pretrained`].
                    - A [torch state
                      dict](https://pytorch.org/tutorials/beginner/saving_loading_models.html#what-is-a-state-dict).

            cache_dir (`Union[str, os.PathLike]`, *optional*):
                Path to a directory where a downloaded pretrained model configuration is cached if the standard cache
                is not used.
            force_download (`bool`, *optional*, defaults to `False`):
                Whether or not to force the (re-)download of the model weights and configuration files, overriding the
                cached versions if they exist.

            proxies (`Dict[str, str]`, *optional*):
                A dictionary of proxy servers to use by protocol or endpoint, for example, `{'http': 'foo.bar:3128',
                'http://hostname': 'foo.bar:4012'}`. The proxies are used on each request.
            local_files_only (`bool`, *optional*, defaults to `False`):
                Whether to only load local model weights and configuration files or not. If set to `True`, the model
                won't be downloaded from the Hub.
            token (`str` or *bool*, *optional*):
                The token to use as HTTP bearer authorization for remote files. If `True`, the token generated from
                `diffusers-cli login` (stored in `~/.huggingface`) is used.
            revision (`str`, *optional*, defaults to `"main"`):
                The specific model version to use. It can be a branch name, a tag name, a commit id, or any identifier
                allowed by Git.
            subfolder (`str`, *optional*, defaults to `""`):
                The subfolder location of a model file within a larger model repository on the Hub or locally.
            weight_name (`str`, *optional*, defaults to None):
                Name of the serialized state dict file.
            return_lora_metadata (`bool`, *optional*, defaults to False):
                When enabled, additionally return the LoRA adapter metadata, typically found in the state dict.
        """
        # Load the main state dict first, which has the LoRA layers for either of
        # UNet and text encoder or both.
        cache_dir = kwargs.pop("cache_dir", None)
        force_download = kwargs.pop("force_download", False)
        proxies = kwargs.pop("proxies", None)
        local_files_only = kwargs.pop("local_files_only", None)
        token = kwargs.pop("token", None)
        revision = kwargs.pop("revision", None)
        subfolder = kwargs.pop("subfolder", None)
        weight_name = kwargs.pop("weight_name", None)
        unet_config = kwargs.pop("unet_config", None)
        use_safetensors = kwargs.pop("use_safetensors", None)
        return_lora_metadata = kwargs.pop("return_lora_metadata", False)

        allow_pickle = False
        if use_safetensors is None:
            use_safetensors = True
            allow_pickle = True

        user_agent = {"file_type": "attn_procs_weights", "framework": "pytorch"}

        state_dict, metadata = _fetch_state_dict(
            pretrained_model_name_or_path_or_dict=pretrained_model_name_or_path_or_dict,
            weight_name=weight_name,
            use_safetensors=use_safetensors,
            local_files_only=local_files_only,
            cache_dir=cache_dir,
            force_download=force_download,
            proxies=proxies,
            token=token,
            revision=revision,
            subfolder=subfolder,
            user_agent=user_agent,
            allow_pickle=allow_pickle,
        )

        is_dora_scale_present = any("dora_scale" in k for k in state_dict)
        if is_dora_scale_present:
            warn_msg = "It seems like you are using a DoRA checkpoint that is not compatible in Diffusers at the moment. So, we are going to filter out the keys associated to 'dora_scale` from the state dict. If you think this is a mistake please open an issue https://github.com/huggingface/diffusers/issues/new."
            logger.warning(warn_msg)
            state_dict = {k: v for k, v in state_dict.items() if "dora_scale" not in k}

        network_alphas = None
        if all(
            (
                k.startswith("lora_te_")
                or k.startswith("lora_unet_")
                or k.startswith("lora_te1_")
                or k.startswith("lora_te2_")
            )
            for k in state_dict.keys()
        ):
            # Map SGM-style (kohya) blocks to diffusers naming before conversion.
            if unet_config is not None:
                state_dict = _maybe_map_sgm_blocks_to_diffusers(state_dict, unet_config)
            state_dict, network_alphas = _convert_non_diffusers_lora_to_diffusers(state_dict)

        out = (state_dict, network_alphas, metadata) if return_lora_metadata else (state_dict, network_alphas)
        return out

    @classmethod
    def load_lora_into_unet(
        cls,
        state_dict,
        network_alphas,
        unet,
        adapter_name=None,
        _pipeline=None,
        low_cpu_mem_usage=False,
        hotswap: bool = False,
        metadata=None,
    ):
        """
        This will load the LoRA layers specified in `state_dict` into `unet`.

        Parameters:
            state_dict (`dict`):
                A standard state dict containing the lora layer parameters. The keys can either be indexed directly
                into the unet or prefixed with an additional `unet` which can be used to distinguish between text
                encoder lora layers.
            network_alphas (`Dict[str, float]`):
                The value of the network alpha used for stable learning and preventing underflow. This value has the
                same meaning as the `--network_alpha` option in the kohya-ss trainer script. Refer to [this
                link](https://github.com/darkstorm2150/sd-scripts/blob/main/docs/train_network_README-en.md#execute-learning).
            unet (`UNet2DConditionModel`):
                The UNet model to load the LoRA layers into.
            adapter_name (`str`, *optional*):
                Adapter name to be used for referencing the loaded adapter model. If not specified, it will use
                `default_{i}` where i is the total number of adapters being loaded.
            low_cpu_mem_usage (`bool`, *optional*):
                Speed up model loading by only loading the pretrained LoRA weights and not initializing the random
                weights.
            hotswap (`bool`, *optional*):
                See [`~loaders.StableDiffusionLoraLoaderMixin.load_lora_weights`].
            metadata (`dict`):
                Optional LoRA adapter metadata. When supplied, the `LoraConfig` arguments of `peft` won't be derived
                from the state dict.
        """
        if not USE_PEFT_BACKEND:
            raise ValueError("PEFT backend is required for this method.")

        if low_cpu_mem_usage and not is_peft_version(">=", "0.13.1"):
            raise ValueError(
                "`low_cpu_mem_usage=True` is not compatible with this `peft` version. Please update it with `pip install -U peft`."
            )

        logger.info(f"Loading {cls.unet_name}.")
        unet.load_lora_adapter(
            state_dict,
            prefix=cls.unet_name,
            network_alphas=network_alphas,
            adapter_name=adapter_name,
            metadata=metadata,
            _pipeline=_pipeline,
            low_cpu_mem_usage=low_cpu_mem_usage,
            hotswap=hotswap,
        )

    @classmethod
    def load_lora_into_text_encoder(
        cls,
        state_dict,
        network_alphas,
        text_encoder,
        prefix=None,
        lora_scale=1.0,
        adapter_name=None,
        _pipeline=None,
        low_cpu_mem_usage=False,
        hotswap: bool = False,
        metadata=None,
    ):
        """
        This will load the LoRA layers specified in `state_dict` into `text_encoder`

        Parameters:
            state_dict (`dict`):
                A standard state dict containing the lora layer parameters. The key should be prefixed with an
                additional `text_encoder` to distinguish between unet lora layers.
            network_alphas (`Dict[str, float]`):
                The value of the network alpha used for stable learning and preventing underflow. This value has the
                same meaning as the `--network_alpha` option in the kohya-ss trainer script. Refer to [this
                link](https://github.com/darkstorm2150/sd-scripts/blob/main/docs/train_network_README-en.md#execute-learning).
            text_encoder (`CLIPTextModel`):
                The text encoder model to load the LoRA layers into.
            prefix (`str`):
                Expected prefix of the `text_encoder` in the `state_dict`.
            lora_scale (`float`):
                How much to scale the output of the lora linear layer before it is added with the output of the regular
                lora layer.
            adapter_name (`str`, *optional*):
                Adapter name to be used for referencing the loaded adapter model. If not specified, it will use
                `default_{i}` where i is the total number of adapters being loaded.
            low_cpu_mem_usage (`bool`, *optional*):
                Speed up model loading by only loading the pretrained LoRA weights and not initializing the random
                weights.
            hotswap (`bool`, *optional*):
                See [`~loaders.StableDiffusionLoraLoaderMixin.load_lora_weights`].
            metadata (`dict`):
                Optional LoRA adapter metadata. When supplied, the `LoraConfig` arguments of `peft` won't be derived
                from the state dict.
        )r~   rj   rm   r,   r   rz   rW   rk   rl   r[   rX   Nr   rz   r   r~   rj   r,   r   rm   rW   rl   r[   rX   rk   s              rQ   ry   z:StableDiffusionLoraLoaderMixin.load_lora_into_text_encoder  s4    X 	%!)!%!33%/	
rS   save_directoryunet_lora_layerstext_encoder_lora_layersis_main_processr   save_functionsafe_serializationc
           	         i }
i }|s|st        d      |r+|
j                  | j                  || j                               |r+|
j                  | j                  || j                               |r%|j                  t        || j                               |	r%|j                  t        |	| j                               | j                  |
||||||       y)u8  
        Save the LoRA parameters corresponding to the UNet and text encoder.

        Arguments:
            save_directory (`str` or `os.PathLike`):
                Directory to save LoRA parameters to. Will be created if it doesn't exist.
            unet_lora_layers (`Dict[str, torch.nn.Module]` or `Dict[str, torch.Tensor]`):
                State dict of the LoRA layers corresponding to the `unet`.
            text_encoder_lora_layers (`Dict[str, torch.nn.Module]` or `Dict[str, torch.Tensor]`):
                State dict of the LoRA layers corresponding to the `text_encoder`. Must explicitly pass the text
                encoder LoRA state dict because it comes from 🤗 Transformers.
            is_main_process (`bool`, *optional*, defaults to `True`):
                Whether the process calling this is the main process or not. Useful during distributed training and you
                need to call this function on all processes. In this case, set `is_main_process=True` only on the main
                process to avoid race conditions.
            save_function (`Callable`):
                The function to use to save the state dictionary. Useful during distributed training when you need to
                replace `torch.save` with another method. Can be configured with the environment variable
                `DIFFUSERS_SAVE_MODE`.
            safe_serialization (`bool`, *optional*, defaults to `True`):
                Whether to save the model using `safetensors` or the traditional PyTorch way with `pickle`.
            unet_lora_adapter_metadata:
                LoRA adapter metadata associated with the unet to be serialized with the state dict.
            text_encoder_lora_adapter_metadata:
                LoRA adapter metadata associated with the text encoder to be serialized with the state dict.
        """
        state_dict = {}
        lora_adapter_metadata = {}

        if not (unet_lora_layers or text_encoder_lora_layers):
            raise ValueError("You must pass at least one of `unet_lora_layers` and `text_encoder_lora_layers`.")

        if unet_lora_layers:
            state_dict.update(cls.pack_weights(unet_lora_layers, cls.unet_name))

        if text_encoder_lora_layers:
            state_dict.update(cls.pack_weights(text_encoder_lora_layers, cls.text_encoder_name))

        if unet_lora_adapter_metadata:
            lora_adapter_metadata.update(_pack_dict_with_prefix(unet_lora_adapter_metadata, cls.unet_name))

        if text_encoder_lora_adapter_metadata:
            lora_adapter_metadata.update(
                _pack_dict_with_prefix(text_encoder_lora_adapter_metadata, cls.text_encoder_name)
            )

        # Save the model
        cls.write_lora_layers(
            state_dict=state_dict,
            save_directory=save_directory,
            is_main_process=is_main_process,
            weight_name=weight_name,
            save_function=save_function,
            safe_serialization=safe_serialization,
            lora_adapter_metadata=lora_adapter_metadata,
        )

    def fuse_lora(
        self,
        components: List[str] = ["unet", "text_encoder"],
        lora_scale: float = 1.0,
        safe_fusing: bool = False,
        adapter_names: Optional[List[str]] = None,
        **kwargs,
    ):
        r"""
        Fuses the LoRA parameters into the original parameters of the corresponding blocks.

        <Tip warning={true}>

        This is an experimental API.

        </Tip>

        Args:
            components: (`List[str]`): List of LoRA-injectable components to fuse the LoRAs into.
            lora_scale (`float`, defaults to 1.0):
                Controls how much to influence the outputs with the LoRA parameters.
            safe_fusing (`bool`, defaults to `False`):
                Whether to check fused weights for NaN values before fusing and if values are NaN not fusing them.
            adapter_names (`List[str]`, *optional*):
                Adapter names to be used for fusing. If nothing is passed, all active adapters will be fused.

        Example:

        ```py
        from diffusers import DiffusionPipeline
        import torch

        pipeline = DiffusionPipeline.from_pretrained(
            "stabilityai/stable-diffusion-xl-base-1.0", torch_dtype=torch.float16
        ).to("cuda")
        pipeline.load_lora_weights("nerijs/pixel-art-xl", weight_name="pixel-art-xl.safetensors", adapter_name="pixel")
        pipeline.fuse_lora(lora_scale=0.7)
        ```
        """
        super().fuse_lora(
            components=components,
            lora_scale=lora_scale,
            safe_fusing=safe_fusing,
            adapter_names=adapter_names,
            **kwargs,
        )

    def unfuse_lora(self, components: List[str] = ["unet", "text_encoder"], **kwargs):
        r"""
        Reverses the effect of
        [`pipe.fuse_lora()`](https://huggingface.co/docs/diffusers/main/en/api/loaders#diffusers.loaders.LoraBaseMixin.fuse_lora).

        <Tip warning={true}>

        This is an experimental API.

        </Tip>

        Args:
            components (`List[str]`): List of LoRA-injectable components to unfuse LoRA from.
            unfuse_unet (`bool`, defaults to `True`): Whether to unfuse the UNet LoRA parameters.
            unfuse_text_encoder (`bool`, defaults to `True`):
                Whether to unfuse the text encoder LoRA parameters. If the text encoder wasn't monkey-patched with the
                LoRA parameters then it won't have any effect.
        r   Nra   r   unfuse_lorar|   r   r}   r?   s      rQ   r   z*StableDiffusionLoraLoaderMixin.unfuse_loraO      $ 	<z<V<rS   NFNNFFNNr   NNFFNNNTNNTNN!r@   
__module____qualname____doc___lora_loadable_modules	UNET_NAMErx   TEXT_ENCODER_NAMErz   r   strr   rE   Tensorr   boolr   classmethodr   rs   rv   ry   osPathLikennModuler   r   r   floatr   r   __classcell__r?   s   @rQ   rU   rU      s9   
 %n5I)
 '+	c
/4S$sELL?P:Q5Q/Rc
 smc
 	c
J p/4S$sELL?P:Q5Q/Rp  pd  :
 :
 :
x  7
 7
 7
r  MQ?C $"&#'#'+/C
c2;;./C
 sE%((//5<<*G$HHIC
 #'sEHHOO';"<	C

 C
 C
  C
 !C
 C
N "( 8!-1-
I-
 -
 	-

  S	*-
^ 4:>2J =d3i = =rS   rU   c                       e Zd ZdZg dZeZeZ	 	 dde	e
ee
ej                  f   f   dee
   defdZeede	e
ee
ej                  f   f   fd	              Ze	 	 	 	 	 ddefd
       Ze	 	 	 	 	 	 	 ddefd       Ze	 	 	 	 	 	 	 	 	 	 dde	e
ej.                  f   dee
e	ej0                  j2                  ej                  f   f   dee
e	ej0                  j2                  ej                  f   f   dee
e	ej0                  j2                  ej                  f   f   dede
dedefd       Zg ddddfdee
   dededeee
      f fdZg dfdee
   f fdZ xZ S )  StableDiffusionXLLoraLoaderMixinaC  
    Load LoRA layers into Stable Diffusion XL [`UNet2DConditionModel`],
    [`CLIPTextModel`](https://huggingface.co/docs/transformers/model_doc/clip#transformers.CLIPTextModel), and
    [`CLIPTextModelWithProjection`](https://huggingface.co/docs/transformers/model_doc/clip#transformers.CLIPTextModelWithProjection).
    )r-   r,   text_encoder_2NFrV   rW   rX   c                    t         st        d      |j                  dt              }|rt	        dd      st        d      t        |t              r|j                         }d|d<    | j                  |fd| j                  j                  i|\  }}}t        d	 |j                         D              }	|	st        d
      | j                  ||| j                  ||| ||       | j                  ||| j                  | j                   | j"                  ||| ||
       | j                  ||| j$                  | j                    d| j"                  ||| ||
       y)a  
        Load LoRA weights specified in `pretrained_model_name_or_path_or_dict` into `self.unet` and
        `self.text_encoder`.

        All kwargs are forwarded to `self.lora_state_dict`.

        See [`~loaders.StableDiffusionLoraLoaderMixin.lora_state_dict`] for more details on how the state dict is
        loaded.

        See [`~loaders.StableDiffusionLoraLoaderMixin.load_lora_into_unet`] for more details on how the state dict is
        loaded into `self.unet`.

        See [`~loaders.StableDiffusionLoraLoaderMixin.load_lora_into_text_encoder`] for more details on how the state
        dict is loaded into `self.text_encoder`.

        Parameters:
            pretrained_model_name_or_path_or_dict (`str` or `os.PathLike` or `dict`):
                See [`~loaders.StableDiffusionLoraLoaderMixin.lora_state_dict`].
            adapter_name (`str`, *optional*):
                Adapter name to be used for referencing the loaded adapter model. If not specified, it will use
                `default_{i}` where i is the total number of adapters being loaded.
            low_cpu_mem_usage (`bool`, *optional*):
                Speed up model loading by only loading the pretrained LoRA weights and not initializing the random
                weights.
            hotswap (`bool`, *optional*):
                See [`~loaders.StableDiffusionLoraLoaderMixin.load_lora_weights`].
            kwargs (`dict`, *optional*):
                See [`~loaders.StableDiffusionLoraLoaderMixin.lora_state_dict`].
        rZ   r[   r)   r*   r\   Tr]   r   c              3   $   K   | ]  }d |v  
 ywr_   ra   rb   s     rQ   re   zEStableDiffusionXLLoraLoaderMixin.load_lora_weights.<locals>.<genexpr>  rf   rg   rh   ri   	rj   r,   r   rm   rW   rk   rl   r[   rX   _2N)r
   rA   rn   ro   r   rp   rq   rr   rs   r-   configrt   ru   rv   ry   r,   rz   rm   r   r{   s
             rQ   r   z2StableDiffusionXLLoraLoaderMixin.load_lora_weightso  s   H  HII"JJ':<[\_T8%D D  ;TB4Y4^4^4`1 *.%&/Ct/C/C10
		((0
 0
,
NH  K9JKK 788  )%/ 	! 		
 	(()**))%/ 	) 	
 	((),,,,-R0%/ 	) 	
rS   c                 (   |j                  dd      }|j                  dd      }|j                  dd      }|j                  dd      }|j                  dd      }|j                  dd      }|j                  d	d      }	|j                  d
d      }
|j                  dd      }|j                  dd      }|j                  dd      }d}|d}d}ddd}t        ||
||||||||	||      \  }}t        d |D              }|r?d}t        j	                  |       |j                         D ci c]  \  }}d|vs|| }}}d}t        d |j                         D              r|t        ||      }t        |      \  }}|r|||f}|S ||f}|S c c}}w )r   r   Nr   Fr   r   r   r   r   r   r   r   r]   Tr   r   r   r   c              3   $   K   | ]  }d |v  
 ywr   ra   r   s     rQ   re   zCStableDiffusionXLLoraLoaderMixin.lora_state_dict.<locals>.<genexpr>.  r   rg   r   r   c              3      K   | ]N  }|j                  d       xs7 |j                  d      xs$ |j                  d      xs |j                  d       P ywr   r   r   s     rQ   re   zCStableDiffusionXLLoraLoaderMixin.lora_state_dict.<locals>.<genexpr>6  r   r   r   r   s                           rQ   rs   z0StableDiffusionXLLoraLoaderMixin.lora_state_dict  s   t JJ{D1	$4e<**Y-!::&8$?

7D)::j$/JJ{D1	jj5jj5 **%6=%zz*@%H""OL#7iP
02W#+-)!% 

H !$#Jz#J J  {HNN8$+5+;+;+=W41aUVAV!Q$WJW 
  __&
 
 &?
KX
)QR\)]&J8Lz>84
 T^_mRn
) Xr   c	           
          t         st        d      |rt        dd      st        d      t        j	                  d| j
                   d       |j                  || j
                  ||||||       yr   r   r   s	            rQ   rv   z4StableDiffusionXLLoraLoaderMixin.load_lora_into_unetH  s}    N  HII_T8%D D  	hs}}oQ/0==)%/ 	 		
rS   r   c                 D    t        |||||| j                  ||
|||	       yr   r   r   s              rQ   ry   z<StableDiffusionXLLoraLoaderMixin.load_lora_into_text_encoder  4    Z 	%!)!%!33%/	
rS   r   r   r   text_encoder_2_lora_layersr   r   r   r   c           	         i }i }|s|s|st        d      |r+|j                  | j                  || j                               |r!|j                  | j                  |d             |r!|j                  | j                  |d             |	%|j                  t	        |	| j                               |
r%|j                  t	        |
| j
                               |r|j                  t	        |d             | j                  |||||||       y)u	  
        Save the LoRA parameters corresponding to the UNet and text encoder.

        Arguments:
            save_directory (`str` or `os.PathLike`):
                Directory to save LoRA parameters to. Will be created if it doesn't exist.
            unet_lora_layers (`Dict[str, torch.nn.Module]` or `Dict[str, torch.Tensor]`):
                State dict of the LoRA layers corresponding to the `unet`.
            text_encoder_lora_layers (`Dict[str, torch.nn.Module]` or `Dict[str, torch.Tensor]`):
                State dict of the LoRA layers corresponding to the `text_encoder`. Must explicitly pass the text
                encoder LoRA state dict because it comes from 🤗 Transformers.
            text_encoder_2_lora_layers (`Dict[str, torch.nn.Module]` or `Dict[str, torch.Tensor]`):
                State dict of the LoRA layers corresponding to the `text_encoder_2`. Must explicitly pass the text
                encoder LoRA state dict because it comes from 🤗 Transformers.
            is_main_process (`bool`, *optional*, defaults to `True`):
                Whether the process calling this is the main process or not. Useful during distributed training and you
                need to call this function on all processes. In this case, set `is_main_process=True` only on the main
                process to avoid race conditions.
            save_function (`Callable`):
                The function to use to save the state dictionary. Useful during distributed training when you need to
                replace `torch.save` with another method. Can be configured with the environment variable
                `DIFFUSERS_SAVE_MODE`.
            safe_serialization (`bool`, *optional*, defaults to `True`):
                Whether to save the model using `safetensors` or the traditional PyTorch way with `pickle`.
            unet_lora_adapter_metadata:
                LoRA adapter metadata associated with the unet to be serialized with the state dict.
            text_encoder_lora_adapter_metadata:
                LoRA adapter metadata associated with the text encoder to be serialized with the state dict.
            text_encoder_2_lora_adapter_metadata:
                LoRA adapter metadata associated with the second text encoder to be serialized with the state dict.
        zkYou must pass at least one of `unet_lora_layers`, `text_encoder_lora_layers`, `text_encoder_2_lora_layers`.r,   r   Nr   )rA   r   r   rx   r   rz   r   )r   r   r   r   r  r   r   r   r   r   r   $text_encoder_2_lora_adapter_metadatar~   r   s                 rQ   r   z2StableDiffusionXLLoraLoaderMixin.save_lora_weights  s   \ 
 " $<@Z}  c../?OP#c../GXY%c../IK[\]%1!(()?@Z\_\i\i)jk-!((&'I3K`K`a 0!((&'KM]^ 	!)+#'1"7 	 	
rS   r   rm   r   r   c                 .    t        |   d||||d| yr   r   r   s         rQ   r   z*StableDiffusionXLLoraLoaderMixin.fuse_lora  r   rS   c                 (    t        |   dd|i| yr   r   r   s      rQ   r   z,StableDiffusionXLLoraLoaderMixin.unfuse_loraF  r   rS   r   r   r   
NNNTNNTNNNr   r   s   @rQ   r   r   d  sl    HI)
 '+	b
/4S$sELL?P:Q5Q/Rb
 smb
 	b
H p/4S$sELL?P:Q5Q/Rp  pd  :
 :
 :
x  7
 7
 7
r  MQTXVZ $"&#'#'+/-1S
c2;;./S
 sE%((//5<<*G$HHIS
 #'sE%((//5<<2O,P'P"Q	S

 %)eEHHOOU\\4Q.R)R$SS
 S
 S
  S
 !S
 S
n !K!-1-
I-
 -
 	-

  S	*-
^ 3] =d3i = =rS   r   c                       e Zd ZdZg dZeZeZe	e
deeeeej                  f   f   fd              Z	 	 ddeeeeej                  f   f   defdZe		 	 	 	 	 ddefd	       Ze		 	 	 	 	 	 	 ddefd       Ze		 	 	 	 	 	 	 	 	 	 ddeeej,                  f   deeeej.                  j0                  ej                  f   f   deeeej.                  j0                  ej                  f   f   deeeej.                  j0                  ej                  f   f   dedededefd       Zg dd
ddfdee   dededeee      f fdZg dfdee   f fdZ xZ S )SD3LoraLoaderMixina_  
    Load LoRA layers into [`SD3Transformer2DModel`],
    [`CLIPTextModel`](https://huggingface.co/docs/transformers/model_doc/clip#transformers.CLIPTextModel), and
    [`CLIPTextModelWithProjection`](https://huggingface.co/docs/transformers/model_doc/clip#transformers.CLIPTextModelWithProjection).

    Specific to [`StableDiffusion3Pipeline`].
    )r.   r,   r   rV   c                    |j                  dd      }|j                  dd      }|j                  dd      }|j                  dd      }|j                  dd      }|j                  dd      }|j                  d	d      }	|j                  d
d      }
|j                  dd      }|j                  dd      }d}|d}d}ddd}t        ||
||||||||	||      \  }}t        d |D              }|r?d}t        j	                  |       |j                         D ci c]  \  }}d|vs|| }}}|r||f}|S |}|S c c}}w )d
  
        Return state dict for lora weights and the network alphas.

        <Tip warning={true}>

        We support loading A1111 formatted LoRA checkpoints in a limited capacity.

        This function is experimental and might change in the future.

        </Tip>

        Parameters:
            pretrained_model_name_or_path_or_dict (`str` or `os.PathLike` or `dict`):
                Can be either:

                    - A string, the *model id* (for example `google/ddpm-celebahq-256`) of a pretrained model hosted on
                      the Hub.
                    - A path to a *directory* (for example `./my_model_directory`) containing the model weights saved
                      with [`ModelMixin.save_pretrained`].
                    - A [torch state
                      dict](https://pytorch.org/tutorials/beginner/saving_loading_models.html#what-is-a-state-dict).

            cache_dir (`Union[str, os.PathLike]`, *optional*):
                Path to a directory where a downloaded pretrained model configuration is cached if the standard cache
                is not used.
            force_download (`bool`, *optional*, defaults to `False`):
                Whether or not to force the (re-)download of the model weights and configuration files, overriding the
                cached versions if they exist.

            proxies (`Dict[str, str]`, *optional*):
                A dictionary of proxy servers to use by protocol or endpoint, for example, `{'http': 'foo.bar:3128',
                'http://hostname': 'foo.bar:4012'}`. The proxies are used on each request.
            local_files_only (`bool`, *optional*, defaults to `False`):
                Whether to only load local model weights and configuration files or not. If set to `True`, the model
                won't be downloaded from the Hub.
            token (`str` or *bool*, *optional*):
                The token to use as HTTP bearer authorization for remote files. If `True`, the token generated from
                `diffusers-cli login` (stored in `~/.huggingface`) is used.
            revision (`str`, *optional*, defaults to `"main"`):
                The specific model version to use. It can be a branch name, a tag name, a commit id, or any identifier
                allowed by Git.
            subfolder (`str`, *optional*, defaults to `""`):
                The subfolder location of a model file within a larger model repository on the Hub or locally.
            return_lora_metadata (`bool`, *optional*, defaults to False):
                When enabled, additionally return the LoRA adapter metadata, typically found in the state dict.

        r   Nr   Fr   r   r   r   r   r   r   r]   Tr   r   r   r   c              3   $   K   | ]  }d |v  
 ywr   ra   r   s     rQ   re   z5SD3LoraLoaderMixin.lora_state_dict.<locals>.<genexpr>  r   rg   r   r   rn   r   r   r   r   r   r   rV   r}   r   r   r   r   r   r   r   r   r   r]   r   r   r~   rk   r   r   r   r   r   s                         rQ   rs   z"SD3LoraLoaderMixin.lora_state_dicth  sx   p JJ{D1	$4e<**Y-!::&8$?

7D)::j$/JJ{D1	jj5 **%6=%zz*@%H""OL#7iP
02W#+-)!% 

H !$#Jz#J J  {HNN8$+5+;+;+=W41aUVAV!Q$WJW(<z8$
 CM
 X   D;'D;NFrX   c                    t         st        d      |j                  dt              }|rt	        dd      rt        d      t        |t              r|j                         }d|d<    | j                  |fi |\  }}t        d |j                         D              }|st        d	      | j                  |t        | d
      st        | | j                        n| j                  ||| ||       | j!                  |d| j"                  | j$                  | j&                  ||| ||
       | j!                  |d| j(                  | j$                   d| j&                  ||| ||
       y)aU  
        Load LoRA weights specified in `pretrained_model_name_or_path_or_dict` into `self.unet` and
        `self.text_encoder`.

        All kwargs are forwarded to `self.lora_state_dict`.

        See [`~loaders.StableDiffusionLoraLoaderMixin.lora_state_dict`] for more details on how the state dict is
        loaded.

        See [`~loaders.StableDiffusionLoraLoaderMixin.load_lora_into_transformer`] for more details on how the state
        dict is loaded into `self.transformer`.

        Parameters:
            pretrained_model_name_or_path_or_dict (`str` or `os.PathLike` or `dict`):
                See [`~loaders.StableDiffusionLoraLoaderMixin.lora_state_dict`].
            adapter_name (`str`, *optional*):
                Adapter name to be used for referencing the loaded adapter model. If not specified, it will use
                `default_{i}` where i is the total number of adapters being loaded.
            low_cpu_mem_usage (`bool`, *optional*):
                Speed up model loading by only loading the pretrained LoRA weights and not initializing the random
                weights.
            hotswap (`bool`, *optional*):
                See [`~loaders.StableDiffusionLoraLoaderMixin.load_lora_weights`].
            kwargs (`dict`, *optional*):
                See [`~loaders.StableDiffusionLoraLoaderMixin.lora_state_dict`].
        rZ   r[   <0.13.0r\   Tr]   c              3   $   K   | ]  }d |v  
 ywr_   ra   rb   s     rQ   re   z7SD3LoraLoaderMixin.load_lora_weights.<locals>.<genexpr>  rf   rg   rh   r.   r.   rW   rk   rl   r[   rX   Nr   r   )r
   rA   rn   ro   r   rp   rq   rr   rs   rt   ru   load_lora_into_transformerrD   rw   transformer_namer.   ry   r,   rz   rm   r   	r|   rV   rW   rX   r}   r[   r~   rk   r   s	            rQ   r   z$SD3LoraLoaderMixin.load_lora_weights  s   B  HII"JJ':<[\h!? D 
 ;TB4Y4^4^4`1 *.%&3t334Yd]cd
HK9JKK 788''DKDR_D`d&;&;<fjfvfv%/ 	( 	
 	((**))%/ 	) 	
 	((,,,,-R0%/ 	) 	
rS   c           	          |rt        dd      rt        d      t        j                  d| j                   d       |j                  |d|||||       y)a  
        This will load the LoRA layers specified in `state_dict` into `transformer`.

        Parameters:
            state_dict (`dict`):
                A standard state dict containing the lora layer parameters. The keys can either be indexed directly
                into the unet or prefixed with an additional `unet` which can be used to distinguish between text
                encoder lora layers.
            transformer (`SD3Transformer2DModel`):
                The Transformer model to load the LoRA layers into.
            adapter_name (`str`, *optional*):
                Adapter name to be used for referencing the loaded adapter model. If not specified, it will use
                `default_{i}` where i is the total number of adapters being loaded.
            low_cpu_mem_usage (`bool`, *optional*):
                Speed up model loading by only loading the pretrained LoRA weights and not initializing the random
                weights.
            hotswap (`bool`, *optional*):
                See [`~loaders.StableDiffusionLoraLoaderMixin.load_lora_weights`].
            metadata (`dict`):
                Optional LoRA adapter metadata. When supplied, the `LoraConfig` arguments of `peft` won't be derived
                from the state dict.
        r  r  r\   r   r   Nrj   rW   rk   rl   r[   rX   r   rA   r   r   r  r   r   r~   r.   rW   rl   r[   rX   rk   s           rQ   r  z-SD3LoraLoaderMixin.load_lora_into_transformer"  sj    B h!? D 
 	hs334A67%%%/ 	& 	
rS   r   c                 D    t        |||||| j                  ||
|||	       yr   r   r   s              rQ   ry   z.SD3LoraLoaderMixin.load_lora_into_text_encoderT  r  rS   r   transformer_lora_layersr   r  r   r   r   r   c           	         i }i }|s|s|st        d      |r+|j                  | j                  || j                               |r!|j                  | j                  |d             |r!|j                  | j                  |d             |	%|j                  t	        |	| j                               |
r%|j                  t	        |
| j
                               |r|j                  t	        |d             | j                  |||||||       y)u"	  
        Save the LoRA parameters corresponding to the UNet and text encoder.

        Arguments:
            save_directory (`str` or `os.PathLike`):
                Directory to save LoRA parameters to. Will be created if it doesn't exist.
            transformer_lora_layers (`Dict[str, torch.nn.Module]` or `Dict[str, torch.Tensor]`):
                State dict of the LoRA layers corresponding to the `transformer`.
            text_encoder_lora_layers (`Dict[str, torch.nn.Module]` or `Dict[str, torch.Tensor]`):
                State dict of the LoRA layers corresponding to the `text_encoder`. Must explicitly pass the text
                encoder LoRA state dict because it comes from 🤗 Transformers.
            text_encoder_2_lora_layers (`Dict[str, torch.nn.Module]` or `Dict[str, torch.Tensor]`):
                State dict of the LoRA layers corresponding to the `text_encoder_2`. Must explicitly pass the text
                encoder LoRA state dict because it comes from 🤗 Transformers.
            is_main_process (`bool`, *optional*, defaults to `True`):
                Whether the process calling this is the main process or not. Useful during distributed training and you
                need to call this function on all processes. In this case, set `is_main_process=True` only on the main
                process to avoid race conditions.
            save_function (`Callable`):
                The function to use to save the state dictionary. Useful during distributed training when you need to
                replace `torch.save` with another method. Can be configured with the environment variable
                `DIFFUSERS_SAVE_MODE`.
            safe_serialization (`bool`, *optional*, defaults to `True`):
                Whether to save the model using `safetensors` or the traditional PyTorch way with `pickle`.
            transformer_lora_adapter_metadata:
                LoRA adapter metadata associated with the transformer to be serialized with the state dict.
            text_encoder_lora_adapter_metadata:
                LoRA adapter metadata associated with the text encoder to be serialized with the state dict.
            text_encoder_2_lora_adapter_metadata:
                LoRA adapter metadata associated with the second text encoder to be serialized with the state dict.
        zrYou must pass at least one of `transformer_lora_layers`, `text_encoder_lora_layers`, `text_encoder_2_lora_layers`.r,   r   Nr   )rA   r   r   r  r   rz   r   )r   r   r   r   r  r   r   r   r   !transformer_lora_adapter_metadatar   r  r~   r   s                 rQ   r   z$SD3LoraLoaderMixin.save_lora_weights  s$   ^ 
 "'+CGa E  #c../FH\H\]^#c../GXY%c../IK[\],8!((&'H#J^J^_ .!((&'I3K`K`a 0!((&'KM]^ 	!)+#'1"7 	 	
rS   r   rm   r   r   c                 .    t        |   d||||d| yr   r   r   s         rQ   r   zSD3LoraLoaderMixin.fuse_lora  r   rS   c                 (    t        |   dd|i| y)a  
        Reverses the effect of
        [`pipe.fuse_lora()`](https://huggingface.co/docs/diffusers/main/en/api/loaders#diffusers.loaders.LoraBaseMixin.fuse_lora).

        <Tip warning={true}>

        This is an experimental API.

        </Tip>

        Args:
            components (`List[str]`): List of LoRA-injectable components to unfuse LoRA from.
            unfuse_transformer (`bool`, defaults to `True`): Whether to unfuse the UNet LoRA parameters.
            unfuse_text_encoder (`bool`, defaults to `True`):
                Whether to unfuse the text encoder LoRA parameters. If the text encoder wasn't monkey-patched with the
                LoRA parameters then it won't have any effect.
        r   Nra   r   r   s      rQ   r   zSD3LoraLoaderMixin.unfuse_lora  r   rS   r   r   r   r
  )!r@   r   r   r   r   TRANSFORMER_NAMEr  r   rz   r   r   r   r   r   rE   r   rs   r   r   r  ry   r   r   r   r   r   r   r   r   r   r   r   r   r   s   @rQ   r  r  [  s\    O')^/4S$sELL?P:Q5Q/R^  ^F 	V
/4S$sELL?P:Q5Q/RV
 	V
p 
 /
 /
 /
b  7
 7
 7
r 
 TXTXVZ $"&#'*.+/-1U
c2;;./U
 "&c5%,,1N+O&O!PU
 #'sE%((//5<<2O,P'P"Q	U

 %)eEHHOOU\\4Q.R)R$SU
 U
 U
  U
 !U
 U
t !R!-1-
I-
 -
 	-

  S	*-
` 3d =d3i = =rS   r  c                       e Zd ZdZdgZeZeede	e
ee
ej                  f   f   fd              Z	 	 dde	e
ee
ej                  f   f   dee
   defd	Ze	 	 	 	 	 ddefd
       Ze	 	 	 	 	 	 dde	e
ej(                  f   dee
e	ej*                  j,                  ej                  f   f   dede
dededee   fd       Zdgdddfdee
   dededeee
      f fdZddgfdee
   f fdZ xZS )AuraFlowLoraLoaderMixinz`
    Load LoRA layers into [`AuraFlowTransformer2DModel`] Specific to [`AuraFlowPipeline`].
    r.   rV   c                    |j                  dd      }|j                  dd      }|j                  dd      }|j                  dd      }|j                  dd      }|j                  dd      }|j                  d	d      }	|j                  d
d      }
|j                  dd      }|j                  dd      }d}|d}d}ddd}t        ||
||||||||	||      \  }}t        d |D              }|r?d}t        j	                  |       |j                         D ci c]  \  }}d|vs|| }}}|r||f}|S |}|S c c}}w )r  r   Nr   Fr   r   r   r   r   r   r   r]   Tr   r   r   r   c              3   $   K   | ]  }d |v  
 ywr   ra   r   s     rQ   re   z:AuraFlowLoraLoaderMixin.lora_state_dict.<locals>.<genexpr>  r   rg   r   r   r  r  s                         rQ   rs   z'AuraFlowLoraLoaderMixin.lora_state_dict6  x   r JJ{D1	$4e<**Y-!::&8$?

7D)::j$/JJ{D1	jj5 **%6=%zz*@%H""OL#7iP
02W#+-)!% 

H !$#Jz#J J  {HNN8$+5+;+;+=W41aUVAV!Q$WJW(<z8$
 CM
 Xr  NFrW   rX   c           	         t         st        d      |j                  dt              }|rt	        dd      rt        d      t        |t              r|j                         }d|d<    | j                  |fi |\  }}t        d |j                         D              }|st        d	      | j                  |t        | d
      st        | | j                        n| j                  ||| ||       y)I  
        Load LoRA weights specified in `pretrained_model_name_or_path_or_dict` into `self.transformer` and
        `self.text_encoder`. All kwargs are forwarded to `self.lora_state_dict`. See
        [`~loaders.StableDiffusionLoraLoaderMixin.lora_state_dict`] for more details on how the state dict is loaded.
        See [`~loaders.StableDiffusionLoraLoaderMixin.load_lora_into_transformer`] for more details on how the state
        dict is loaded into `self.transformer`.

        Parameters:
            pretrained_model_name_or_path_or_dict (`str` or `os.PathLike` or `dict`):
                See [`~loaders.StableDiffusionLoraLoaderMixin.lora_state_dict`].
            adapter_name (`str`, *optional*):
                Adapter name to be used for referencing the loaded adapter model. If not specified, it will use
                `default_{i}` where i is the total number of adapters being loaded.
            low_cpu_mem_usage (`bool`, *optional*):
                Speed up model loading by only loading the pretrained LoRA weights and not initializing the random
                weights.
            hotswap (`bool`, *optional*):
                See [`~loaders.StableDiffusionLoraLoaderMixin.load_lora_weights`].
            kwargs (`dict`, *optional*):
                See [`~loaders.StableDiffusionLoraLoaderMixin.lora_state_dict`].
        rZ   r[   r  r  r\   Tr]   c              3   $   K   | ]  }d |v  
 ywr_   ra   rb   s     rQ   re   z<AuraFlowLoraLoaderMixin.load_lora_weights.<locals>.<genexpr>  rf   rg   rh   r.   r  Nr
   rA   rn   ro   r   rp   rq   rr   rs   rt   ru   r  rD   rw   r  r.   r  s	            rQ   r   z)AuraFlowLoraLoaderMixin.load_lora_weights      8  HII"JJ':<[\h!? D 
 ;TB4Y4^4^4`1 *.%&3t334Yd]cd
HK9JKK 788''DKDR_D`d&;&;<fjfvfv%/ 	( 	
rS   c           	          |rt        dd      rt        d      t        j                  d| j                   d       |j                  |d|||||       y)a  
        This will load the LoRA layers specified in `state_dict` into `transformer`.

        Parameters:
            state_dict (`dict`):
                A standard state dict containing the lora layer parameters. The keys can either be indexed directly
                into the unet or prefixed with an additional `unet` which can be used to distinguish between text
                encoder lora layers.
            transformer (`AuraFlowTransformer2DModel`):
                The Transformer model to load the LoRA layers into.
            adapter_name (`str`, *optional*):
                Adapter name to be used for referencing the loaded adapter model. If not specified, it will use
                `default_{i}` where i is the total number of adapters being loaded.
            low_cpu_mem_usage (`bool`, *optional*):
                Speed up model loading by only loading the pretrained LoRA weights and not initializing the random
                weights.
            hotswap (`bool`, *optional*):
                See [`~loaders.StableDiffusionLoraLoaderMixin.load_lora_weights`].
            metadata (`dict`):
                Optional LoRA adapter metadata. When supplied, the `LoraConfig` arguments of `peft` won't be derived
                from the state dict.
        r  r  r\   r   r   Nr  r  r  s           rQ   r  z2AuraFlowLoraLoaderMixin.load_lora_into_transformer  j    D h!? D 
 	hs334A67%%%/ 	& 	
rS   r   r   r   r   r   r   r"  c           	          i }i }	|st        d      |j                  | j                  || j                               |%|	j                  t	        || j                               | j                  |||||||	       ya  
        Save the LoRA parameters corresponding to the transformer.

        Arguments:
            save_directory (`str` or `os.PathLike`):
                Directory to save LoRA parameters to. Will be created if it doesn't exist.
            transformer_lora_layers (`Dict[str, torch.nn.Module]` or `Dict[str, torch.Tensor]`):
                State dict of the LoRA layers corresponding to the `transformer`.
            is_main_process (`bool`, *optional*, defaults to `True`):
                Whether the process calling this is the main process or not. Useful during distributed training and you
                need to call this function on all processes. In this case, set `is_main_process=True` only on the main
                process to avoid race conditions.
            save_function (`Callable`):
                The function to use to save the state dictionary. Useful during distributed training when you need to
                replace `torch.save` with another method. Can be configured with the environment variable
                `DIFFUSERS_SAVE_MODE`.
            safe_serialization (`bool`, *optional*, defaults to `True`):
                Whether to save the model using `safetensors` or the traditional PyTorch way with `pickle`.
            transformer_lora_adapter_metadata:
                LoRA adapter metadata associated with the transformer to be serialized with the state dict.
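
        Example (a minimal sketch; `transformer_lora_layers` is assumed to come from a training loop, for instance
        via `peft`'s `get_peft_model_state_dict`):

        ```python
        >>> from diffusers import AuraFlowPipeline

        >>> AuraFlowPipeline.save_lora_weights(
        ...     save_directory="./my-auraflow-lora",
        ...     transformer_lora_layers=transformer_lora_layers,
        ... )
        ```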
        z(You must pass `transformer_lora_layers`.Nr   rA   r   r   r  r   r   
r   r   r   r   r   r   r   r"  r~   r   s
             rQ   r   z)AuraFlowLoraLoaderMixin.save_lora_weights      B 
 "&GHH#**+BCDXDXYZ,8!((&'H#J^J^_
 	!)+#'1"7 	 	
rS   r   r   rm   r   r   c                 .    t        |   d||||d| yr   r   r   s         rQ   r   z!AuraFlowLoraLoaderMixin.fuse_loraB  r   rS   r,   c                 (    t        |   dd|i| ya  
        Reverses the effect of
        [`pipe.fuse_lora()`](https://huggingface.co/docs/diffusers/main/en/api/loaders#diffusers.loaders.LoraBaseMixin.fuse_lora).

        <Tip warning={true}>

        This is an experimental API.

        </Tip>

        Args:
            components (`List[str]`): List of LoRA-injectable components to unfuse LoRA from.
            unfuse_transformer (`bool`, defaults to `True`): Whether to unfuse the transformer LoRA parameters.
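
        Example (a minimal sketch; assumes `pipe` is a pipeline that has already loaded LoRA weights with
        `load_lora_weights`):

        ```python
        >>> pipe.fuse_lora(lora_scale=0.7)
        >>> image = pipe("a photo of a corgi").images[0]
        >>> pipe.unfuse_lora()  # restore the original, unfused module weights
        ```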
        r   Nra   r   r   s      rQ   r   z#AuraFlowLoraLoaderMixin.unfuse_lorar       	<z<V<rS   r   r   NTNNTNr@   r   r   r   r   r%  r  r   r   r   r   r   rE   r   rs   r   r   r   r  r   r   r   r   r   rq   r   r   r   r   r   r   r   s   @rQ   r'  r'  .  s    ,_'^/4S$sELL?P:Q5Q/R^  ^H '+	9
/4S$sELL?P:Q5Q/R9
 sm9
 	9
v  /
 /
 /
b 
 TX $"&#'<@5
c2;;./5
 "&c5%,,1N+O&O!P5
 	5

 5
  5
 !5
 ,4D>5
 5
t "/!-1-
I-
 -
 	-

  S	*-
` 4A.2Q =d3i = =rS   r'  c                   "    e Zd ZdZddgZeZeZg dZ	e
e	 d+deeeeej                   f   f   defd              Z	 	 d,deeeeej                   f   f   d
ee   defdZe
	 	 	 	 	 d-defd       Ze
	 	 d,deeej                   f   fd       Ze
	 	 	 	 	 	 	 d.defd       Ze
	 	 	 	 	 	 	 	 d/deeej2                  f   deeeej4                  j6                  ej                   f   f   deeej4                  j6                  f   dedededefd       Zdgddd	fdee   dededeee      f fdZ ddgfdee   f fdZ!d+ fd 	Z"e
	 	 	 d0dej4                  j6                  defd!       Z#e
d"        Z$e%	 	 d1d#d$d%d&d'edd(fd)       Z&e%d2d*       Z' xZ(S )3FluxLoraLoaderMixinz
    Load LoRA layers into [`FluxTransformer2DModel`],
    [`CLIPTextModel`](https://huggingface.co/docs/transformers/model_doc/clip#transformers.CLIPTextModel).

    Specific to [`FluxPipeline`].
    r.   r,   )norm_qnorm_knorm_added_qnorm_added_kFrV   return_alphasc                 t   |j                  dd      }|j                  dd      }|j                  dd      }|j                  dd      }|j                  dd      }|j                  dd      }	|j                  d	d      }
|j                  d
d      }|j                  dd      }|j                  dd      }d}|d}d}ddd}t        |||||||||	|
||      \  }}t        d |D              }|r?d}t        j	                  |       |j                         D ci c]  \  }}d|vs|| }}}t        d |D              }|r!t        |      }| j                  ||d||      S t        d |D              }|r!t        |      }| j                  ||d||      S t        d |D              }|r!t        |      }| j                  ||d||      S t        d |D              }|r!t        |      }| j                  ||d||      S t        |j                               }i }|D ]u  }d|v s|j                  |      }t        j                  |      rt        j                   |      st#        |t$              r|j                  |      ||<   ht'        d| d       |s|r| j                  |||||      S |S c c}}w )c
  
        Return state dict for lora weights and the network alphas.

        <Tip warning={true}>

        We support loading A1111 formatted LoRA checkpoints in a limited capacity.

        This function is experimental and might change in the future.

        </Tip>

        Parameters:
            pretrained_model_name_or_path_or_dict (`str` or `os.PathLike` or `dict`):
                Can be either:

                    - A string, the *model id* (for example `google/ddpm-celebahq-256`) of a pretrained model hosted on
                      the Hub.
                    - A path to a *directory* (for example `./my_model_directory`) containing the model weights saved
                      with [`ModelMixin.save_pretrained`].
                    - A [torch state
                      dict](https://pytorch.org/tutorials/beginner/saving_loading_models.html#what-is-a-state-dict).

            cache_dir (`Union[str, os.PathLike]`, *optional*):
                Path to a directory where a downloaded pretrained model configuration is cached if the standard cache
                is not used.
            force_download (`bool`, *optional*, defaults to `False`):
                Whether or not to force the (re-)download of the model weights and configuration files, overriding the
                cached versions if they exist.

            proxies (`Dict[str, str]`, *optional*):
                A dictionary of proxy servers to use by protocol or endpoint, for example, `{'http': 'foo.bar:3128',
                'http://hostname': 'foo.bar:4012'}`. The proxies are used on each request.
            local_files_only (`bool`, *optional*, defaults to `False`):
                Whether to only load local model weights and configuration files or not. If set to `True`, the model
                won't be downloaded from the Hub.
            token (`str` or *bool*, *optional*):
                The token to use as HTTP bearer authorization for remote files. If `True`, the token generated from
                `diffusers-cli login` (stored in `~/.huggingface`) is used.
            revision (`str`, *optional*, defaults to `"main"`):
                The specific model version to use. It can be a branch name, a tag name, a commit id, or any identifier
                allowed by Git.
            subfolder (`str`, *optional*, defaults to `""`):
                The subfolder location of a model file within a larger model repository on the Hub or locally.
            return_lora_metadata (`bool`, *optional*, defaults to False):
                When enabled, additionally return the LoRA adapter metadata, typically found in the state dict.
        r   Nr   Fr   r   r   r   r   r   r   r]   Tr   r   r   r   c              3   $   K   | ]  }d |v  
 ywr   ra   r   s     rQ   re   z6FluxLoraLoaderMixin.lora_state_dict.<locals>.<genexpr>  r   rg   r   r   c              3   $   K   | ]  }d |v  
 yw)z.lora_down.weightNra   r   s     rQ   re   z6FluxLoraLoaderMixin.lora_state_dict.<locals>.<genexpr>  s     DA*a/Drg   )rk   alphasrC  return_metadatac              3   $   K   | ]  }d |v  
 yw)	processorNra   r   s     rQ   re   z6FluxLoraLoaderMixin.lora_state_dict.<locals>.<genexpr>  s     <A{a'<rg   c              3   $   K   | ]  }d |v  
 yw)zquery_norm.scaleNra   r   s     rQ   re   z6FluxLoraLoaderMixin.lora_state_dict.<locals>.<genexpr>  s     I/14Irg   c              3   $   K   | ]  }d |v  
 yw)
base_modelNra   r   s     rQ   re   z6FluxLoraLoaderMixin.lora_state_dict.<locals>.<genexpr>  s     C1\Q.Crg   alphazThe alpha key (zU) seems to be incorrect. If you think this error is unexpected, please open as issue.)rn   r   r   r   r   r   r   _prepare_outputsr'   r   r   listru   getrE   	is_tensoris_floating_pointrp   r   rA   )r   rV   rC  r}   r   r   r   r   r   r   r   r   r   r]   r   r   r~   rk   r   r   r   r   is_kohyais_xlabsis_bfl_controlis_fal_kontextru   rj   alpha_values                                rQ   rs   z#FluxLoraLoaderMixin.lora_state_dict  sA   p JJ{D1	$4e<**Y-!::&8$?

7D)::j$/JJ{D1	jj5 **%6=%zz*@%H""OL#7iP
02W#+-)!% 

H !$#Jz#J J  {HNN8$+5+;+;+=W41aUVAV!Q$WJW DDD>zJJ''!+ 4 (   <<<>zJJ''!+ 4 (   IjIIDZPJ''!+ 4 (   C
CC?
KJ''!+ 4 (   JOO%& 
	A!|(nnQ/OOK0U5L5L[5Y^h_ )3q(9N1%$)!  -B  C 
	 0''!%+ 4 (   U Xs   J4'J4NrW   rX   c                     t         st        d      |j                  dt              }|rt	        dd      st        d      t        |t              r|j                         }d|d<     j                  |fddi|\  }}}t        d	 |j                         D              }	t         fd
|j                         D              }
|	s|
st        d      t        |j                               D ci c]6  }|j                   j                   d      rd|v r||j                  |      8 }}t        |j                               D ci c]P  j                   j                   d      r0t        fd j                  D              r|j                        R }}t!         d      st#          j                        n j$                  }d}t'        |      dkD  r j)                  |||      }|rt*        j-                  d       t'        |      dkD  r0 j/                  ||      }|D ]  |j1                  |   i         j3                  ||||| ||       t'        |      dkD  r j5                  ||d      |_         j9                  || j:                   j<                   j>                  || ||
       yc c}w c c}w )a]  
        Load LoRA weights specified in `pretrained_model_name_or_path_or_dict` into `self.transformer` and
        `self.text_encoder`.

        All kwargs are forwarded to `self.lora_state_dict`.

        See [`~loaders.StableDiffusionLoraLoaderMixin.lora_state_dict`] for more details on how the state dict is
        loaded.

        See [`~loaders.StableDiffusionLoraLoaderMixin.load_lora_into_transformer`] for more details on how the state
        dict is loaded into `self.transformer`.

        Parameters:
            pretrained_model_name_or_path_or_dict (`str` or `os.PathLike` or `dict`):
                See [`~loaders.StableDiffusionLoraLoaderMixin.lora_state_dict`].
            adapter_name (`str`, *optional*):
                Adapter name to be used for referencing the loaded adapter model. If not specified, it will use
                `default_{i}` where i is the total number of adapters being loaded.
            low_cpu_mem_usage (`bool`, *optional*):
                Speed up model loading by only loading the pretrained LoRA weights and not initializing the random
                weights.
            hotswap (`bool`, *optional*):
                See [`~loaders.StableDiffusionLoraLoaderMixin.load_lora_weights`].
            kwargs (`dict`, *optional*):
                See [`~loaders.StableDiffusionLoraLoaderMixin.lora_state_dict`].
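
        Example (a minimal sketch; the LoRA repository id and adapter name below are placeholders):

        ```python
        >>> import torch
        >>> from diffusers import FluxPipeline

        >>> pipe = FluxPipeline.from_pretrained("black-forest-labs/FLUX.1-dev", torch_dtype=torch.bfloat16)
        >>> pipe.load_lora_weights("your-namespace/your-flux-lora", adapter_name="my_lora")
        ```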
        rZ   r[   r)   r*   r\   Tr]   rC  c              3   $   K   | ]  }d |v  
 ywr_   ra   rb   s     rQ   re   z8FluxLoraLoaderMixin.load_lora_weights.<locals>.<genexpr>m  s     GcFcMGrg   c              3   H   K   | ]  }j                   D ]  }||v  
  y wN!_control_lora_supported_norm_keys)rc   rd   norm_keyr|   s      rQ   re   z8FluxLoraLoaderMixin.load_lora_weights.<locals>.<genexpr>p  s2      
 #IoIo
=EHO

   "rh   r   r`   c              3   &   K   | ]  }|v  
 y wr]  ra   )rc   r`  r   s     rQ   re   z8FluxLoraLoaderMixin.load_lora_weights.<locals>.<genexpr>  s     YhHMYs   r.   Fr   a  The LoRA weights contain parameters that have different shapes that expected by the transformer. As a result, the state_dict of the transformer has been expanded to match the LoRA parameter shapes. To get a comprehensive list of parameter names that were modified, enable debug logging.)r.   rs   )rj   r.   rW   rk   rl   r[   rX   )r.   discard_original_layersr   N) r
   rA   rn   ro   r   rp   rq   rr   rs   r   ru   rQ  r   r  rR  r_  rD   rw   r.   len/_maybe_expand_transformer_param_shape_or_error_r   r   _maybe_expand_lora_state_dictr   r  _load_norm_into_transformer_transformer_norm_layersry   r,   rz   rm   )r|   rV   rW   rX   r}   r[   r~   rj   rk   has_lora_keyshas_norm_keysr   transformer_lora_state_dicttransformer_norm_state_dictr.   has_param_with_expanded_shapes   `          `    rQ   r   z%FluxLoraLoaderMixin.load_lora_weights9  s   B  HII"JJ':<[\_T8%D D 
 ;TB4Y4^4^4`1 *.%&/Ct/C/C10
AE0
IO0
,
NH GZ__5FGG  
'1'8
 
 788 *//+,'
||t445Q78Vq[ z~~a  '
# '
 *//+,'
||t445Q78Y$2X2XYY z~~a  '
# '
 CJ$P]B^gdD$9$9:dhdtdt(-%*+a/,0,`,`8:U-) )KKk
 *+a/*.*L*L'9T +M +' 1 G!!1&A!&D"EFG 	'')#%/ 	( 		
 *+a/373S3S+'(- 4T 4K0 	(()**))%/ 	) 	
e'

'
s   -;J=AKc	           	          |rt        dd      st        d      t        j                  d| j                   d       |j                  |||||||       y)a  
        This will load the LoRA layers specified in `state_dict` into `transformer`.

        Parameters:
            state_dict (`dict`):
                A standard state dict containing the LoRA layer parameters. The keys can either be indexed directly
                into the transformer or prefixed with an additional `transformer`, which can be used to distinguish
                them from the text encoder LoRA layers.
            network_alphas (`Dict[str, float]`):
                The value of the network alpha used for stable learning and preventing underflow. This value has the
                same meaning as the `--network_alpha` option in the kohya-ss trainer script. Refer to [this
                link](https://github.com/darkstorm2150/sd-scripts/blob/main/docs/train_network_README-en.md#execute-learning).
            transformer (`FluxTransformer2DModel`):
                The Transformer model to load the LoRA layers into.
            adapter_name (`str`, *optional*):
                Adapter name to be used for referencing the loaded adapter model. If not specified, it will use
                `default_{i}` where i is the total number of adapters being loaded.
            low_cpu_mem_usage (`bool`, *optional*):
                Speed up model loading by only loading the pretrained LoRA weights and not initializing the random
                weights.
            hotswap (`bool`, *optional*):
                See [`~loaders.StableDiffusionLoraLoaderMixin.load_lora_weights`].
            metadata (`dict`):
                Optional LoRA adapter metadata. When supplied, the `LoraConfig` arguments of `peft` won't be derived
                from the state dict.
        r)   r*   r\   r   r   r  Nr  	r   r~   rj   r.   rW   rk   rl   r[   rX   s	            rQ   r  z.FluxLoraLoaderMixin.load_lora_into_transformer  sj    L _T8%D D 
 	hs334A67%%)%/ 	& 	
rS   returnc                     |xs  j                   }t        |j                               D ]@  }|j                  d      d   |k(  s|j	                  |      ||j                  | d      <   B |j                         }t        |j                               }t        |j                               }t        ||z
        }	|	rt        j                  d|	 d       |	D ]  }|j	                  |        i }
|s+|j                         D ]  }||   j                         |
|<    t        j                  d       |j                  |d      }t        |dd       }|r#t         fd|D              rt        d	| d
      |
S )Nr   r   zUnsupported keys found in state dict when trying to load normalization layers into the transformer. The following keys will be ignored:
aW  The provided state dict contains normalization layers in addition to LoRA layers. The normalization layers will directly update the state_dict of the transformer as opposed to the LoRA layers that will co-exist separately until the "fuse_lora()" method is called. That is to say, the normalization layers will always be directly fused into the transformer and can only be unfused if `discard_original_layers=True` is passed. This might also have implications when dealing with multiple LoRAs. If you notice something unexpected, please open an issue: https://github.com/huggingface/diffusers/issues.Fstrictunexpected_keysc              3   H   K   | ]  }j                   D ]  }||v  
  y wr]  r^  )rc   r   r`  r   s      rQ   re   zBFluxLoraLoaderMixin._load_norm_into_transformer.<locals>.<genexpr>	  s'     pQ#JoJoph8q=p=pra  zFound zJ as unexpected keys while trying to load norm layers into the transformer.)r  rQ  ru   splitrn   removeprefixr~   setr   r   cloner   load_state_dictrw   r   rA   )r   r~   r.   r   rc  rd   transformer_state_dicttransformer_keysstate_dict_keys
extra_keysoverwritten_layers_state_dictincompatible_keysrt  s   `            rQ   rg  z/FluxLoraLoaderMixin._load_norm_into_transformer  s    /3//
)* 	QCyy~a F*=G^^C=P
3++vhaL9:	Q
 "-!7!7!95::<=joo/0/,<<=
NN \  ]g  \h  hi  j  	 CNN3	  )+%&!( Y5KC5P5V5V5X-c2Y 	y	
 (77
57Q!"35FM p/pp _--wx  -,rS   r   c                 D    t        |||||| j                  ||
|||	       yr   r   r   s              rQ   ry   z/FluxLoraLoaderMixin.load_lora_into_text_encoder#	  r  rS   r   r   r   r   r   r   r   c
           	         i }
i }|s|st        d      |r+|
j                  | j                  || j                               |r+|
j                  | j                  || j                               |r%|j                  t        || j                               |	r%|j                  t        |	| j                               | j                  |
||||||       y)uT  
        Save the LoRA parameters corresponding to the transformer and text encoder.

        Arguments:
            save_directory (`str` or `os.PathLike`):
                Directory to save LoRA parameters to. Will be created if it doesn't exist.
            transformer_lora_layers (`Dict[str, torch.nn.Module]` or `Dict[str, torch.Tensor]`):
                State dict of the LoRA layers corresponding to the `transformer`.
            text_encoder_lora_layers (`Dict[str, torch.nn.Module]` or `Dict[str, torch.Tensor]`):
                State dict of the LoRA layers corresponding to the `text_encoder`. Must explicitly pass the text
                encoder LoRA state dict because it comes from 🤗 Transformers.
            is_main_process (`bool`, *optional*, defaults to `True`):
                Whether the process calling this is the main process or not. Useful during distributed training and you
                need to call this function on all processes. In this case, set `is_main_process=True` only on the main
                process to avoid race conditions.
            save_function (`Callable`):
                The function to use to save the state dictionary. Useful during distributed training when you need to
                replace `torch.save` with another method. Can be configured with the environment variable
                `DIFFUSERS_SAVE_MODE`.
            safe_serialization (`bool`, *optional*, defaults to `True`):
                Whether to save the model using `safetensors` or the traditional PyTorch way with `pickle`.
            transformer_lora_adapter_metadata:
                LoRA adapter metadata associated with the transformer to be serialized with the state dict.
            text_encoder_lora_adapter_metadata:
                LoRA adapter metadata associated with the text encoder to be serialized with the state dict.
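
        Example (a minimal sketch; both layer dicts are assumed to come from a training loop, for instance via
        `peft`'s `get_peft_model_state_dict`):

        ```python
        >>> from diffusers import FluxPipeline

        >>> FluxPipeline.save_lora_weights(
        ...     save_directory="./my-flux-lora",
        ...     transformer_lora_layers=transformer_lora_layers,
        ...     text_encoder_lora_layers=None,  # optionally pass the CLIP text encoder LoRA layers as well
        ... )
        ```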
        zWYou must pass at least one of `transformer_lora_layers` and `text_encoder_lora_layers`.r   N)rA   r   r   r  rz   r   r   )r   r   r   r   r   r   r   r   r"  r   r~   r   s               rQ   r   z%FluxLoraLoaderMixin.save_lora_weights^	  s    P 
 "'+Cvww"c../FH\H\]^#c../GI^I^_`,!((&'H#J^J^_ .!((&'I3K`K`a
 	!)+#'1"7 	 	
rS   r   rm   r   r   c                 L   t        | d      st        | | j                        n| j                  }t        |d      rUt	        |j
                  t              r;t        |j
                  j                               dkD  rt        j                  d       t        | 0  d||||d| y)r   r.   rh  r   a  The provided state dict contains normalization layers in addition to LoRA layers. The normalization layers will be directly updated the state_dict of the transformer as opposed to the LoRA layers that will co-exist separately until the 'fuse_lora()' method is called. That is to say, the normalization layers will always be directly fused into the transformer and can only be unfused if `discard_original_layers=True` is passed.r   Nra   )rD   rw   r  r.   rp   rh  rq   rd  ru   r   r   r   r   )r|   r   rm   r   r   r}   r.   r?   s          rQ   r   zFluxLoraLoaderMixin.fuse_lora	  s    P CJ$P]B^gdD$9$9:dhdtdtK!;<;??FK88==?@1DKKr 	 	
!!#'		

 	
rS   c                     t        | d      st        | | j                        n| j                  }t        |d      r)|j                  r|j                  |j                  d       t        |   dd|i| y)al  
        Reverses the effect of
        [`pipe.fuse_lora()`](https://huggingface.co/docs/diffusers/main/en/api/loaders#diffusers.loaders.LoraBaseMixin.fuse_lora).

        <Tip warning={true}>

        This is an experimental API.

        </Tip>

        Args:
            components (`List[str]`): List of LoRA-injectable components to unfuse LoRA from.
        r.   rh  Frr  r   Nra   )rD   rw   r  r.   rh  rz  r   r   )r|   r   r}   r.   r?   s       rQ   r   zFluxLoraLoaderMixin.unfuse_lora	  sn     CJ$P]B^gdD$9$9:dhdtdt; :;@d@d''(L(LUZ'[<z<V<rS   c           
      v   t         |           t        | d      st        | | j                        n| j
                  }t        |d      r0|j                  r$|j                  |j                  d       d|_        |r1t        |dd      "|j                  }t               }|D ]5  }|j                  d      s|j                  |j                  dd             7 |j                         D ]  \  }}t        |t        j                   j"                        s,||v s1|j$                  j&                  }|j(                  |j(                  j&                  nd}	|	du}
|j+                  d	      \  }}}|j-                  |      }|| d   }|j.                  d
   |j.                  d   }}t        j0                  d      5  t        j                   j#                  |||
|j2                        }ddd       d|i}|	|j5                  d|| d   i       j                  |dd       t7        |||       ~|t8        v sQt8        |   }t;        |j.                  d
         }t        |j<                  |      }t7        |j<                  ||       t>        jA                  d| d| d| d	        yyy# 1 sw Y   xY w)a  
        Unloads the LoRA parameters.

        Args:
            reset_to_overwritten_params (`bool`, defaults to `False`): Whether to reset the LoRA-loaded modules
                to their original params. Refer to the [Flux
                documentation](https://huggingface.co/docs/diffusers/main/en/api/pipelines/flux) to learn more.

        Examples:

        ```python
        >>> # Assuming `pipeline` is already loaded with the LoRA parameters.
        >>> pipeline.unload_lora_weights()
        >>> ...
        ```
        r.   rh  Frr  N_overwritten_params.weight r   r   r   metabiasr;   r>   r  .biasT)assignrs  Set the  attribute of the model to  from )!r   unload_lora_weightsrD   rw   r  r.   rh  rz  r  rx  endswithaddreplacenamed_modulesrp   rE   r   Linearr>   rI   r  
rpartitionget_submoduleshaperB   r;   r   setattr"_MODULE_NAME_TO_ATTRIBUTE_MAP_FLUXintr   r   r   )r|   reset_to_overwritten_paramsr.   overwritten_paramsmodule_names
param_namenamerK   rP   module_biasr  parent_module_name_current_module_nameparent_modulecurrent_param_weightin_featuresout_featuresoriginal_moduletmp_state_dictattribute_name	new_value	old_valuer?   s                          rQ   r  z'FluxLoraLoaderMixin.unload_lora_weights	  s   " 	#%BI$P]B^gdD$9$9:dhdtdt; :;@d@d''(L(LUZ'[37K0&7;@UW[+\+h!,!@!@5L0 H
&&y1 $$Z%7%7	2%FGH !, 9 9 ; "ffehhoo64<;O$*MM$6$6M6<kk6M&++"2"2SWK&d2DAEQTAU>&+>$/$=$=>P$QM+=g>N+O(0D0J0J10MOcOiOijkOlKf- */((//'(!%"/"5"5	 +: + '/0D%EN".&--v7ITFRW.7Y.Z[#33N4X\3]M+>P&*.PP)KL_)`$'(<(B(B1(E$F	$+K,>,>$O	 2 2NIN&~&66QR[Q\\bclbmmnoA" ,i&& s   ;.J//J8	c           
      p   i }||j                  |       ||j                  |       |xs | j                  }t        |j                               D ]@  }|j	                  d      d   |k(  s|j                  |      ||j                  | d      <   B d}i }t        |dd      du}	t        |d      }
|j                         D ]  \  }}t        |t        j                  j                        s,|j                  j                  }|j                   |j                   j                  nd}|du}|	r|j#                  dd      n|}| d	}| d
}||vr||   j$                  d   }||   j$                  d   }| j'                  ||      }t)        |      ||fk(  r|\  }}d}||kD  r|d| d| d| z  }||kD  r|d| d| dz  }n|dz  }|rt*        j-                  |       ||kD  s||kD  s+d}|j/                  d      \  }}}|j1                  |      }|
rt3        ||      }t        j4                  d      5  t        j                  j                  ||||j6                        }ddd       t        j8                  j                  j                  |j4                  |j6                        }t)        d |D              }|||<   d|i} ||| d<   |j;                  | dd       t=        |||       ~ |t>        v rt>        |   }!tA        |j                  j                  j$                  d         }"t        |jB                  |!      }#t=        |jB                  |!|"       t*        jE                  d|! d|" d|# d       ||| d<   |||| d<    tG        |      dkD  r||_$        |S # 1 sw Y   ExY w)z
        Control LoRA expands the shape of the input layer from (3072, 64) to (3072, 128). This method handles that and
        generalizes things a bit so that any parameter that needs expansion receives appropriate treatment.
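
        Conceptually, the expansion zero-pads the existing weight so that the extra input features are a no-op until
        the LoRA is applied. A minimal sketch with the sizes mentioned above (illustrative only, not the exact
        implementation):

        ```python
        >>> import torch
        >>> old = torch.nn.Linear(64, 3072)   # e.g. `x_embedder` before expansion
        >>> new = torch.nn.Linear(128, 3072)  # the Control LoRA expects 128 input features
        >>> with torch.no_grad():
        ...     new.weight.zero_()
        ...     new.weight[:, : old.in_features].copy_(old.weight)  # original behaviour kept for the first 64 inputs
        ...     new.bias.copy_(old.bias)
        ```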
        Nr   r   Fpeft_confighf_quantizerz.base_layerr  .lora_A.weight.lora_B.weightr   )rJ   base_modulez:Expanding the nn.Linear input/output features for module="z" because the provided LoRA checkpoint contains higher number of features than expected. The number of input_features will be expanded from z to z:, and the number of output features will be expanded from Tr  r  )rB   r;   c              3   4   K   | ]  }t        d |        yw)r   N)slice)rc   dims     rQ   re   zVFluxLoraLoaderMixin._maybe_expand_transformer_param_shape_or_error_.<locals>.<genexpr>
  s     "PS5C="Ps   r>   r  )rs  r  r  r  r  r  r  )%r   r  rQ  ru   rv  rn   rw  rw   rD   r  rp   rE   r   r  r>   rI   r  r  r  _calculate_module_shapetupler   debugr  r  rR   rB   r;   
zeros_likerz  r  r  r  r   r   rd  r  )$r   r.   rs   norm_state_dictr   r~   rd   has_param_with_shape_updater  is_peft_loadedis_quantizedr  rK   rP   r  r  lora_base_namelora_A_weight_namelora_B_weight_namer  r  module_weight_shapemodule_out_featuresmodule_in_featuresdebug_messager  r  r  r  expanded_module
new_weightslicesr  r  r  r  s$                                       rQ   re  zCFluxLoraLoaderMixin._maybe_expand_transformer_param_shape_or_error_<
  sS    
&o.&o. /3//
)* 	QCyy~a F*=G^^C=P
3++vhaL9:	Q
 ',# mTB$N{N;'557 W	XLD&&%((//2 & 2 228++2Ifkk..t"$.DRmR!@X\(6'7~%F"(6'7~%F"%Z7();<BB1E)*<=CCAF
 '*&A&Aag&A&h# ,-,1LL:M7#%7 "!33!TUYTZ [));(<DOM
  "55!))<(=T,qRM
 "S(M LL/"55GY9Y26/AEQTAU>&+>$/$=$=>P$QM#(RS^`f(g f- */((//'DH[H[ +: + "'!1!1'..33M<P<PXeXkXk"J #"P<O"PPF)6Jv&&.
%;N".1<v.#33N4X\3]M+>P&*.PP)KL_)`$'(>(>(C(C(I(I!(L$M	$+K,>,>$O	 2 2NIN&~&66QR[Q\\bclbmmno KX&*=)>g'FG".LW*.A-B%+HIoW	Xr !"Q&.@K+**S s   :.N++N5	c                    t               }|j                         }| j                   d}|D cg c]#  }|j                  d      s|d t	        d        % }}|D cg c]"  }|j                  |      s|t	        |      d  $ }}t        t        |            }t        |j                         D 	ch c]  \  }}	|	 c}	}      }
t        |      t        |
      z
  }|rt        j                  d| d       |D ]4  }||v r	|j                  |d       d|v r|j                  |d       dn|j                  |d       d}||   }|| | d   }| j                  ||      }|d	   |j                  d	   kD  r|j                  d
   |j                  d	   f}t        j                  ||j                        }|d d d |j                  d	   f   j!                  |       ||| | d<   |j#                  |       |d	   |j                  d	   k  st%        d| d|j                   d       |r&t        j'                  d| j                   d| d       |S c c}w c c}w c c}	}w )Nr   r  zFound unexpected modules: z. These will be ignored.r  z.base_layer.weightr  )rJ   base_weight_param_namer   r   rB   zThis LoRA param (z*.lora_A.weight) has an incompatible shape zk. Please open an issue to file for a feature request - https://github.com/huggingface/diffusers/issues/new.zGThe following LoRA modules were zero padded to match the state dict of z: zn. Please open an issue if you think this was unexpected - https://github.com/huggingface/diffusers/issues/new.)rx  r~   r  r  rd  r   sortedr  r   r  r  r  r  rE   zerosrB   copy_r  NotImplementedErrorr   )r   r.   rs   expanded_module_namesr{  r   rd   lora_module_namesr  r  transformer_module_namesunexpected_modulesr   base_param_namebase_weight_paramlora_A_parambase_module_shaper  expanded_state_dict_weights                      rQ   rf  z1FluxLoraLoaderMixin._maybe_expand_lora_state_dict
  s    #!,!7!7!9(()+ 6E
.1UeHfC(3'(()
 
 >OjTRVRaRabhRiT#f+-0jj"3'8#9:#){?X?X?Z*[GD!4*[#\  !23c:R6SSLL56H5IIabc" 	A&& ii+,,>?CYY 99VR());<		&"-.g6 
 !7 G*fXaS+GHL !$ ; ;+fu ; v #l&8&8&;;%++A.0A0G0G0JK-2[[GXG_G_-`**1.E0B0B10E.E+EFLL\Z@Z6(1#^ <=%))!,"1%(:(:1(==)'s*TUaUgUgTh  iT  U -	4 !KKYZ]ZnZnYooq  sH  rI  Iw  x S
 k*[s   III 3I 1I%
rJ   ztorch.nn.Moduler  ztorch.nn.Linearr  z
torch.Sizec                    dt         j                  fd}| ||j                        S |S|j                  d      st	        d|d      |j                  dd      d   }t        | |      } ||j                        S t	        d      )	Nr>   c                     | j                   j                  dk(  r| j                  j                  S | j                   j                  dk(  r| j                  S | j                  S )Nr4   r6   )r?   r@   rH   r  quant_shape)r>   s    rQ   _get_weight_shapezFFluxLoraLoaderMixin._calculate_module_shape.<locals>._get_weight_shape
  sR    ((L8))///!!**o=)))||#rS   r  zaInvalid `base_weight_param_name` passed as it does not end with '.weight' base_weight_param_name=r   r   r   zBEither `base_module` or `base_weight_param_name` must be provided.)rE   r   r>   r  rA   rsplitr   )rJ   r  r  r  module_path	submodules         rQ   r  z+FluxLoraLoaderMixin._calculate_module_shape
  s    	$ell 	$ "$[%7%788#/)229= xaw`yyz{  177	1EaHK-e[AI$Y%5%566]^^rS   c                 v    | g}|r|j                  |       |r|j                  |       |s|rt        |      S | S r]  )appendr  )r~   rk   rH  rC  rI  outputss         rQ   rP  z$FluxLoraLoaderMixin._prepare_outputs  s:    ,NN6"NN8$"/?uW~SSrS   )Fr   NNNFFr   r   )NNN)NN)NFF))r@   r   r   r   r   r%  r  r   rz   r_  r   r   r   r   r   rE   r   r   rs   r   r   r  rg  ry   r   r   r   r   r   r   r   r   r   r   r  re  rf  staticmethodr  rP  r   r   s   @rQ   r>  r>    s2    ,^<')(\% $d/4S$sELL?P:Q5Q/Rd d  dR '+	{
/4S$sELL?P:Q5Q/R{
 sm{
 	{
z  4
 4
 4
l 
  %3- 
c5<<	 3- 3-j  7
 7
 7
r 
 TX?C $"&#'*.+/E
c2;;./E
 "&c5%,,1N+O&O!PE
 #'sEHHOO';"<	E

 E
 E
  E
 !E
 E
R "/!-1:
I:
 :
 	:

  S	*:
x 4A.2Q =d3i =*BH  y+XX__y+ 
y+ y+v . .`  *.&*_ _&_ !$_ 
	_ _4 T TrS   r>  c                   "   e Zd ZddgZeZeZe	 	 	 	 	 dde	fd       Z
e	 	 	 	 	 	 	 dde	fd       Ze	 	 	 	 	 	 ddeeej                  f   deeej$                  j&                  f   d	eeej$                  j&                  f   d
e	dedede	fd       Zy)AmusedLoraLoaderMixinr.   r,   NrX   c	           	          |rt        dd      st        d      t        j                  d| j                   d       |j                  |||||||       y)a  
        This will load the LoRA layers specified in `state_dict` into `transformer`.

        Parameters:
            state_dict (`dict`):
                A standard state dict containing the LoRA layer parameters. The keys can either be indexed directly
                into the transformer or prefixed with an additional `transformer`, which can be used to distinguish
                them from the text encoder LoRA layers.
            network_alphas (`Dict[str, float]`):
                The value of the network alpha used for stable learning and preventing underflow. This value has the
                same meaning as the `--network_alpha` option in the kohya-ss trainer script. Refer to [this
                link](https://github.com/darkstorm2150/sd-scripts/blob/main/docs/train_network_README-en.md#execute-learning).
            transformer (`UVit2DModel`):
                The Transformer model to load the LoRA layers into.
            adapter_name (`str`, *optional*):
                Adapter name to be used for referencing the loaded adapter model. If not specified, it will use
                `default_{i}` where i is the total number of adapters being loaded.
            low_cpu_mem_usage (`bool`, *optional*):
                Speed up model loading by only loading the pretrained LoRA weights and not initializing the random
                weights.
            hotswap (`bool`, *optional*):
                See [`~loaders.StableDiffusionLoraLoaderMixin.load_lora_weights`].
            metadata (`dict`):
                Optional LoRA adapter metadata. When supplied, the `LoraConfig` arguments of `peft` won't be derived
                from the state dict.
        r)   r*   r\   r   r   r  Nr  ro  s	            rQ   r  z0AmusedLoraLoaderMixin.load_lora_into_transformer  sj    N _T8%D D 
 	hs334A67%%)%/ 	& 	
rS   c                 D    t        |||||| j                  ||
|||	       yr   r   r   s              rQ   ry   z1AmusedLoraLoaderMixin.load_lora_into_text_encoderM  r  rS   r   r   r   r   r   r   r   c                    i }|s|st        d      |r+|j                  | j                  || j                               |r+|j                  | j                  || j                               | j                  ||||||       y)u  
        Save the LoRA parameters corresponding to the UNet and text encoder.

        Arguments:
            save_directory (`str` or `os.PathLike`):
                Directory to save LoRA parameters to. Will be created if it doesn't exist.
            unet_lora_layers (`Dict[str, torch.nn.Module]` or `Dict[str, torch.Tensor]`):
                State dict of the LoRA layers corresponding to the `unet`.
            text_encoder_lora_layers (`Dict[str, torch.nn.Module]` or `Dict[str, torch.Tensor]`):
                State dict of the LoRA layers corresponding to the `text_encoder`. Must explicitly pass the text
                encoder LoRA state dict because it comes from 🤗 Transformers.
            is_main_process (`bool`, *optional*, defaults to `True`):
                Whether the process calling this is the main process or not. Useful during distributed training and you
                need to call this function on all processes. In this case, set `is_main_process=True` only on the main
                process to avoid race conditions.
            save_function (`Callable`):
                The function to use to save the state dictionary. Useful during distributed training when you need to
                replace `torch.save` with another method. Can be configured with the environment variable
                `DIFFUSERS_SAVE_MODE`.
            safe_serialization (`bool`, *optional*, defaults to `True`):
                Whether to save the model using `safetensors` or the traditional PyTorch way with `pickle`.
        zVYou must pass at least one of `transformer_lora_layers` or `text_encoder_lora_layers`.)r~   r   r   r   r   r   N)rA   r   r   r  rz   r   )	r   r   r   r   r   r   r   r   r~   s	            rQ   r   z'AmusedLoraLoaderMixin.save_lora_weights  s    B 
'+Cuvv"c../FH\H\]^#c../GI^I^_` 	!)+#'1 	 	
rS   r  r   )NNTNNT)r@   r   r   r   r%  r  r   rz   r   r   r  ry   r   r   r   r   r   rE   r   r   r   r   ra   rS   rQ   r  r    s   +^<') 4
 4
 4
l  7
 7
 7
r  @D>B $"&#'3
c2;;./3
 #'sEHHOO';"<3
 "&c588??&:!;	3

 3
 3
  3
 !3
 3
rS   r  c                       e Zd ZdZdgZeZeede	e
ee
ej                  f   f   fd              Z	 	 dde	e
ee
ej                  f   f   dee
   defd	Ze	 	 	 	 	 ddefd
       Ze	 	 	 	 	 	 dde	e
ej(                  f   dee
e	ej*                  j,                  ej                  f   f   dede
dededee   fd       Zdgdddfdee
   dededeee
      f fdZdgfdee
   f fdZ xZS )CogVideoXLoraLoaderMixinzc
    Load LoRA layers into [`CogVideoXTransformer3DModel`]. Specific to [`CogVideoXPipeline`].
    r.   rV   c                    |j                  dd      }|j                  dd      }|j                  dd      }|j                  dd      }|j                  dd      }|j                  dd      }|j                  d	d      }	|j                  d
d      }
|j                  dd      }|j                  dd      }d}|d}d}ddd}t        ||
||||||||	||      \  }}t        d |D              }|r?d}t        j	                  |       |j                         D ci c]  \  }}d|vs|| }}}|r||f}|S |}|S c c}}w )r  r   Nr   Fr   r   r   r   r   r   r   r]   Tr   r   r   r   c              3   $   K   | ]  }d |v  
 ywr   ra   r   s     rQ   re   z;CogVideoXLoraLoaderMixin.lora_state_dict.<locals>.<genexpr>!  r   rg   r   r   r  r  s                         rQ   rs   z(CogVideoXLoraLoaderMixin.lora_state_dict  r*  r  NFrW   rX   c           	         t         st        d      |j                  dt              }|rt	        dd      rt        d      t        |t              r|j                         }d|d<    | j                  |fi |\  }}t        d |j                         D              }|st        d	      | j                  |t        | d
      st        | | j                        n| j                  ||| ||       y)r,  rZ   r[   r  r  r\   Tr]   c              3   $   K   | ]  }d |v  
 ywr_   ra   rb   s     rQ   re   z=CogVideoXLoraLoaderMixin.load_lora_weights.<locals>.<genexpr>W  rf   rg   rh   r.   r  Nr.  r  s	            rQ   r   z*CogVideoXLoraLoaderMixin.load_lora_weights*  r/  rS   c           	          |rt        dd      rt        d      t        j                  d| j                   d       |j                  |d|||||       y)a  
        This will load the LoRA layers specified in `state_dict` into `transformer`.

        Parameters:
            state_dict (`dict`):
                A standard state dict containing the LoRA layer parameters. The keys can either be indexed directly
                into the transformer or prefixed with an additional `transformer`, which can be used to distinguish
                them from the text encoder LoRA layers.
            transformer (`CogVideoXTransformer3DModel`):
                The Transformer model to load the LoRA layers into.
            adapter_name (`str`, *optional*):
                Adapter name to be used for referencing the loaded adapter model. If not specified, it will use
                `default_{i}` where i is the total number of adapters being loaded.
            low_cpu_mem_usage (`bool`, *optional*):
                Speed up model loading by only loading the pretrained LoRA weights and not initializing the random
                weights.
            hotswap (`bool`, *optional*):
                See [`~loaders.StableDiffusionLoraLoaderMixin.load_lora_weights`].
            metadata (`dict`):
                Optional LoRA adapter metadata. When supplied, the `LoraConfig` arguments of `peft` won't be derived
                from the state dict.
        r  r  r\   r   r   Nr  r  r  s           rQ   r  z3CogVideoXLoraLoaderMixin.load_lora_into_transformere  r1  rS   r   r   r   r   r   r   r"  c           	          i }i }	|st        d      |j                  | j                  || j                               |%|	j                  t	        || j                               | j                  |||||||	       yr3  r4  r5  s
             rQ   r   z*CogVideoXLoraLoaderMixin.save_lora_weights  r6  rS   r   r   rm   r   r   c                 .    t        |   d||||d| yr   r   r   s         rQ   r   z"CogVideoXLoraLoaderMixin.fuse_lora  r   rS   c                 (    t        |   dd|i| yr9  r   r   s      rQ   r   z$CogVideoXLoraLoaderMixin.unfuse_lora   r:  rS   r   r   r;  r<  r   s   @rQ   r  r    s    ,_'^/4S$sELL?P:Q5Q/R^  ^F '+	9
/4S$sELL?P:Q5Q/R9
 sm9
 	9
v  /
 /
 /
b 
 TX $"&#'<@5
c2;;./5
 "&c5%,,1N+O&O!P5
 	5

 5
  5
 !5
 ,4D>5
 5
r "/!-1-
I-
 -
 	-

  S	*-
^ 4A/ =d3i = =rS   r  c                       e Zd ZdZdgZeZeede	e
ee
ej                  f   f   fd              Z	 	 dde	e
ee
ej                  f   f   dee
   defd	Ze	 	 	 	 	 ddefd
       Ze	 	 	 	 	 	 dde	e
ej(                  f   dee
e	ej*                  j,                  ej                  f   f   dede
dededee   fd       Zdgdddfdee
   dededeee
      f fdZdgfdee
   f fdZ xZS )Mochi1LoraLoaderMixinz[
    Load LoRA layers into [`MochiTransformer3DModel`]. Specific to [`MochiPipeline`].
    r.   rV   c                    |j                  dd      }|j                  dd      }|j                  dd      }|j                  dd      }|j                  dd      }|j                  dd      }|j                  d	d      }	|j                  d
d      }
|j                  dd      }|j                  dd      }d}|d}d}ddd}t        ||
||||||||	||      \  }}t        d |D              }|r?d}t        j	                  |       |j                         D ci c]  \  }}d|vs|| }}}|r||f}|S |}|S c c}}w )r  r   Nr   Fr   r   r   r   r   r   r   r]   Tr   r   r   r   c              3   $   K   | ]  }d |v  
 ywr   ra   r   s     rQ   re   z8Mochi1LoraLoaderMixin.lora_state_dict.<locals>.<genexpr>t  r   rg   r   r   r  r  s                         rQ   rs   z%Mochi1LoraLoaderMixin.lora_state_dict  r*  r  NFrW   rX   c           	         t         st        d      |j                  dt              }|rt	        dd      rt        d      t        |t              r|j                         }d|d<    | j                  |fi |\  }}t        d |j                         D              }|st        d	      | j                  |t        | d
      st        | | j                        n| j                  ||| ||       y)r,  rZ   r[   r  r  r\   Tr]   c              3   $   K   | ]  }d |v  
 ywr_   ra   rb   s     rQ   re   z:Mochi1LoraLoaderMixin.load_lora_weights.<locals>.<genexpr>  rf   rg   rh   r.   r  Nr.  r  s	            rQ   r   z'Mochi1LoraLoaderMixin.load_lora_weights~  r/  rS   c           	          |rt        dd      rt        d      t        j                  d| j                   d       |j                  |d|||||       y)a   
        This will load the LoRA layers specified in `state_dict` into `transformer`.

        Parameters:
            state_dict (`dict`):
                A standard state dict containing the LoRA layer parameters. The keys can either be indexed directly
                into the transformer or prefixed with an additional `transformer`, which can be used to distinguish
                them from the text encoder LoRA layers.
            transformer (`MochiTransformer3DModel`):
                The Transformer model to load the LoRA layers into.
            adapter_name (`str`, *optional*):
                Adapter name to be used for referencing the loaded adapter model. If not specified, it will use
                `default_{i}` where i is the total number of adapters being loaded.
            low_cpu_mem_usage (`bool`, *optional*):
                Speed up model loading by only loading the pretrained LoRA weights and not initializing the random
                weights.
            hotswap (`bool`, *optional*):
                See [`~loaders.StableDiffusionLoraLoaderMixin.load_lora_weights`].
            metadata (`dict`):
                Optional LoRA adapter metadata. When supplied, the `LoraConfig` arguments of `peft` won't be derived
                from the state dict.
        r  r  r\   r   r   Nr  r  r  s           rQ   r  z0Mochi1LoraLoaderMixin.load_lora_into_transformer  r1  rS   r   r   r   r   r   r   r"  c           	          i }i }	|st        d      |j                  | j                  || j                               |%|	j                  t	        || j                               | j                  |||||||	       yr3  r4  r5  s
             rQ   r   z'Mochi1LoraLoaderMixin.save_lora_weights  r6  rS   r   r   rm   r   r   c                 .    t        |   d||||d| yr   r   r   s         rQ   r   zMochi1LoraLoaderMixin.fuse_lora&  r   rS   c                 (    t        |   dd|i| yr9  r   r   s      rQ   r   z!Mochi1LoraLoaderMixin.unfuse_loraV  r:  rS   r   r   r;  r<  r   s   @rQ   r  r        ,_'^/4S$sELL?P:Q5Q/R^  ^H '+	9
/4S$sELL?P:Q5Q/R9
 sm9
 	9
v  /
 /
 /
b 
 TX $"&#'<@5
c2;;./5
 "&c5%,,1N+O&O!P5
 	5

 5
  5
 !5
 ,4D>5
 5
t "/!-1-
I-
 -
 	-

  S	*-
` 4A/ =d3i = =rS   r  c                       e Zd ZdZdgZeZeede	e
ee
ej                  f   f   fd              Z	 	 dde	e
ee
ej                  f   f   dee
   defd	Ze	 	 	 	 	 ddefd
       Ze	 	 	 	 	 	 dde	e
ej(                  f   dee
e	ej*                  j,                  ej                  f   f   dede
dededee   fd       Zdgdddfdee
   dededeee
      f fdZdgfdee
   f fdZ xZS )LTXVideoLoraLoaderMixinz\
    Load LoRA layers into [`LTXVideoTransformer3DModel`]. Specific to [`LTXPipeline`].
    r.   rV   c                    |j                  dd      }|j                  dd      }|j                  dd      }|j                  dd      }|j                  dd      }|j                  dd      }|j                  d	d      }	|j                  d
d      }
|j                  dd      }|j                  dd      }d}|d}d}ddd}t        ||
||||||||	||      \  }}t        d |D              }|r?d}t        j	                  |       |j                         D ci c]  \  }}d|vs|| }}}t        d |D              }|rt        |      }|r||f}|S |}|S c c}}w )rE  r   Nr   Fr   r   r   r   r   r   r   r]   Tr   r   r   r   c              3   $   K   | ]  }d |v  
 ywr   ra   r   s     rQ   re   z:LTXVideoLoraLoaderMixin.lora_state_dict.<locals>.<genexpr>  r   rg   r   r   c              3   >   K   | ]  }|j                  d         ywzdiffusion_model.Nr   r   s     rQ   re   z:LTXVideoLoraLoaderMixin.lora_state_dict.<locals>.<genexpr>  s     %[1all3E&F%[   )rn   r   r   r   r   r   r#   r   rV   r}   r   r   r   r   r   r   r   r   r   r]   r   r   r~   rk   r   r   r   r   is_non_diffusers_formatr   s                          rQ   rs   z'LTXVideoLoraLoaderMixin.lora_state_dictp  s   n JJ{D1	$4e<**Y-!::&8$?

7D)::j$/JJ{D1	jj5 **%6=%zz*@%H""OL#7iP
02W#+-)!% 

H !$#Jz#J J  {HNN8$+5+;+;+=W41aUVAV!Q$WJW"%%[PZ%["["FzRJ(<z8$
 CM
 X   E'ENFrW   rX   c           	         t         st        d      |j                  dt              }|rt	        dd      rt        d      t        |t              r|j                         }d|d<    | j                  |fi |\  }}t        d |j                         D              }|st        d	      | j                  |t        | d
      st        | | j                        n| j                  ||| ||       y)r,  rZ   r[   r  r  r\   Tr]   c              3   $   K   | ]  }d |v  
 ywr_   ra   rb   s     rQ   re   z<LTXVideoLoraLoaderMixin.load_lora_weights.<locals>.<genexpr>  rf   rg   rh   r.   r  Nr.  r  s	            rQ   r   z)LTXVideoLoraLoaderMixin.load_lora_weights  r/  rS   c           	          |rt        dd      rt        d      t        j                  d| j                   d       |j                  |d|||||       y)a  
        This will load the LoRA layers specified in `state_dict` into `transformer`.

        Parameters:
            state_dict (`dict`):
                A standard state dict containing the lora layer parameters. The keys can either be indexed directly
                into the unet or prefixed with an additional `unet` which can be used to distinguish between text
                encoder lora layers.
            transformer (`LTXVideoTransformer3DModel`):
                The Transformer model to load the LoRA layers into.
            adapter_name (`str`, *optional*):
                Adapter name to be used for referencing the loaded adapter model. If not specified, it will use
                `default_{i}` where i is the total number of adapters being loaded.
            low_cpu_mem_usage (`bool`, *optional*):
                Speed up model loading by only loading the pretrained LoRA weights and not initializing the random
                weights.
            hotswap (`bool`, *optional*):
                See [`~loaders.StableDiffusionLoraLoaderMixin.load_lora_weights`].
            metadata (`dict`):
                Optional LoRA adapter metadata. When supplied, the `LoraConfig` arguments of `peft` won't be derived
                from the state dict.
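
        Example (a minimal sketch of the usual entry point, [`~loaders.LTXVideoLoraLoaderMixin.load_lora_weights`],
        which calls this method internally; the repo id and LoRA filename below are illustrative, not verified):

        ```py
        import torch

        from diffusers import LTXPipeline

        pipe = LTXPipeline.from_pretrained("Lightricks/LTX-Video", torch_dtype=torch.bfloat16)
        # The transformer-prefixed keys of the checkpoint are routed through `load_lora_into_transformer`.
        pipe.load_lora_weights("path/to/ltx_video_lora.safetensors", adapter_name="my_style")
        ```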
        r  r  r\   r   r   Nr  r  r  s           rQ   r  z2LTXVideoLoraLoaderMixin.load_lora_into_transformer  r1  rS   r   r   r   r   r   r   r"  c           	          i }i }	|st        d      |j                  | j                  || j                               |%|	j                  t	        || j                               | j                  |||||||	       yr3  r4  r5  s
             rQ   r   z)LTXVideoLoraLoaderMixin.save_lora_weightsD  r6  rS   r   r   rm   r   r   c                 .    t        |   d||||d| yr   r   r   s         rQ   r   z!LTXVideoLoraLoaderMixin.fuse_lora~  r   rS   c                 (    t        |   dd|i| yr9  r   r   s      rQ   r   z#LTXVideoLoraLoaderMixin.unfuse_lora  r:  rS   r   r   r;  r<  r   s   @rQ   r  r  h      ,_'a/4S$sELL?P:Q5Q/Ra  aN '+	9
/4S$sELL?P:Q5Q/R9
 sm9
 	9
v  /
 /
 /
b 
 TX $"&#'<@5
c2;;./5
 "&c5%,,1N+O&O!P5
 	5

 5
  5
 !5
 ,4D>5
 5
t "/!-1-
I-
 -
 	-

  S	*-
` 4A/ =d3i = =rS   r  c                       e Zd ZdZdgZeZeede	e
ee
ej                  f   f   fd              Z	 	 dde	e
ee
ej                  f   f   dee
   defd	Ze	 	 	 	 	 ddefd
       Ze	 	 	 	 	 	 dde	e
ej(                  f   dee
e	ej*                  j,                  ej                  f   f   dede
dededee   fd       Zdgdddfdee
   dededeee
      f fdZdgfdee
   f fdZ xZS )SanaLoraLoaderMixinzY
    Load LoRA layers into [`SanaTransformer2DModel`]. Specific to [`SanaPipeline`].
    r.   rV   c                    |j                  dd      }|j                  dd      }|j                  dd      }|j                  dd      }|j                  dd      }|j                  dd      }|j                  d	d      }	|j                  d
d      }
|j                  dd      }|j                  dd      }d}|d}d}ddd}t        ||
||||||||	||      \  }}t        d |D              }|r?d}t        j	                  |       |j                         D ci c]  \  }}d|vs|| }}}|r||f}|S |}|S c c}}w )r  r   Nr   Fr   r   r   r   r   r   r   r]   Tr   r   r   r   c              3   $   K   | ]  }d |v  
 ywr   ra   r   s     rQ   re   z6SanaLoraLoaderMixin.lora_state_dict.<locals>.<genexpr>"  r   rg   r   r   r  r  s                         rQ   rs   z#SanaLoraLoaderMixin.lora_state_dict  r*  r  NFrW   rX   c           	         t         st        d      |j                  dt              }|rt	        dd      rt        d      t        |t              r|j                         }d|d<    | j                  |fi |\  }}t        d |j                         D              }|st        d	      | j                  |t        | d
      st        | | j                        n| j                  ||| ||       y)r,  rZ   r[   r  r  r\   Tr]   c              3   $   K   | ]  }d |v  
 ywr_   ra   rb   s     rQ   re   z8SanaLoraLoaderMixin.load_lora_weights.<locals>.<genexpr>Y  rf   rg   rh   r.   r  Nr.  r  s	            rQ   r   z%SanaLoraLoaderMixin.load_lora_weights,  r/  rS   c           	          |rt        dd      rt        d      t        j                  d| j                   d       |j                  |d|||||       y)a  
        This will load the LoRA layers specified in `state_dict` into `transformer`.

        Parameters:
            state_dict (`dict`):
                A standard state dict containing the lora layer parameters. The keys can either be indexed directly
                into the transformer or prefixed with an additional `transformer` which can be used to distinguish between text
                encoder lora layers.
            transformer (`SanaTransformer2DModel`):
                The Transformer model to load the LoRA layers into.
            adapter_name (`str`, *optional*):
                Adapter name to be used for referencing the loaded adapter model. If not specified, it will use
                `default_{i}` where i is the total number of adapters being loaded.
            low_cpu_mem_usage (`bool`, *optional*):
                Speed up model loading by only loading the pretrained LoRA weights and not initializing the random
                weights.
            hotswap (`bool`, *optional*):
                See [`~loaders.StableDiffusionLoraLoaderMixin.load_lora_weights`].
            metadata (`dict`):
                Optional LoRA adapter metadata. When supplied, the `LoraConfig` arguments of `peft` won't be derived
                from the state dict.
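
        Example (a minimal sketch; the repo id and LoRA filename are illustrative, not verified):

        ```py
        import torch

        from diffusers import SanaPipeline

        pipe = SanaPipeline.from_pretrained(
            "Efficient-Large-Model/Sana_1600M_1024px_diffusers", torch_dtype=torch.bfloat16
        )
        # `low_cpu_mem_usage=True` initializes the injected LoRA layers directly from the checkpoint weights.
        pipe.load_lora_weights("path/to/sana_lora.safetensors", adapter_name="style", low_cpu_mem_usage=True)
        ```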
        r  r  r\   r   r   Nr  r  r  s           rQ   r  z.SanaLoraLoaderMixin.load_lora_into_transformerg  r1  rS   r   r   r   r   r   r   r"  c           	          i }i }	|st        d      |j                  | j                  || j                               |%|	j                  t	        || j                               | j                  |||||||	       yr3  r4  r5  s
             rQ   r   z%SanaLoraLoaderMixin.save_lora_weights  r6  rS   r   r   rm   r   r   c                 .    t        |   d||||d| yr   r   r   s         rQ   r   zSanaLoraLoaderMixin.fuse_lora  r   rS   c                 (    t        |   dd|i| yr9  r   r   s      rQ   r   zSanaLoraLoaderMixin.unfuse_lora  r:  rS   r   r   r;  r<  r   s   @rQ   r  r    r  rS   r  c                       e Zd ZdZdgZeZeede	e
ee
ej                  f   f   fd              Z	 	 dde	e
ee
ej                  f   f   dee
   defd	Ze	 	 	 	 	 ddefd
       Ze	 	 	 	 	 	 dde	e
ej(                  f   dee
e	ej*                  j,                  ej                  f   f   dede
dededee   fd       Zdgdddfdee
   dededeee
      f fdZdgfdee
   f fdZ xZS )HunyuanVideoLoraLoaderMixinzi
    Load LoRA layers into [`HunyuanVideoTransformer3DModel`]. Specific to [`HunyuanVideoPipeline`].
    r.   rV   c                    |j                  dd      }|j                  dd      }|j                  dd      }|j                  dd      }|j                  dd      }|j                  dd      }|j                  d	d      }	|j                  d
d      }
|j                  dd      }|j                  dd      }d}|d}d}ddd}t        ||
||||||||	||      \  }}t        d |D              }|r?d}t        j	                  |       |j                         D ci c]  \  }}d|vs|| }}}t        d |D              }|rt        |      }|r||f}|S |}|S c c}}w )aZ
  
        Return state dict for lora weights and the network alphas.

        <Tip warning={true}>

        We support loading original-format HunyuanVideo LoRA checkpoints.

        This function is experimental and might change in the future.

        </Tip>

        Parameters:
            pretrained_model_name_or_path_or_dict (`str` or `os.PathLike` or `dict`):
                Can be either:

                    - A string, the *model id* (for example `google/ddpm-celebahq-256`) of a pretrained model hosted on
                      the Hub.
                    - A path to a *directory* (for example `./my_model_directory`) containing the model weights saved
                      with [`ModelMixin.save_pretrained`].
                    - A [torch state
                      dict](https://pytorch.org/tutorials/beginner/saving_loading_models.html#what-is-a-state-dict).

            cache_dir (`Union[str, os.PathLike]`, *optional*):
                Path to a directory where a downloaded pretrained model configuration is cached if the standard cache
                is not used.
            force_download (`bool`, *optional*, defaults to `False`):
                Whether or not to force the (re-)download of the model weights and configuration files, overriding the
                cached versions if they exist.

            proxies (`Dict[str, str]`, *optional*):
                A dictionary of proxy servers to use by protocol or endpoint, for example, `{'http': 'foo.bar:3128',
                'http://hostname': 'foo.bar:4012'}`. The proxies are used on each request.
            local_files_only (`bool`, *optional*, defaults to `False`):
                Whether to only load local model weights and configuration files or not. If set to `True`, the model
                won't be downloaded from the Hub.
            token (`str` or *bool*, *optional*):
                The token to use as HTTP bearer authorization for remote files. If `True`, the token generated from
                `diffusers-cli login` (stored in `~/.huggingface`) is used.
            revision (`str`, *optional*, defaults to `"main"`):
                The specific model version to use. It can be a branch name, a tag name, a commit id, or any identifier
                allowed by Git.
            subfolder (`str`, *optional*, defaults to `""`):
                The subfolder location of a model file within a larger model repository on the Hub or locally.
            return_lora_metadata (`bool`, *optional*, defaults to `False`):
                When enabled, additionally return the LoRA adapter metadata, typically found in the state dict.
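
        Example (a minimal sketch for inspecting a checkpoint without loading it into a model; the LoRA filename is
        a placeholder):

        ```py
        from diffusers import HunyuanVideoPipeline

        # Original-format HunyuanVideo LoRAs are converted to the Diffusers key layout on the fly.
        state_dict, metadata = HunyuanVideoPipeline.lora_state_dict(
            "path/to/hunyuan_video_lora.safetensors", return_lora_metadata=True
        )
        print(sorted(state_dict)[:5])
        ```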
        r   Nr   Fr   r   r   r   r   r   r   r]   Tr   r   r   r   c              3   $   K   | ]  }d |v  
 ywr   ra   r   s     rQ   re   z>HunyuanVideoLoraLoaderMixin.lora_state_dict.<locals>.<genexpr>v  r   rg   r   r   c              3   $   K   | ]  }d |v  
 yw)img_attn_qkvNra   r   s     rQ   re   z>HunyuanVideoLoraLoaderMixin.lora_state_dict.<locals>.<genexpr>|  s     'P!(;'Prg   )rn   r   r   r   r   r   r   )r   rV   r}   r   r   r   r   r   r   r   r   r   r]   r   r   r~   rk   r   r   r   r   is_original_hunyuan_videor   s                          rQ   rs   z+HunyuanVideoLoraLoaderMixin.lora_state_dict  s   n JJ{D1	$4e<**Y-!::&8$?

7D)::j$/JJ{D1	jj5 **%6=%zz*@%H""OL#7iP
02W#+-)!% 

H !$#Jz#J J  {HNN8$+5+;+;+=W41aUVAV!Q$WJW$''PZ'P$P!$A*MJ(<z8$
 CM
 Xr	  NFrW   rX   c           	         t         st        d      |j                  dt              }|rt	        dd      rt        d      t        |t              r|j                         }d|d<    | j                  |fi |\  }}t        d |j                         D              }|st        d	      | j                  |t        | d
      st        | | j                        n| j                  ||| ||       y)r,  rZ   r[   r  r  r\   Tr]   c              3   $   K   | ]  }d |v  
 ywr_   ra   rb   s     rQ   re   z@HunyuanVideoLoraLoaderMixin.load_lora_weights.<locals>.<genexpr>  rf   rg   rh   r.   r  Nr.  r  s	            rQ   r   z-HunyuanVideoLoraLoaderMixin.load_lora_weights  r/  rS   c           	          |rt        dd      rt        d      t        j                  d| j                   d       |j                  |d|||||       y)a  
        This will load the LoRA layers specified in `state_dict` into `transformer`.

        Parameters:
            state_dict (`dict`):
                A standard state dict containing the lora layer parameters. The keys can either be indexed directly
                into the transformer or prefixed with an additional `transformer` which can be used to distinguish between text
                encoder lora layers.
            transformer (`HunyuanVideoTransformer3DModel`):
                The Transformer model to load the LoRA layers into.
            adapter_name (`str`, *optional*):
                Adapter name to be used for referencing the loaded adapter model. If not specified, it will use
                `default_{i}` where i is the total number of adapters being loaded.
            low_cpu_mem_usage (`bool`, *optional*):
                Speed up model loading by only loading the pretrained LoRA weights and not initializing the random
                weights.
            hotswap (`bool`, *optional*):
                See [`~loaders.StableDiffusionLoraLoaderMixin.load_lora_weights`].
            metadata (`dict`):
                Optional LoRA adapter metadata. When supplied, the `LoraConfig` arguments of `peft` won't be derived
                from the state dict.
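
        Example (a minimal sketch of hotswapping via [`~loaders.HunyuanVideoLoraLoaderMixin.load_lora_weights`];
        the repo id and filenames are illustrative, and both LoRAs are assumed to target the same modules with
        compatible ranks):

        ```py
        import torch

        from diffusers import HunyuanVideoPipeline

        pipe = HunyuanVideoPipeline.from_pretrained("hunyuanvideo-community/HunyuanVideo", torch_dtype=torch.bfloat16)
        pipe.load_lora_weights("style_a.safetensors", adapter_name="style")
        # Replace the weights of the existing "style" adapter in place instead of creating a new adapter.
        pipe.load_lora_weights("style_b.safetensors", adapter_name="style", hotswap=True)
        ```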
        r  r  r\   r   r   Nr  r  r  s           rQ   r  z6HunyuanVideoLoraLoaderMixin.load_lora_into_transformer  r1  rS   r   r   r   r   r   r   r"  c           	          i }i }	|st        d      |j                  | j                  || j                               |%|	j                  t	        || j                               | j                  |||||||	       yr3  r4  r5  s
             rQ   r   z-HunyuanVideoLoraLoaderMixin.save_lora_weights  r6  rS   r   r   rm   r   r   c                 .    t        |   d||||d| yr   r   r   s         rQ   r   z%HunyuanVideoLoraLoaderMixin.fuse_lora,  r   rS   c                 (    t        |   dd|i| yr9  r   r   s      rQ   r   z'HunyuanVideoLoraLoaderMixin.unfuse_lora\  r:  rS   r   r   r;  r<  r   s   @rQ   r  r    r  rS   r  c                       e Zd ZdZdgZeZeede	e
ee
ej                  f   f   fd              Z	 	 dde	e
ee
ej                  f   f   dee
   defd	Ze	 	 	 	 	 ddefd
       Ze	 	 	 	 	 	 dde	e
ej(                  f   dee
e	ej*                  j,                  ej                  f   f   dede
dededee   fd       Zdgdddfdee
   dededeee
      f fdZdgfdee
   f fdZ xZS )Lumina2LoraLoaderMixinzg
    Load LoRA layers into [`Lumina2Transformer2DModel`]. Specific to [`Lumina2Text2ImgPipeline`].
    r.   rV   c                    |j                  dd      }|j                  dd      }|j                  dd      }|j                  dd      }|j                  dd      }|j                  dd      }|j                  d	d      }	|j                  d
d      }
|j                  dd      }|j                  dd      }d}|d}d}ddd}t        ||
||||||||	||      \  }}t        d |D              }|r?d}t        j	                  |       |j                         D ci c]  \  }}d|vs|| }}}t        d |D              }|rt        |      }|r||f}|S |}|S c c}}w )rE  r   Nr   Fr   r   r   r   r   r   r   r]   Tr   r   r   r   c              3   $   K   | ]  }d |v  
 ywr   ra   r   s     rQ   re   z9Lumina2LoraLoaderMixin.lora_state_dict.<locals>.<genexpr>  r   rg   r   r   c              3   >   K   | ]  }|j                  d         ywr  r   r   s     rQ   re   z9Lumina2LoraLoaderMixin.lora_state_dict.<locals>.<genexpr>  s     QALL);<Qr  )rn   r   r   r   r   r   r$   )r   rV   r}   r   r   r   r   r   r   r   r   r   r]   r   r   r~   rk   r   r   r   r   non_diffusersr   s                          rQ   rs   z&Lumina2LoraLoaderMixin.lora_state_dictv  s   n JJ{D1	$4e<**Y-!::&8$?

7D)::j$/JJ{D1	jj5 **%6=%zz*@%H""OL#7iP
02W#+-)!% 

H !$#Jz#J J  {HNN8$+5+;+;+=W41aUVAV!Q$WJW QjQQI*UJ(<z8$
 CM
 Xr	  NFrW   rX   c           	         t         st        d      |j                  dt              }|rt	        dd      rt        d      t        |t              r|j                         }d|d<    | j                  |fi |\  }}t        d |j                         D              }|st        d	      | j                  |t        | d
      st        | | j                        n| j                  ||| ||       y)r,  rZ   r[   r  r  r\   Tr]   c              3   $   K   | ]  }d |v  
 ywr_   ra   rb   s     rQ   re   z;Lumina2LoraLoaderMixin.load_lora_weights.<locals>.<genexpr>
  rf   rg   rh   r.   r  Nr.  r  s	            rQ   r   z(Lumina2LoraLoaderMixin.load_lora_weights  r/  rS   c           	          |rt        dd      rt        d      t        j                  d| j                   d       |j                  |d|||||       y)a  
        This will load the LoRA layers specified in `state_dict` into `transformer`.

        Parameters:
            state_dict (`dict`):
                A standard state dict containing the lora layer parameters. The keys can either be indexed directly
                into the transformer or prefixed with an additional `transformer` which can be used to distinguish between text
                encoder lora layers.
            transformer (`Lumina2Transformer2DModel`):
                The Transformer model to load the LoRA layers into.
            adapter_name (`str`, *optional*):
                Adapter name to be used for referencing the loaded adapter model. If not specified, it will use
                `default_{i}` where i is the total number of adapters being loaded.
            low_cpu_mem_usage (`bool`, *optional*):
                Speed up model loading by only loading the pretrained LoRA weights and not initializing the random
                weights.
            hotswap (`bool`, *optional*):
                See [`~loaders.StableDiffusionLoraLoaderMixin.load_lora_weights`].
            metadata (`dict`):
                Optional LoRA adapter metadata. When supplied, the `LoraConfig` arguments of `peft` won't be derived
                from the state dict.
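
        Example (a minimal sketch; the repo id and LoRA filename are illustrative, not verified):

        ```py
        import torch

        from diffusers import Lumina2Text2ImgPipeline

        pipe = Lumina2Text2ImgPipeline.from_pretrained("Alpha-VLLM/Lumina-Image-2.0", torch_dtype=torch.bfloat16)
        pipe.load_lora_weights("path/to/lumina2_lora.safetensors", adapter_name="detail")
        # Adapters can be re-weighted (or disabled) after loading.
        pipe.set_adapters(["detail"], adapter_weights=[0.8])
        ```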
        r  r  r\   r   r   Nr  r  r  s           rQ   r  z1Lumina2LoraLoaderMixin.load_lora_into_transformer  r1  rS   r   r   r   r   r   r   r"  c           	          i }i }	|st        d      |j                  | j                  || j                               |%|	j                  t	        || j                               | j                  |||||||	       yr3  r4  r5  s
             rQ   r   z(Lumina2LoraLoaderMixin.save_lora_weightsK  r6  rS   r   r   rm   r   r   c                 .    t        |   d||||d| yr   r   r   s         rQ   r   z Lumina2LoraLoaderMixin.fuse_lora  r   rS   c                 (    t        |   dd|i| yr9  r   r   s      rQ   r   z"Lumina2LoraLoaderMixin.unfuse_lora  r:  rS   r   r   r;  r<  r   s   @rQ   r)  r)  n  s    ,_'b/4S$sELL?P:Q5Q/Rb  bP '+	9
/4S$sELL?P:Q5Q/R9
 sm9
 	9
v  /
 /
 /
b 
 TX $"&#'<@5
c2;;./5
 "&c5%,,1N+O&O!P5
 	5

 5
  5
 !5
 ,4D>5
 5
t "/!-1-
I-
 -
 	-

  S	*-
` 4A/ =d3i = =rS   r)  c                       e Zd ZdZddgZeZeede	e
ee
ej                  f   f   fd              Zedej                  j                   fd       Z	 	 dde	e
ee
ej                  f   f   d	ee
   d
efdZe	 	 	 	 	 dd
efd       Ze	 	 	 	 	 	 dde	e
ej.                  f   dee
e	ej                  j                   ej                  f   f   dede
dededee   fd       Zdgdddfdee
   dededeee
      f fdZdgfdee
   f fdZ xZS )WanLoraLoaderMixinzw
    Load LoRA layers into [`WanTransformer3DModel`]. Specific to [`WanPipeline`] and [`WanImageToVideoPipeline`].
    r.   transformer_2rV   c                    |j                  dd      }|j                  dd      }|j                  dd      }|j                  dd      }|j                  dd      }|j                  dd      }|j                  d	d      }	|j                  d
d      }
|j                  dd      }|j                  dd      }d}|d}d}ddd}t        ||
||||||||	||      \  }}t        d |D              rt        |      }nt        d |D              rt	        |      }t        d |D              }|r?d}t
        j                  |       |j                         D ci c]  \  }}d|vs|| }}}|r||f}|S |}|S c c}}w )rE  r   Nr   Fr   r   r   r   r   r   r   r]   Tr   r   r   r   c              3   >   K   | ]  }|j                  d         ywr  r   r   s     rQ   re   z5WanLoraLoaderMixin.lora_state_dict.<locals>.<genexpr>&       DAq||./Dr  c              3   >   K   | ]  }|j                  d         ywr   Nr   r   s     rQ   re   z5WanLoraLoaderMixin.lora_state_dict.<locals>.<genexpr>(       @l+@r  c              3   $   K   | ]  }d |v  
 ywr   ra   r   s     rQ   re   z5WanLoraLoaderMixin.lora_state_dict.<locals>.<genexpr>+  r   rg   r   r   rn   r   r   r&   r    r   r   r   r  s                         rQ   rs   z"WanLoraLoaderMixin.lora_state_dict  s   n JJ{D1	$4e<**Y-!::&8$?

7D)::j$/JJ{D1	jj5 **%6=%zz*@%H""OL#7iP
02W#+-)!% 

H DDDEjQJ@Z@@>zJJ ##Jz#J J  {HNN8$+5+;+;+=W41aUVAV!Q$WJW(<z8$
 CM
 X   E6"E6c           	         |j                   j                  |S |j                  }t        d |D              r?t	        |D ch c]-  }d|v s|j                  d      d   j                  d      d   / c}      }t        d |D              xr t        d |D              }t        d |D              }|r|S t        |      D ]  }t        d	d
gddg      D ]  \  }	}
d| d}d| d}||vs||vrt        j                  |d| d   |      |d| d|
 d<   t        j                  |d| d   |      |d| d|
 d<   d| d}|st||v sy||   }t        j                  ||      |d| d|
 d<     |S c c}w )Nc              3   >   K   | ]  }|j                  d         ywtransformer.blocks.Nr   r   s     rQ   re   zDWanLoraLoaderMixin._maybe_expand_t2v_lora_for_i2v.<locals>.<genexpr>?       Gqq||12Gr  blocks.r   r   r   c              3   $   K   | ]  }d |v  
 yw
add_k_projNra   r   s     rQ   re   zDWanLoraLoaderMixin._maybe_expand_t2v_lora_for_i2v.<locals>.<genexpr>A       DAla/Drg   c              3   $   K   | ]  }d |v  
 yw
add_v_projNra   r   s     rQ   re   zDWanLoraLoaderMixin._maybe_expand_t2v_lora_for_i2v.<locals>.<genexpr>A       Lscd\]^M^Lsrg   c              3   $   K   | ]  }d |v  
 yw.lora_B.biasNra   r   s     rQ   re   zDWanLoraLoaderMixin._maybe_expand_t2v_lora_for_i2v.<locals>.<genexpr>B       C1>Q.Crg   k_imgv_imgrH  rL  rC  .attn2.to_k.lora_A.weight.attn2.to_k.lora_B.weightr  .attn2.r  r  .attn2.to_k.lora_B.biasrP  
r   	image_dimrB   r   rd  rv  rangeziprE   r  r   r.   r~   target_devicer   
num_blocksis_i2v_lorahas_biasiocref_key_lora_Aref_key_lora_Bref_key_lora_B_biasref_lora_B_bias_tensors                  rQ   _maybe_expand_t2v_lora_for_i2vz1WanLoraLoaderMixin._maybe_expand_t2v_lora_for_i2v4  s    ''/#**GJGG:haYbfgYgaggi0399#>qAhiJDDDsLshrLsIsKC
CCH!!:& ' 2\<4PQ DAq':1#=V%WN':1#=V%WN%Z7>Q[;[ TYTdTd"%8;T#UV_lUJ!4QCwqcPQ UZTdTd"%8;T#UV_lUJ!4QCwqcPQ -@sBY*Z'$7:$E1;<O1P.V[VfVf2#0W
%871#\#RS+6 E i   	E9(E9NFrW   rX   c           	      $   t         st        d      |j                  dt              }|rt	        dd      rt        d      t        |t              r|j                         }d|d<    | j                  |fi |\  }}| j                  t        | d      st        | | j                        n| j                  |	      }t        d
 |j                         D              }|st        d      |j                  dd      }	|	rQt        | d      s"t!        dt#        |       j$                   d      | j'                  || j(                  ||| ||       y| j'                  |t        | d      st        | | j                        n| j                  ||| ||       y)r,  rZ   r[   r  r  r\   Tr]   r.   r.   r~   c              3   $   K   | ]  }d |v  
 ywr_   ra   rb   s     rQ   re   z7WanLoraLoaderMixin.load_lora_weights.<locals>.<genexpr>  rf   rg   rh   load_into_transformer_2Fr6  '' object has no attribute transformer_2Note that Wan2.1 models do not have a transformer_2 component.Ensure the model has a transformer_2 component before setting load_into_transformer_2=True.r  Nr
   rA   rn   ro   r   rp   rq   rr   rs   rh  rD   rw   r  r.   rt   ru   AttributeErrorrC   r@   r  r6  
r|   rV   rW   rX   r}   r[   r~   rk   r   rm  s
             rQ   r   z$WanLoraLoaderMixin.load_lora_weightsd     8  HII"JJ':<[\h!? D 
 ;TB4Y4^4^4`1 *.%&3t334Yd]cd
H88DKDR_D`d&;&;<fjfvfv! 9 

  K9JKK 788"(**-F"N"41$T
++, -r r 
 ++ ..)!"3 ,  ++t]3 $D$*?*?@%%)!"3 , 
rS   c           	          |rt        dd      rt        d      t        j                  d| j                   d       |j                  |d|||||       y)a  
        This will load the LoRA layers specified in `state_dict` into `transformer`.

        Parameters:
            state_dict (`dict`):
                A standard state dict containing the lora layer parameters. The keys can either be indexed directly
                into the transformer or prefixed with an additional `transformer` which can be used to distinguish between text
                encoder lora layers.
            transformer (`WanTransformer3DModel`):
                The Transformer model to load the LoRA layers into.
            adapter_name (`str`, *optional*):
                Adapter name to be used for referencing the loaded adapter model. If not specified, it will use
                `default_{i}` where i is the total number of adapters being loaded.
            low_cpu_mem_usage (`bool`, *optional*):
                Speed up model loading by only loading the pretrained LoRA weights and not initializing the random
                weights.
            hotswap (`bool`, *optional*):
                See [`~loaders.StableDiffusionLoraLoaderMixin.load_lora_weights`].
            metadata (`dict`):
                Optional LoRA adapter metadata. When supplied, the `LoraConfig` arguments of `peft` won't be derived
                from the state dict.
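
        Example (a minimal sketch; the repo id and LoRA filename are illustrative, and `load_into_transformer_2`
        only applies to checkpoints that actually expose a `transformer_2` component):

        ```py
        import torch

        from diffusers import WanPipeline

        pipe = WanPipeline.from_pretrained("Wan-AI/Wan2.1-T2V-1.3B-Diffusers", torch_dtype=torch.bfloat16)
        pipe.load_lora_weights("path/to/wan_lora.safetensors", adapter_name="motion")
        # For two-stage models that have a `transformer_2` (Wan 2.1 checkpoints do not), the same LoRA can be
        # routed into the second transformer instead:
        # pipe.load_lora_weights("path/to/wan_lora.safetensors", adapter_name="motion", load_into_transformer_2=True)
        ```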
        r  r  r\   r   r   Nr  r  r  s           rQ   r  z-WanLoraLoaderMixin.load_lora_into_transformer  r1  rS   r   r   r   r   r   r   r"  c           	          i }i }	|st        d      |j                  | j                  || j                               |%|	j                  t	        || j                               | j                  |||||||	       yr3  r4  r5  s
             rQ   r   z$WanLoraLoaderMixin.save_lora_weights  r6  rS   r   r   rm   r   r   c                 .    t        |   d||||d| yr   r   r   s         rQ   r   zWanLoraLoaderMixin.fuse_lora$  r   rS   c                 (    t        |   dd|i| yr9  r   r   s      rQ   r   zWanLoraLoaderMixin.unfuse_loraT  r:  rS   r   r   r;   r@   r   r   r   r   r%  r  r   r   r   r   r   rE   r   rs   r   r   rh  r   r   r   r  r   r   r   rq   r   r   r   r   r   r   r   s   @rQ   r5  r5    s    ,_='a/4S$sELL?P:Q5Q/Ra  aF -XX__- -d '+	Q/4S$sELL?P:Q5Q/RQ smQ 	Qf  /
 /
 /
b 
 TX $"&#'<@5
c2;;./5
 "&c5%,,1N+O&O!P5
 	5

 5
  5
 !5
 ,4D>5
 5
t "/!-1-
I-
 -
 	-

  S	*-
` 4A/ =d3i = =rS   r5  c                       e Zd ZdZdgZeZeede	e
ee
ej                  f   f   fd              Zedej                  j                   fd       Z	 	 dde	e
ee
ej                  f   f   dee
   d	efd
Ze	 	 	 	 	 dd	efd       Ze	 	 	 	 	 	 dde	e
ej.                  f   dee
e	ej                  j                   ej                  f   f   dede
dededee   fd       Zdgdddfdee
   dededeee
      f fdZdgfdee
   f fdZ xZS )SkyReelsV2LoraLoaderMixinzA
    Load LoRA layers into [`SkyReelsV2Transformer3DModel`].
    r.   rV   c                    |j                  dd      }|j                  dd      }|j                  dd      }|j                  dd      }|j                  dd      }|j                  dd      }|j                  d	d      }	|j                  d
d      }
|j                  dd      }|j                  dd      }d}|d}d}ddd}t        ||
||||||||	||      \  }}t        d |D              rt        |      }nt        d |D              rt	        |      }t        d |D              }|r?d}t
        j                  |       |j                         D ci c]  \  }}d|vs|| }}}|r||f}|S |}|S c c}}w )rE  r   Nr   Fr   r   r   r   r   r   r   r]   Tr   r   r   r   c              3   >   K   | ]  }|j                  d         ywr  r   r   s     rQ   re   z<SkyReelsV2LoraLoaderMixin.lora_state_dict.<locals>.<genexpr>  r9  r  c              3   >   K   | ]  }|j                  d         ywr;  r   r   s     rQ   re   z<SkyReelsV2LoraLoaderMixin.lora_state_dict.<locals>.<genexpr>  r<  r  c              3   $   K   | ]  }d |v  
 ywr   ra   r   s     rQ   re   z<SkyReelsV2LoraLoaderMixin.lora_state_dict.<locals>.<genexpr>  r   rg   r   r   r>  r  s                         rQ   rs   z)SkyReelsV2LoraLoaderMixin.lora_state_dictn  s   p JJ{D1	$4e<**Y-!::&8$?

7D)::j$/JJ{D1	jj5 **%6=%zz*@%H""OL#7iP
02W#+-)!% 

H DDDEjQJ@Z@@>zJJ ##Jz#J J  {HNN8$+5+;+;+=W41aUVAV!Q$WJW(<z8$
 CM
 Xr?  c           	         |j                   j                  |S |j                  }t        d |D              r?t	        |D ch c]-  }d|v s|j                  d      d   j                  d      d   / c}      }t        d |D              xr t        d |D              }t        d |D              }|r|S t        |      D ]  }t        d	d
gddg      D ]  \  }	}
d| d}d| d}||vs||vrt        j                  |d| d   |      |d| d|
 d<   t        j                  |d| d   |      |d| d|
 d<   d| d}|st||v sy||   }t        j                  ||      |d| d|
 d<     |S c c}w )Nc              3   >   K   | ]  }|j                  d         ywrB  r   r   s     rQ   re   zKSkyReelsV2LoraLoaderMixin._maybe_expand_t2v_lora_for_i2v.<locals>.<genexpr>  rD  r  rE  r   r   r   c              3   $   K   | ]  }d |v  
 ywrG  ra   r   s     rQ   re   zKSkyReelsV2LoraLoaderMixin._maybe_expand_t2v_lora_for_i2v.<locals>.<genexpr>  rI  rg   c              3   $   K   | ]  }d |v  
 ywrK  ra   r   s     rQ   re   zKSkyReelsV2LoraLoaderMixin._maybe_expand_t2v_lora_for_i2v.<locals>.<genexpr>  rM  rg   c              3   $   K   | ]  }d |v  
 ywrO  ra   r   s     rQ   re   zKSkyReelsV2LoraLoaderMixin._maybe_expand_t2v_lora_for_i2v.<locals>.<genexpr>  rQ  rg   rR  rS  rH  rL  rC  rT  rU  r  rV  r  r  rW  rP  rX  r\  s                  rQ   rh  z8SkyReelsV2LoraLoaderMixin._maybe_expand_t2v_lora_for_i2v  s    ''/#**GJGG:haYbfgYgaggi0399#>qAhiJDDDsLshrLsIsKC
CCH!!:& ' 2\<4PQ DAq':1#=V%WN':1#=V%WN%Z7>Q[;[ TYTdTd"%8;T#UV_lUJ!4QCwqcPQ UZTdTd"%8;T#UV_lUJ!4QCwqcPQ -@sBY*Z'$7:$E1;<O1P.V[VfVf2#0W
%871#\#RS+6 E iri  NFrW   rX   c           	      $   t         st        d      |j                  dt              }|rt	        dd      rt        d      t        |t              r|j                         }d|d<    | j                  |fi |\  }}| j                  t        | d      st        | | j                        n| j                  |	      }t        d
 |j                         D              }|st        d      |j                  dd      }	|	rQt        | d      s"t!        dt#        |       j$                   d      | j'                  || j(                  ||| ||       y| j'                  |t        | d      st        | | j                        n| j                  ||| ||       y)r,  rZ   r[   r  r  r\   Tr]   r.   rk  c              3   $   K   | ]  }d |v  
 ywr_   ra   rb   s     rQ   re   z>SkyReelsV2LoraLoaderMixin.load_lora_weights.<locals>.<genexpr>7  rf   rg   rh   rm  Fr6  rn  ro  r  Nrp  rr  s
             rQ   r   z+SkyReelsV2LoraLoaderMixin.load_lora_weights  rs  rS   c           	          |rt        dd      rt        d      t        j                  d| j                   d       |j                  |d|||||       y)a  
        This will load the LoRA layers specified in `state_dict` into `transformer`.

        Parameters:
            state_dict (`dict`):
                A standard state dict containing the lora layer parameters. The keys can either be indexed directly
                into the transformer or prefixed with an additional `transformer` which can be used to distinguish between text
                encoder lora layers.
            transformer (`SkyReelsV2Transformer3DModel`):
                The Transformer model to load the LoRA layers into.
            adapter_name (`str`, *optional*):
                Adapter name to be used for referencing the loaded adapter model. If not specified, it will use
                `default_{i}` where i is the total number of adapters being loaded.
            low_cpu_mem_usage (`bool`, *optional*):
                Speed up model loading by only loading the pretrained LoRA weights and not initializing the random
                weights.
            hotswap (`bool`, *optional*):
                See [`~loaders.StableDiffusionLoraLoaderMixin.load_lora_weights`].
            metadata (`dict`):
                Optional LoRA adapter metadata. When supplied, the `LoraConfig` arguments of `peft` won't be derived
                from the state dict.
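
        Example (a minimal sketch; the pipeline class, repo id and LoRA filename are illustrative assumptions, not
        verified here):

        ```py
        import torch

        from diffusers import SkyReelsV2Pipeline

        pipe = SkyReelsV2Pipeline.from_pretrained(
            "Skywork/SkyReels-V2-T2V-14B-540P-Diffusers", torch_dtype=torch.bfloat16
        )
        pipe.load_lora_weights("path/to/skyreels_v2_lora.safetensors", adapter_name="style")
        # Optionally merge the LoRA into the transformer weights to remove the adapter overhead at inference time.
        pipe.fuse_lora(components=["transformer"], lora_scale=0.9)
        ```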
        r  r  r\   r   r   Nr  r  r  s           rQ   r  z4SkyReelsV2LoraLoaderMixin.load_lora_into_transformerY  r1  rS   r   r   r   r   r   r   r"  c           	          i }i }	|st        d      |j                  | j                  || j                               |%|	j                  t	        || j                               | j                  |||||||	       yr3  r4  r5  s
             rQ   r   z+SkyReelsV2LoraLoaderMixin.save_lora_weights  r6  rS   r   r   rm   r   r   c                 .    t        |   d||||d| yr   r   r   s         rQ   r   z#SkyReelsV2LoraLoaderMixin.fuse_lora  r   rS   c                 (    t        |   dd|i| yr9  r   r   s      rQ   r   z%SkyReelsV2LoraLoaderMixin.unfuse_lora  r:  rS   r   r   r;  rx  r   s   @rQ   rz  rz  f  s    ,_'a/4S$sELL?P:Q5Q/Ra  aF -XX__- -f '+	Q/4S$sELL?P:Q5Q/RQ smQ 	Qf  /
 /
 /
b 
 TX $"&#'<@5
c2;;./5
 "&c5%,,1N+O&O!P5
 	5

 5
  5
 !5
 ,4D>5
 5
t "/!-1-
I-
 -
 	-

  S	*-
` 4A/ =d3i = =rS   rz  c                       e Zd ZdZdgZeZeede	e
ee
ej                  f   f   fd              Z	 	 dde	e
ee
ej                  f   f   dee
   defd	Ze	 	 	 	 	 ddefd
       Ze	 	 	 	 	 	 dde	e
ej(                  f   dee
e	ej*                  j,                  ej                  f   f   dede
dededee   fd       Zdgdddfdee
   dededeee
      f fdZdgfdee
   f fdZ xZS )CogView4LoraLoaderMixinz\
    Load LoRA layers into [`WanTransformer3DModel`]. Specific to [`CogView4Pipeline`].
    r.   rV   c                    |j                  dd      }|j                  dd      }|j                  dd      }|j                  dd      }|j                  dd      }|j                  dd      }|j                  d	d      }	|j                  d
d      }
|j                  dd      }|j                  dd      }d}|d}d}ddd}t        ||
||||||||	||      \  }}t        d |D              }|r?d}t        j	                  |       |j                         D ci c]  \  }}d|vs|| }}}|r||f}|S |}|S c c}}w )r  r   Nr   Fr   r   r   r   r   r   r   r]   Tr   r   r   r   c              3   $   K   | ]  }d |v  
 ywr   ra   r   s     rQ   re   z:CogView4LoraLoaderMixin.lora_state_dict.<locals>.<genexpr>j  r   rg   r   r   r  r  s                         rQ   rs   z'CogView4LoraLoaderMixin.lora_state_dict  r*  r  NFrW   rX   c           	         t         st        d      |j                  dt              }|rt	        dd      rt        d      t        |t              r|j                         }d|d<    | j                  |fi |\  }}t        d |j                         D              }|st        d	      | j                  |t        | d
      st        | | j                        n| j                  ||| ||       y)r,  rZ   r[   r  r  r\   Tr]   c              3   $   K   | ]  }d |v  
 ywr_   ra   rb   s     rQ   re   z<CogView4LoraLoaderMixin.load_lora_weights.<locals>.<genexpr>  rf   rg   rh   r.   r  Nr.  r  s	            rQ   r   z)CogView4LoraLoaderMixin.load_lora_weightst  r/  rS   c           	          |rt        dd      rt        d      t        j                  d| j                   d       |j                  |d|||||       y)a  
        This will load the LoRA layers specified in `state_dict` into `transformer`.

        Parameters:
            state_dict (`dict`):
                A standard state dict containing the lora layer parameters. The keys can either be indexed directly
                into the transformer or prefixed with an additional `transformer` which can be used to distinguish between text
                encoder lora layers.
            transformer (`CogView4Transformer2DModel`):
                The Transformer model to load the LoRA layers into.
            adapter_name (`str`, *optional*):
                Adapter name to be used for referencing the loaded adapter model. If not specified, it will use
                `default_{i}` where i is the total number of adapters being loaded.
            low_cpu_mem_usage (`bool`, *optional*):
                Speed up model loading by only loading the pretrained LoRA weights and not initializing the random
                weights.
            hotswap (`bool`, *optional*):
                See [`~loaders.StableDiffusionLoraLoaderMixin.load_lora_weights`].
            metadata (`dict`):
                Optional LoRA adapter metadata. When supplied, the `LoraConfig` arguments of `peft` won't be derived
                from the state dict.
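
        Example (a minimal sketch; the repo id and LoRA filenames are illustrative, not verified):

        ```py
        import torch

        from diffusers import CogView4Pipeline

        pipe = CogView4Pipeline.from_pretrained("THUDM/CogView4-6B", torch_dtype=torch.bfloat16)
        pipe.load_lora_weights("style_lora.safetensors", adapter_name="style")
        pipe.load_lora_weights("subject_lora.safetensors", adapter_name="subject")
        # Combine both adapters with different strengths.
        pipe.set_adapters(["style", "subject"], adapter_weights=[1.0, 0.6])
        ```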
        r  r  r\   r   r   Nr  r  r  s           rQ   r  z2CogView4LoraLoaderMixin.load_lora_into_transformer  r1  rS   r   r   r   r   r   r   r"  c           	          i }i }	|st        d      |j                  | j                  || j                               |%|	j                  t	        || j                               | j                  |||||||	       yr3  r4  r5  s
             rQ   r   z)CogView4LoraLoaderMixin.save_lora_weights  r6  rS   r   r   rm   r   r   c                 .    t        |   d||||d| yr   r   r   s         rQ   r   z!CogView4LoraLoaderMixin.fuse_lora  r   rS   c                 (    t        |   dd|i| yr9  r   r   s      rQ   r   z#CogView4LoraLoaderMixin.unfuse_loraL  r:  rS   r   r   r;  r<  r   s   @rQ   r  r    r  rS   r  c                       e Zd ZdZdgZeZeede	e
ee
ej                  f   f   fd              Z	 	 dde	e
ee
ej                  f   f   dee
   defd	Ze	 	 	 	 	 ddefd
       Ze	 	 	 	 	 	 dde	e
ej(                  f   dee
e	ej*                  j,                  ej                  f   f   dede
dededee   fd       Zdgdddfdee
   dededeee
      f fdZdgfdee
   f fdZ xZS )HiDreamImageLoraLoaderMixinzi
    Load LoRA layers into [`HiDreamImageTransformer2DModel`]. Specific to [`HiDreamImagePipeline`].
    r.   rV   c                    |j                  dd      }|j                  dd      }|j                  dd      }|j                  dd      }|j                  dd      }|j                  dd      }|j                  d	d      }	|j                  d
d      }
|j                  dd      }|j                  dd      }d}|d}d}ddd}t        ||
||||||||	||      \  }}t        d |D              }|r?d}t        j	                  |       |j                         D ci c]  \  }}d|vs|| }}}t        d |D              }|rt        |      }|r||f}|S |}|S c c}}w )rE  r   Nr   Fr   r   r   r   r   r   r   r]   Tr   r   r   r   c              3   $   K   | ]  }d |v  
 ywr   ra   r   s     rQ   re   z>HiDreamImageLoraLoaderMixin.lora_state_dict.<locals>.<genexpr>  r   rg   r   r   c              3   $   K   | ]  }d |v  
 yw)diffusion_modelNra   r   s     rQ   re   z>HiDreamImageLoraLoaderMixin.lora_state_dict.<locals>.<genexpr>  s     %Q&71&<%Qrg   )rn   r   r   r   r   r   r!   r  s                          rQ   rs   z+HiDreamImageLoraLoaderMixin.lora_state_dictf  s   n JJ{D1	$4e<**Y-!::&8$?

7D)::j$/JJ{D1	jj5 **%6=%zz*@%H""OL#7iP
02W#+-)!% 

H !$#Jz#J J  {HNN8$+5+;+;+=W41aUVAV!Q$WJW"%%Qj%Q"Q"I*UJ(<z8$
 CM
 Xr	  NFrW   rX   c           	         t         st        d      |j                  dt              }|rt	        dd      rt        d      t        |t              r|j                         }d|d<    | j                  |fi |\  }}t        d |j                         D              }|st        d	      | j                  |t        | d
      st        | | j                        n| j                  ||| ||       y)r,  rZ   r[   r  r  r\   Tr]   c              3   $   K   | ]  }d |v  
 ywr_   ra   rb   s     rQ   re   z@HiDreamImageLoraLoaderMixin.load_lora_weights.<locals>.<genexpr>  rf   rg   rh   r.   r  Nr.  r  s	            rQ   r   z-HiDreamImageLoraLoaderMixin.load_lora_weights  r/  rS   c           	          |rt        dd      rt        d      t        j                  d| j                   d       |j                  |d|||||       y)a  
        This will load the LoRA layers specified in `state_dict` into `transformer`.

        Parameters:
            state_dict (`dict`):
                A standard state dict containing the lora layer parameters. The keys can either be indexed directly
                into the transformer or prefixed with an additional `transformer` which can be used to distinguish between text
                encoder lora layers.
            transformer (`HiDreamImageTransformer2DModel`):
                The Transformer model to load the LoRA layers into.
            adapter_name (`str`, *optional*):
                Adapter name to be used for referencing the loaded adapter model. If not specified, it will use
                `default_{i}` where i is the total number of adapters being loaded.
            low_cpu_mem_usage (`bool`, *optional*):
                Speed up model loading by only loading the pretrained LoRA weights and not initializing the random
                weights.
            hotswap (`bool`, *optional*):
                See [`~loaders.StableDiffusionLoraLoaderMixin.load_lora_weights`].
            metadata (`dict`):
                Optional LoRA adapter metadata. When supplied, the `LoraConfig` arguments of `peft` won't be derived
                from the state dict.
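
        Example (a minimal sketch; assumes `pipe` is an already-constructed [`HiDreamImagePipeline`] and the LoRA
        filename is a placeholder):

        ```py
        pipe.load_lora_weights("path/to/hidream_lora.safetensors", adapter_name="style")
        # ... run inference ...
        # Remove the LoRA layers again to restore the original transformer weights.
        pipe.unload_lora_weights()
        ```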
        r  r  r\   r   r   Nr  r  r  s           rQ   r  z6HiDreamImageLoraLoaderMixin.load_lora_into_transformer  r1  rS   r   r   r   r   r   r   r"  c           	          i }i }	|st        d      |j                  | j                  || j                               |%|	j                  t	        || j                               | j                  |||||||	       yr3  r4  r5  s
             rQ   r   z-HiDreamImageLoraLoaderMixin.save_lora_weights:  r6  rS   r   r   rm   r   r   c                 .    t        |   d||||d| yr   r   r   s         rQ   r   z%HiDreamImageLoraLoaderMixin.fuse_lorat  r   rS   c                 (    t        |   dd|i| yr9  r   r   s      rQ   r   z'HiDreamImageLoraLoaderMixin.unfuse_lora  r:  rS   r   r   r;  r<  r   s   @rQ   r  r  ^  r  rS   r  c                       e Zd ZdZdgZeZeede	e
ee
ej                  f   f   fd              Z	 	 dde	e
ee
ej                  f   f   dee
   defd	Ze	 	 	 	 	 ddefd
       Ze	 	 	 	 	 	 dde	e
ej(                  f   dee
e	ej*                  j,                  ej                  f   f   dede
dededee   fd       Zdgdddfdee
   dededeee
      f fdZdgfdee
   f fdZ xZS )QwenImageLoraLoaderMixinzc
    Load LoRA layers into [`QwenImageTransformer2DModel`]. Specific to [`QwenImagePipeline`].
    r.   rV   c                    |j                  dd      }|j                  dd      }|j                  dd      }|j                  dd      }|j                  dd      }|j                  dd      }|j                  d	d      }	|j                  d
d      }
|j                  dd      }|j                  dd      }d}|d}d}ddd}t        ||
||||||||	||      \  }}t        d |D              }|r?d}t        j	                  |       |j                         D ci c]  \  }}d|vs|| }}}t        d |D              }t        d |D              }|s|rt        |      }|r||f}|S |}|S c c}}w )r  r   Nr   Fr   r   r   r   r   r   r   r]   Tr   r   r   r   c              3   $   K   | ]  }d |v  
 ywr   ra   r   s     rQ   re   z;QwenImageLoraLoaderMixin.lora_state_dict.<locals>.<genexpr>  r   rg   r   r   c              3   >   K   | ]  }|j                  d         yw)z.alphaN)r  r   s     rQ   re   z;QwenImageLoraLoaderMixin.lora_state_dict.<locals>.<genexpr>  s     Hqzz(3Hr  c              3   >   K   | ]  }|j                  d         ywr;  r   r   s     rQ   re   z;QwenImageLoraLoaderMixin.lora_state_dict.<locals>.<genexpr>  s     K1ALL6Kr  )rn   r   r   r   r   r   r%   )r   rV   r}   r   r   r   r   r   r   r   r   r   r]   r   r   r~   rk   r   r   r   r   has_alphas_in_sdhas_lora_unetr   s                           rQ   rs   z(QwenImageLoraLoaderMixin.lora_state_dict  s   p JJ{D1	$4e<**Y-!::&8$?

7D)::j$/JJ{D1	jj5 **%6=%zz*@%H""OL#7iP
02W#+-)!% 

H !$#Jz#J J  {HNN8$+5+;+;+=W41aUVAV!Q$WJWHZHHK
KK}FzRJ(<z8$
 CM
 Xs   E.'E.NFrW   rX   c           	         t         st        d      |j                  dt              }|rt	        dd      rt        d      t        |t              r|j                         }d|d<    | j                  |fi |\  }}t        d |j                         D              }|st        d	      | j                  |t        | d
      st        | | j                        n| j                  ||| ||       y)r,  rZ   r[   r  r  r\   Tr]   c              3   $   K   | ]  }d |v  
 ywr_   ra   rb   s     rQ   re   z=QwenImageLoraLoaderMixin.load_lora_weights.<locals>.<genexpr>S  rf   rg   rh   r.   r  Nr.  r  s	            rQ   r   z*QwenImageLoraLoaderMixin.load_lora_weights&  r/  rS   c           	          |rt        dd      rt        d      t        j                  d| j                   d       |j                  |d|||||       y)a  
        This will load the LoRA layers specified in `state_dict` into `transformer`.

        Parameters:
            state_dict (`dict`):
                A standard state dict containing the lora layer parameters. The keys can either be indexed directly
                into the transformer or prefixed with an additional `transformer` which can be used to distinguish between text
                encoder lora layers.
            transformer (`QwenImageTransformer2DModel`):
                The Transformer model to load the LoRA layers into.
            adapter_name (`str`, *optional*):
                Adapter name to be used for referencing the loaded adapter model. If not specified, it will use
                `default_{i}` where i is the total number of adapters being loaded.
            low_cpu_mem_usage (`bool`, *optional*):
                Speed up model loading by only loading the pretrained LoRA weights and not initializing the random
                weights.
            hotswap (`bool`, *optional*):
                See [`~loaders.StableDiffusionLoraLoaderMixin.load_lora_weights`].
            metadata (`dict`):
                Optional LoRA adapter metadata. When supplied, the `LoraConfig` arguments of `peft` won't be derived
                from the state dict.
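
        Example (a minimal sketch; the repo id and LoRA filename are illustrative, not verified):

        ```py
        import torch

        from diffusers import QwenImagePipeline

        pipe = QwenImagePipeline.from_pretrained("Qwen/Qwen-Image", torch_dtype=torch.bfloat16)
        # `load_lora_weights` also accepts an in-memory state dict instead of a file path.
        state_dict = pipe.lora_state_dict("path/to/qwen_image_lora.safetensors")
        pipe.load_lora_weights(state_dict, adapter_name="style")
        ```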
        r  r  r\   r   r   Nr  r  r  s           rQ   r  z3QwenImageLoraLoaderMixin.load_lora_into_transformera  r1  rS   r   r   r   r   r   r   r"  c           	          i }i }	|st        d      |j                  | j                  || j                               |%|	j                  t	        || j                               | j                  |||||||	       yr3  r4  r5  s
             rQ   r   z*QwenImageLoraLoaderMixin.save_lora_weights  r6  rS   r   r   rm   r   r   c                 .    t        |   d||||d| yr   r   r   s         rQ   r   z"QwenImageLoraLoaderMixin.fuse_lora  r   rS   c                 (    t        |   dd|i| yr9  r   r   s      rQ   r   z$QwenImageLoraLoaderMixin.unfuse_lora  r:  rS   r   r   r;  r<  r   s   @rQ   r  r    s    ,_'c/4S$sELL?P:Q5Q/Rc  cR '+	9
/4S$sELL?P:Q5Q/R9
 sm9
 	9
v  /
 /
 /
b 
 TX $"&#'<@5
c2;;./5
 "&c5%,,1N+O&O!P5
 	5

 5
  5
 !5
 ,4D>5
 5
t "/!-1-
I-
 -
 	-

  S	*-
` 4A/ =d3i = =rS   r  c                        e Zd Z fdZ xZS )LoraLoaderMixinc                 B    d}t        dd|       t        |   |i | y )NzLoraLoaderMixin is deprecated and this will be removed in a future version. Please use `StableDiffusionLoraLoaderMixin`, instead.r  z1.0.0)r   r   __init__)r|   argsr}   deprecation_messager?   s       rQ   r  zLoraLoaderMixin.__init__  s,     b#W.AB$)&)rS   )r@   r   r   r  r   r   s   @rQ   r  r    s    * *rS   r  )Fr   typingr   r   r   r   r   rE   huggingface_hub.utilsr   utilsr
   r   r   r   r   r   r   r   r   r   r   	lora_baser   r   r   r   r   r   lora_conversion_utilsr   r   r   r   r    r!   r"   r#   r$   r%   r&   r'   r(   ro   
get_loggerr@   r   r   r   r%  r  rR   rU   r   r  r'  r>  r  r  r  r  r  r  r)  r5  rz  r  r  r  r  ra   rS   rQ   <module>r     s   
 8 8  6       " #( D'"D(+%'#C2*.' 
		H	%" 	  &2M%B ".ba=] a=Ht=} t=nP= P=fS=m S=l
GT- GTXl
: l
^P=} P=f
S=M S=l
U=m U=p
S=- S=l
U=- U=p
V=] V=r
\= \=~_= _=DS=m S=l
U=- U=p
W=} W=t
*4 *rS   