
from dataclasses import dataclass
from typing import Any, Dict, List, Optional, Tuple, Union

import torch
from torch import nn
from torch.nn import functional as F

from ...configuration_utils import ConfigMixin, register_to_config
from ...loaders.single_file_model import FromOriginalModelMixin
from ...utils import BaseOutput, logging
from ..attention_processor import (
    ADDED_KV_ATTENTION_PROCESSORS,
    CROSS_ATTENTION_PROCESSORS,
    AttentionProcessor,
    AttnAddedKVProcessor,
    AttnProcessor,
)
from ..embeddings import (
    TextImageProjection,
    TextImageTimeEmbedding,
    TextTimeEmbedding,
    TimestepEmbedding,
    Timesteps,
)
from ..modeling_utils import ModelMixin
from ..unets.unet_2d_blocks import UNetMidBlock2D, UNetMidBlock2DCrossAttn, get_down_block
from ..unets.unet_2d_condition import UNet2DConditionModel


logger = logging.get_logger(__name__)  # pylint: disable=invalid-name


@dataclass
class ControlNetOutput(BaseOutput):
    """
    The output of [`ControlNetModel`].

    Args:
        down_block_res_samples (`tuple[torch.Tensor]`):
            A tuple of downsample activations at different resolutions for each downsampling block. Each tensor should
            be of shape `(batch_size, channel * resolution, height // resolution, width // resolution)`. Output can be
            used to condition the original UNet's downsampling activations.
        mid_block_res_sample (`torch.Tensor`):
            The activation of the middle block (the lowest sample resolution). Each tensor should be of shape
            `(batch_size, channel * lowest_resolution, height // lowest_resolution, width // lowest_resolution)`.
            Output can be used to condition the original UNet's middle block activation.
    """

    down_block_res_samples: Tuple[torch.Tensor]
    mid_block_res_sample: torch.Tensor
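

# A minimal consumption sketch (illustrative only; assumes `controlnet` and `unet` models already
# exist): the two fields of `ControlNetOutput` are what a Stable Diffusion UNet expects as
# `down_block_additional_residuals` / `mid_block_additional_residual`:
#
#     down_res, mid_res = controlnet(
#         sample, timestep, encoder_hidden_states=prompt_embeds,
#         controlnet_cond=cond_image, return_dict=False,
#     )
#     noise_pred = unet(
#         sample, timestep, encoder_hidden_states=prompt_embeds,
#         down_block_additional_residuals=down_res,
#         mid_block_additional_residual=mid_res,
#     ).sample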


class ControlNetConditioningEmbedding(nn.Module):
    """
    Quoting from https://huggingface.co/papers/2302.05543: "Stable Diffusion uses a pre-processing method similar to
    VQ-GAN [11] to convert the entire dataset of 512 × 512 images into smaller 64 × 64 “latent images” for stabilized
    training. This requires ControlNets to convert image-based conditions to 64 × 64 feature space to match the
    convolution size. We use a tiny network E(·) of four convolution layers with 4 × 4 kernels and 2 × 2 strides
    (activated by ReLU, channels are 16, 32, 64, 128, initialized with Gaussian weights, trained jointly with the full
    model) to encode image-space conditions ... into feature maps ..."
    """

    def __init__(
        self,
        conditioning_embedding_channels: int,
        conditioning_channels: int = 3,
        block_out_channels: Tuple[int, ...] = (16, 32, 96, 256),
    ):
        super().__init__()

        self.conv_in = nn.Conv2d(conditioning_channels, block_out_channels[0], kernel_size=3, padding=1)

        self.blocks = nn.ModuleList([])

        for i in range(len(block_out_channels) - 1):
            channel_in = block_out_channels[i]
            channel_out = block_out_channels[i + 1]
            self.blocks.append(nn.Conv2d(channel_in, channel_in, kernel_size=3, padding=1))
            self.blocks.append(nn.Conv2d(channel_in, channel_out, kernel_size=3, padding=1, stride=2))

        # the output conv is zero-initialized so the ControlNet starts as a no-op on top of the UNet
        self.conv_out = zero_module(
            nn.Conv2d(block_out_channels[-1], conditioning_embedding_channels, kernel_size=3, padding=1)
        )

    def forward(self, conditioning):
        embedding = self.conv_in(conditioning)
        embedding = F.silu(embedding)

        for block in self.blocks:
            embedding = block(embedding)
            embedding = F.silu(embedding)

        embedding = self.conv_out(embedding)

        return embedding
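

# Shape sketch (illustrative, not part of the module): with the default
# `block_out_channels=(16, 32, 96, 256)` the loop above adds three stride-2 convolutions, i.e. an
# 8x spatial reduction, so a 512x512 RGB conditioning image lands at the 64x64 latent resolution
# described in the docstring:
#
#     emb = ControlNetConditioningEmbedding(conditioning_embedding_channels=320)
#     emb(torch.randn(1, 3, 512, 512)).shape  # torch.Size([1, 320, 64, 64])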


class ControlNetModel(ModelMixin, ConfigMixin, FromOriginalModelMixin):
    """
    A ControlNet model.

    Args:
        in_channels (`int`, defaults to 4):
            The number of channels in the input sample.
        flip_sin_to_cos (`bool`, defaults to `True`):
            Whether to flip the sin to cos in the time embedding.
        freq_shift (`int`, defaults to 0):
            The frequency shift to apply to the time embedding.
        down_block_types (`tuple[str]`, defaults to `("CrossAttnDownBlock2D", "CrossAttnDownBlock2D", "CrossAttnDownBlock2D", "DownBlock2D")`):
            The tuple of downsample blocks to use.
        only_cross_attention (`Union[bool, Tuple[bool]]`, defaults to `False`):
        block_out_channels (`tuple[int]`, defaults to `(320, 640, 1280, 1280)`):
            The tuple of output channels for each block.
        layers_per_block (`int`, defaults to 2):
            The number of layers per block.
        downsample_padding (`int`, defaults to 1):
            The padding to use for the downsampling convolution.
        mid_block_scale_factor (`float`, defaults to 1):
            The scale factor to use for the mid block.
        act_fn (`str`, defaults to "silu"):
            The activation function to use.
        norm_num_groups (`int`, *optional*, defaults to 32):
            The number of groups to use for the normalization. If None, normalization and activation layers are skipped
            in post-processing.
        norm_eps (`float`, defaults to 1e-5):
            The epsilon to use for the normalization.
        cross_attention_dim (`int`, defaults to 1280):
            The dimension of the cross attention features.
        transformer_layers_per_block (`int` or `Tuple[int]`, *optional*, defaults to 1):
            The number of transformer blocks of type [`~models.attention.BasicTransformerBlock`]. Only relevant for
            [`~models.unet_2d_blocks.CrossAttnDownBlock2D`], [`~models.unet_2d_blocks.CrossAttnUpBlock2D`],
            [`~models.unet_2d_blocks.UNetMidBlock2DCrossAttn`].
        encoder_hid_dim (`int`, *optional*, defaults to None):
            If `encoder_hid_dim_type` is defined, `encoder_hidden_states` will be projected from `encoder_hid_dim`
            dimension to `cross_attention_dim`.
        encoder_hid_dim_type (`str`, *optional*, defaults to `None`):
            If given, the `encoder_hidden_states` and potentially other embeddings are down-projected to text
            embeddings of dimension `cross_attention_dim` according to `encoder_hid_dim_type`.
        attention_head_dim (`Union[int, Tuple[int]]`, defaults to 8):
            The dimension of the attention heads.
        use_linear_projection (`bool`, defaults to `False`):
        class_embed_type (`str`, *optional*, defaults to `None`):
            The type of class embedding to use which is ultimately summed with the time embeddings. Choose from None,
            `"timestep"`, `"identity"`, `"projection"`, or `"simple_projection"`.
        addition_embed_type (`str`, *optional*, defaults to `None`):
            Configures an optional embedding which will be summed with the time embeddings. Choose from `None` or
            "text". "text" will use the `TextTimeEmbedding` layer.
        num_class_embeds (`int`, *optional*, defaults to `None`):
            Input dimension of the learnable embedding matrix to be projected to `time_embed_dim`, when performing
            class conditioning with `class_embed_type` equal to `None`.
        upcast_attention (`bool`, defaults to `False`):
        resnet_time_scale_shift (`str`, defaults to `"default"`):
            Time scale shift config for ResNet blocks (see `ResnetBlock2D`). Choose from `default` or `scale_shift`.
        projection_class_embeddings_input_dim (`int`, *optional*, defaults to `None`):
            The dimension of the `class_labels` input when `class_embed_type="projection"`. Required when
            `class_embed_type="projection"`.
        controlnet_conditioning_channel_order (`str`, defaults to `"rgb"`):
            The channel order of the conditional image. Will be converted to `rgb` if it's `bgr`.
        conditioning_embedding_out_channels (`tuple[int]`, *optional*, defaults to `(16, 32, 96, 256)`):
            The tuple of output channels for each block in the `conditioning_embedding` layer.
        global_pool_conditions (`bool`, defaults to `False`):
            TODO(Patrick) - unused parameter.
        addition_embed_type_num_heads (`int`, defaults to 64):
            The number of heads to use for the `TextTimeEmbedding` layer.
    """

    _supports_gradient_checkpointing = True

    @register_to_config
    def __init__(
        self,
        in_channels: int = 4,
        conditioning_channels: int = 3,
        flip_sin_to_cos: bool = True,
        freq_shift: int = 0,
        down_block_types: Tuple[str, ...] = (
            "CrossAttnDownBlock2D",
            "CrossAttnDownBlock2D",
            "CrossAttnDownBlock2D",
            "DownBlock2D",
        ),
        mid_block_type: Optional[str] = "UNetMidBlock2DCrossAttn",
        only_cross_attention: Union[bool, Tuple[bool]] = False,
        block_out_channels: Tuple[int, ...] = (320, 640, 1280, 1280),
        layers_per_block: int = 2,
        downsample_padding: int = 1,
        mid_block_scale_factor: float = 1,
        act_fn: str = "silu",
        norm_num_groups: Optional[int] = 32,
        norm_eps: float = 1e-5,
        cross_attention_dim: int = 1280,
        transformer_layers_per_block: Union[int, Tuple[int, ...]] = 1,
        encoder_hid_dim: Optional[int] = None,
        encoder_hid_dim_type: Optional[str] = None,
        attention_head_dim: Union[int, Tuple[int, ...]] = 8,
        num_attention_heads: Optional[Union[int, Tuple[int, ...]]] = None,
        use_linear_projection: bool = False,
        class_embed_type: Optional[str] = None,
        addition_embed_type: Optional[str] = None,
        addition_time_embed_dim: Optional[int] = None,
        num_class_embeds: Optional[int] = None,
        upcast_attention: bool = False,
        resnet_time_scale_shift: str = "default",
        projection_class_embeddings_input_dim: Optional[int] = None,
        controlnet_conditioning_channel_order: str = "rgb",
        conditioning_embedding_out_channels: Optional[Tuple[int, ...]] = (16, 32, 96, 256),
        global_pool_conditions: bool = False,
        addition_embed_type_num_heads: int = 64,
    ):
        super().__init__()

        # If `num_attention_heads` is not defined, default it to `attention_head_dim`. This keeps
        # backward compatibility with configs that stored the number of heads under `attention_head_dim`.
        num_attention_heads = num_attention_heads or attention_head_dim

        # Check inputs
        if len(block_out_channels) != len(down_block_types):
            raise ValueError(
                f"Must provide the same number of `block_out_channels` as `down_block_types`. `block_out_channels`: {block_out_channels}. `down_block_types`: {down_block_types}."
            )

        if not isinstance(only_cross_attention, bool) and len(only_cross_attention) != len(down_block_types):
            raise ValueError(
                f"Must provide the same number of `only_cross_attention` as `down_block_types`. `only_cross_attention`: {only_cross_attention}. `down_block_types`: {down_block_types}."
            )

        if not isinstance(num_attention_heads, int) and len(num_attention_heads) != len(down_block_types):
            raise ValueError(
                f"Must provide the same number of `num_attention_heads` as `down_block_types`. `num_attention_heads`: {num_attention_heads}. `down_block_types`: {down_block_types}."
            )

        if isinstance(transformer_layers_per_block, int):
            transformer_layers_per_block = [transformer_layers_per_block] * len(down_block_types)

        # input
        conv_in_kernel = 3
        conv_in_padding = (conv_in_kernel - 1) // 2
        self.conv_in = nn.Conv2d(
            in_channels, block_out_channels[0], kernel_size=conv_in_kernel, padding=conv_in_padding
        )

        # time
        time_embed_dim = block_out_channels[0] * 4
        self.time_proj = Timesteps(block_out_channels[0], flip_sin_to_cos, freq_shift)
        timestep_input_dim = block_out_channels[0]
        self.time_embedding = TimestepEmbedding(
            timestep_input_dim,
            time_embed_dim,
            act_fn=act_fn,
        )

        if encoder_hid_dim_type is None and encoder_hid_dim is not None:
            encoder_hid_dim_type = "text_proj"
            self.register_to_config(encoder_hid_dim_type=encoder_hid_dim_type)
            logger.info("encoder_hid_dim_type defaults to 'text_proj' as `encoder_hid_dim` is defined.")

        if encoder_hid_dim is None and encoder_hid_dim_type is not None:
            raise ValueError(
                f"`encoder_hid_dim` has to be defined when `encoder_hid_dim_type` is set to {encoder_hid_dim_type}."
            )

        if encoder_hid_dim_type == "text_proj":
            self.encoder_hid_proj = nn.Linear(encoder_hid_dim, cross_attention_dim)
        elif encoder_hid_dim_type == "text_image_proj":
            # `image_embed_dim` doesn't have to be `cross_attention_dim`; it is set to
            # `cross_attention_dim` here as this is the required dimension for the currently only
            # use case of `encoder_hid_dim_type == "text_image_proj"`.
            self.encoder_hid_proj = TextImageProjection(
                text_embed_dim=encoder_hid_dim,
                image_embed_dim=cross_attention_dim,
                cross_attention_dim=cross_attention_dim,
            )
        elif encoder_hid_dim_type is not None:
            raise ValueError(
                f"encoder_hid_dim_type: {encoder_hid_dim_type} must be None, 'text_proj' or 'text_image_proj'."
            )
        else:
            self.encoder_hid_proj = None

        # class embedding
        if class_embed_type is None and num_class_embeds is not None:
            self.class_embedding = nn.Embedding(num_class_embeds, time_embed_dim)
        elif class_embed_type == "timestep":
            self.class_embedding = TimestepEmbedding(timestep_input_dim, time_embed_dim)
        elif class_embed_type == "identity":
            self.class_embedding = nn.Identity(time_embed_dim, time_embed_dim)
        elif class_embed_type == "projection":
            if projection_class_embeddings_input_dim is None:
                raise ValueError(
                    "`class_embed_type`: 'projection' requires `projection_class_embeddings_input_dim` be set"
                )
            # The projection `class_embed_type` is the same as the timestep `class_embed_type`, except the
            # `class_labels` are not first converted to sinusoidal embeddings and the projection can start
            # from an arbitrary input dimension.
            self.class_embedding = TimestepEmbedding(projection_class_embeddings_input_dim, time_embed_dim)
        else:
            self.class_embedding = None

        if addition_embed_type == "text":
            if encoder_hid_dim is not None:
                text_time_embedding_from_dim = encoder_hid_dim
            else:
                text_time_embedding_from_dim = cross_attention_dim

            self.add_embedding = TextTimeEmbedding(
                text_time_embedding_from_dim, time_embed_dim, num_heads=addition_embed_type_num_heads
            )
        elif addition_embed_type == "text_image":
            # `text_embed_dim` and `image_embed_dim` don't have to be `cross_attention_dim`; they are set
            # to `cross_attention_dim` here as this is the required dimension for the currently only use
            # case of `addition_embed_type == "text_image"`.
            self.add_embedding = TextImageTimeEmbedding(
                text_embed_dim=cross_attention_dim, image_embed_dim=cross_attention_dim, time_embed_dim=time_embed_dim
            )
        elif addition_embed_type == "text_time":
            self.add_time_proj = Timesteps(addition_time_embed_dim, flip_sin_to_cos, freq_shift)
            self.add_embedding = TimestepEmbedding(projection_class_embeddings_input_dim, time_embed_dim)
        elif addition_embed_type is not None:
            raise ValueError(f"addition_embed_type: {addition_embed_type} must be None, 'text' or 'text_image'.")

        # control net conditioning embedding
        self.controlnet_cond_embedding = ControlNetConditioningEmbedding(
            conditioning_embedding_channels=block_out_channels[0],
            block_out_channels=conditioning_embedding_out_channels,
            conditioning_channels=conditioning_channels,
        )

        self.down_blocks = nn.ModuleList([])
        self.controlnet_down_blocks = nn.ModuleList([])

        if isinstance(only_cross_attention, bool):
            only_cross_attention = [only_cross_attention] * len(down_block_types)

        if isinstance(attention_head_dim, int):
            attention_head_dim = (attention_head_dim,) * len(down_block_types)

        if isinstance(num_attention_heads, int):
            num_attention_heads = (num_attention_heads,) * len(down_block_types)

        # down
        output_channel = block_out_channels[0]

        controlnet_block = nn.Conv2d(output_channel, output_channel, kernel_size=1)
        controlnet_block = zero_module(controlnet_block)
        self.controlnet_down_blocks.append(controlnet_block)

        for i, down_block_type in enumerate(down_block_types):
            input_channel = output_channel
            output_channel = block_out_channels[i]
            is_final_block = i == len(block_out_channels) - 1

            down_block = get_down_block(
                down_block_type,
                num_layers=layers_per_block,
                transformer_layers_per_block=transformer_layers_per_block[i],
                in_channels=input_channel,
                out_channels=output_channel,
                temb_channels=time_embed_dim,
                add_downsample=not is_final_block,
                resnet_eps=norm_eps,
                resnet_act_fn=act_fn,
                resnet_groups=norm_num_groups,
                cross_attention_dim=cross_attention_dim,
                num_attention_heads=num_attention_heads[i],
                attention_head_dim=attention_head_dim[i] if attention_head_dim[i] is not None else output_channel,
                downsample_padding=downsample_padding,
                use_linear_projection=use_linear_projection,
                only_cross_attention=only_cross_attention[i],
                upcast_attention=upcast_attention,
                resnet_time_scale_shift=resnet_time_scale_shift,
            )
            self.down_blocks.append(down_block)

            for _ in range(layers_per_block):
                controlnet_block = nn.Conv2d(output_channel, output_channel, kernel_size=1)
                controlnet_block = zero_module(controlnet_block)
                self.controlnet_down_blocks.append(controlnet_block)

            if not is_final_block:
                controlnet_block = nn.Conv2d(output_channel, output_channel, kernel_size=1)
                controlnet_block = zero_module(controlnet_block)
                self.controlnet_down_blocks.append(controlnet_block)

        # mid
        mid_block_channel = block_out_channels[-1]

        controlnet_block = nn.Conv2d(mid_block_channel, mid_block_channel, kernel_size=1)
        controlnet_block = zero_module(controlnet_block)
        self.controlnet_mid_block = controlnet_block

        if mid_block_type == "UNetMidBlock2DCrossAttn":
            self.mid_block = UNetMidBlock2DCrossAttn(
                transformer_layers_per_block=transformer_layers_per_block[-1],
                in_channels=mid_block_channel,
                temb_channels=time_embed_dim,
                resnet_eps=norm_eps,
                resnet_act_fn=act_fn,
                output_scale_factor=mid_block_scale_factor,
                resnet_time_scale_shift=resnet_time_scale_shift,
                cross_attention_dim=cross_attention_dim,
                num_attention_heads=num_attention_heads[-1],
                resnet_groups=norm_num_groups,
                use_linear_projection=use_linear_projection,
                upcast_attention=upcast_attention,
            )
        elif mid_block_type == "UNetMidBlock2D":
            self.mid_block = UNetMidBlock2D(
                in_channels=block_out_channels[-1],
                temb_channels=time_embed_dim,
                num_layers=0,
                resnet_eps=norm_eps,
                resnet_act_fn=act_fn,
                output_scale_factor=mid_block_scale_factor,
                resnet_groups=norm_num_groups,
                resnet_time_scale_shift=resnet_time_scale_shift,
                add_attention=False,
            )
        else:
            raise ValueError(f"unknown mid_block_type : {mid_block_type}")

    @classmethod
    def from_unet(
        cls,
        unet: UNet2DConditionModel,
        controlnet_conditioning_channel_order: str = "rgb",
        conditioning_embedding_out_channels: Optional[Tuple[int, ...]] = (16, 32, 96, 256),
        load_weights_from_unet: bool = True,
        conditioning_channels: int = 3,
    ):
        r"""
        Instantiate a [`ControlNetModel`] from [`UNet2DConditionModel`].

        Parameters:
            unet (`UNet2DConditionModel`):
                The UNet model weights to copy to the [`ControlNetModel`]. All configuration options are also copied
                where applicable.
        """
        transformer_layers_per_block = (
            unet.config.transformer_layers_per_block if "transformer_layers_per_block" in unet.config else 1
        )
        encoder_hid_dim = unet.config.encoder_hid_dim if "encoder_hid_dim" in unet.config else None
        encoder_hid_dim_type = unet.config.encoder_hid_dim_type if "encoder_hid_dim_type" in unet.config else None
        addition_embed_type = unet.config.addition_embed_type if "addition_embed_type" in unet.config else None
        addition_time_embed_dim = (
            unet.config.addition_time_embed_dim if "addition_time_embed_dim" in unet.config else None
        )

        controlnet = cls(
            encoder_hid_dim=encoder_hid_dim,
            encoder_hid_dim_type=encoder_hid_dim_type,
            addition_embed_type=addition_embed_type,
            addition_time_embed_dim=addition_time_embed_dim,
            transformer_layers_per_block=transformer_layers_per_block,
            in_channels=unet.config.in_channels,
            flip_sin_to_cos=unet.config.flip_sin_to_cos,
            freq_shift=unet.config.freq_shift,
            down_block_types=unet.config.down_block_types,
            only_cross_attention=unet.config.only_cross_attention,
            block_out_channels=unet.config.block_out_channels,
            layers_per_block=unet.config.layers_per_block,
            downsample_padding=unet.config.downsample_padding,
            mid_block_scale_factor=unet.config.mid_block_scale_factor,
            act_fn=unet.config.act_fn,
            norm_num_groups=unet.config.norm_num_groups,
            norm_eps=unet.config.norm_eps,
            cross_attention_dim=unet.config.cross_attention_dim,
            attention_head_dim=unet.config.attention_head_dim,
            num_attention_heads=unet.config.num_attention_heads,
            use_linear_projection=unet.config.use_linear_projection,
            class_embed_type=unet.config.class_embed_type,
            num_class_embeds=unet.config.num_class_embeds,
            upcast_attention=unet.config.upcast_attention,
            resnet_time_scale_shift=unet.config.resnet_time_scale_shift,
            projection_class_embeddings_input_dim=unet.config.projection_class_embeddings_input_dim,
            mid_block_type=unet.config.mid_block_type,
            controlnet_conditioning_channel_order=controlnet_conditioning_channel_order,
            conditioning_embedding_out_channels=conditioning_embedding_out_channels,
            conditioning_channels=conditioning_channels,
        )

        if load_weights_from_unet:
            controlnet.conv_in.load_state_dict(unet.conv_in.state_dict())
            controlnet.time_proj.load_state_dict(unet.time_proj.state_dict())
            controlnet.time_embedding.load_state_dict(unet.time_embedding.state_dict())

            if controlnet.class_embedding:
                controlnet.class_embedding.load_state_dict(unet.class_embedding.state_dict())

            if hasattr(controlnet, "add_embedding"):
                controlnet.add_embedding.load_state_dict(unet.add_embedding.state_dict())

            controlnet.down_blocks.load_state_dict(unet.down_blocks.state_dict())
            controlnet.mid_block.load_state_dict(unet.mid_block.state_dict())

        return controlnet

    @property
    def attn_processors(self) -> Dict[str, AttentionProcessor]:
        r"""
        Returns:
            `dict` of attention processors: A dictionary containing all attention processors used in the model,
            indexed by weight name.
        """
        # set recursively
        processors = {}

        def fn_recursive_add_processors(name: str, module: torch.nn.Module, processors: Dict[str, AttentionProcessor]):
            if hasattr(module, "get_processor"):
                processors[f"{name}.processor"] = module.get_processor()

            for sub_name, child in module.named_children():
                fn_recursive_add_processors(f"{name}.{sub_name}", child, processors)

            return processors

        for name, module in self.named_children():
            fn_recursive_add_processors(name, module, processors)

        return processors

    def set_attn_processor(self, processor: Union[AttentionProcessor, Dict[str, AttentionProcessor]]):
        r"""
        Sets the attention processor to use to compute attention.

        Parameters:
            processor (`dict` of `AttentionProcessor` or only `AttentionProcessor`):
                The instantiated processor class or a dictionary of processor classes that will be set as the processor
                for **all** `Attention` layers.

                If `processor` is a dict, the key needs to define the path to the corresponding cross attention
                processor. This is strongly recommended when setting trainable attention processors.

        """
        count = len(self.attn_processors.keys())

        if isinstance(processor, dict) and len(processor) != count:
            raise ValueError(
                f"A dict of processors was passed, but the number of processors {len(processor)} does not match the"
                f" number of attention layers: {count}. Please make sure to pass {count} processor classes."
            )

        def fn_recursive_attn_processor(name: str, module: torch.nn.Module, processor):
            if hasattr(module, "set_processor"):
                if not isinstance(processor, dict):
                    module.set_processor(processor)
                else:
                    module.set_processor(processor.pop(f"{name}.processor"))

            for sub_name, child in module.named_children():
                fn_recursive_attn_processor(f"{name}.{sub_name}", child, processor)

        for name, module in self.named_children():
            fn_recursive_attn_processor(name, module, processor)

    def set_default_attn_processor(self):
        """
        Disables custom attention processors and sets the default attention implementation.
        """
        if all(proc.__class__ in ADDED_KV_ATTENTION_PROCESSORS for proc in self.attn_processors.values()):
            processor = AttnAddedKVProcessor()
        elif all(proc.__class__ in CROSS_ATTENTION_PROCESSORS for proc in self.attn_processors.values()):
            processor = AttnProcessor()
        else:
            raise ValueError(
                f"Cannot call `set_default_attn_processor` when attention processors are of type"
                f" {next(iter(self.attn_processors.values()))}"
            )

        self.set_attn_processor(processor)

    def set_attention_slice(self, slice_size: Union[str, int, List[int]]) -> None:
        r"""
        Enable sliced attention computation.

        When this option is enabled, the attention module splits the input tensor in slices to compute attention in
        several steps. This is useful for saving some memory in exchange for a small decrease in speed.

        Args:
            slice_size (`str` or `int` or `list(int)`, *optional*, defaults to `"auto"`):
                When `"auto"`, input to the attention heads is halved, so attention is computed in two steps. If
                `"max"`, maximum amount of memory is saved by running only one slice at a time. If a number is
                provided, uses as many slices as `attention_head_dim // slice_size`. In this case, `attention_head_dim`
                must be a multiple of `slice_size`.
        """
        sliceable_head_dims = []

        def fn_recursive_retrieve_sliceable_dims(module: torch.nn.Module):
            if hasattr(module, "set_attention_slice"):
                sliceable_head_dims.append(module.sliceable_head_dim)

            for child in module.children():
                fn_recursive_retrieve_sliceable_dims(child)

        # retrieve number of attention layers
        for module in self.children():
            fn_recursive_retrieve_sliceable_dims(module)

        num_sliceable_layers = len(sliceable_head_dims)

        if slice_size == "auto":
            # half the attention head size is usually a good trade-off between
            # speed and memory
            slice_size = [dim // 2 for dim in sliceable_head_dims]
        elif slice_size == "max":
            # make smallest slice possible
            slice_size = num_sliceable_layers * [1]

        slice_size = num_sliceable_layers * [slice_size] if not isinstance(slice_size, list) else slice_size

        if len(slice_size) != len(sliceable_head_dims):
            raise ValueError(
                f"You have provided {len(slice_size)}, but {self.config} has {len(sliceable_head_dims)} different"
                f" attention layers. Make sure to match `len(slice_size)` to be {len(sliceable_head_dims)}."
            )

        for i in range(len(slice_size)):
            size = slice_size[i]
            dim = sliceable_head_dims[i]
            if size is not None and size > dim:
                raise ValueError(f"size {size} has to be smaller or equal to {dim}.")

        # Recursively walk through all the children. Any child that exposes the
        # `set_attention_slice` method receives its slice size.
        def fn_recursive_set_attention_slice(module: torch.nn.Module, slice_size: List[int]):
            if hasattr(module, "set_attention_slice"):
                module.set_attention_slice(slice_size.pop())

            for child in module.children():
                fn_recursive_set_attention_slice(child, slice_size)

        reversed_slice_size = list(reversed(slice_size))
        for module in self.children():
            fn_recursive_set_attention_slice(module, reversed_slice_size)

    def forward(
        self,
        sample: torch.Tensor,
        timestep: Union[torch.Tensor, float, int],
        encoder_hidden_states: torch.Tensor,
        controlnet_cond: torch.Tensor,
        conditioning_scale: float = 1.0,
        class_labels: Optional[torch.Tensor] = None,
        timestep_cond: Optional[torch.Tensor] = None,
        attention_mask: Optional[torch.Tensor] = None,
        added_cond_kwargs: Optional[Dict[str, torch.Tensor]] = None,
        cross_attention_kwargs: Optional[Dict[str, Any]] = None,
        guess_mode: bool = False,
        return_dict: bool = True,
    ) -> Union[ControlNetOutput, Tuple[Tuple[torch.Tensor, ...], torch.Tensor]]:
        """
        The [`ControlNetModel`] forward method.

        Args:
            sample (`torch.Tensor`):
                The noisy input tensor.
            timestep (`Union[torch.Tensor, float, int]`):
                The number of timesteps to denoise an input.
            encoder_hidden_states (`torch.Tensor`):
                The encoder hidden states.
            controlnet_cond (`torch.Tensor`):
                The conditional input tensor of shape `(batch_size, conditioning_channels, height, width)`.
            conditioning_scale (`float`, defaults to `1.0`):
                The scale factor for ControlNet outputs.
            class_labels (`torch.Tensor`, *optional*, defaults to `None`):
                Optional class labels for conditioning. Their embeddings will be summed with the timestep embeddings.
            timestep_cond (`torch.Tensor`, *optional*, defaults to `None`):
                Additional conditional embeddings for timestep. If provided, the embeddings will be summed with the
                timestep_embedding passed through the `self.time_embedding` layer to obtain the final timestep
                embeddings.
            attention_mask (`torch.Tensor`, *optional*, defaults to `None`):
                An attention mask of shape `(batch, key_tokens)` is applied to `encoder_hidden_states`. If `1` the mask
                is kept, otherwise if `0` it is discarded. Mask will be converted into a bias, which adds large
                negative values to the attention scores corresponding to "discard" tokens.
            added_cond_kwargs (`dict`):
                Additional conditions for the Stable Diffusion XL UNet.
            cross_attention_kwargs (`dict[str]`, *optional*, defaults to `None`):
                A kwargs dictionary that if specified is passed along to the `AttnProcessor`.
            guess_mode (`bool`, defaults to `False`):
                In this mode, the ControlNet encoder tries its best to recognize the content of the input even if
                you remove all prompts. A `guidance_scale` between 3.0 and 5.0 is recommended.
            return_dict (`bool`, defaults to `True`):
                Whether or not to return a [`~models.controlnets.controlnet.ControlNetOutput`] instead of a plain
                tuple.

        Returns:
            [`~models.controlnets.controlnet.ControlNetOutput`] **or** `tuple`:
                If `return_dict` is `True`, a [`~models.controlnets.controlnet.ControlNetOutput`] is returned,
                otherwise a tuple is returned where the first element is the sample tensor.
        """
        # check channel order
        channel_order = self.config.controlnet_conditioning_channel_order

        if channel_order == "rgb":
            # in rgb order by default
            ...
        elif channel_order == "bgr":
            controlnet_cond = torch.flip(controlnet_cond, dims=[1])
        else:
            raise ValueError(f"unknown `controlnet_conditioning_channel_order`: {channel_order}")

        # prepare attention_mask
        if attention_mask is not None:
            attention_mask = (1 - attention_mask.to(sample.dtype)) * -10000.0
            attention_mask = attention_mask.unsqueeze(1)

        # 1. time
        timesteps = timestep
        if not torch.is_tensor(timesteps):
            # TODO: this requires sync between CPU and GPU. So try to pass timesteps as tensors if you can
            is_mps = sample.device.type == "mps"
            is_npu = sample.device.type == "npu"
            if isinstance(timestep, float):
                dtype = torch.float32 if (is_mps or is_npu) else torch.float64
            else:
                dtype = torch.int32 if (is_mps or is_npu) else torch.int64
            timesteps = torch.tensor([timesteps], dtype=dtype, device=sample.device)
        elif len(timesteps.shape) == 0:
            timesteps = timesteps[None].to(sample.device)

        # broadcast to batch dimension in a way that's compatible with ONNX/Core ML
        timesteps = timesteps.expand(sample.shape[0])

        t_emb = self.time_proj(timesteps)

        # timesteps does not contain any weights and will always return f32 tensors
        # but time_embedding might actually be running in fp16. so we need to cast here.
        # there might be better ways to encapsulate this.
        t_emb = t_emb.to(dtype=sample.dtype)

        emb = self.time_embedding(t_emb, timestep_cond)
        aug_emb = None

        if self.class_embedding is not None:
            if class_labels is None:
                raise ValueError("class_labels should be provided when num_class_embeds > 0")

            if self.config.class_embed_type == "timestep":
                class_labels = self.time_proj(class_labels)

            class_emb = self.class_embedding(class_labels).to(dtype=self.dtype)
            emb = emb + class_emb

        if self.config.addition_embed_type is not None:
            if self.config.addition_embed_type == "text":
                aug_emb = self.add_embedding(encoder_hidden_states)

            elif self.config.addition_embed_type == "text_time":
                if "text_embeds" not in added_cond_kwargs:
                    raise ValueError(
                        f"{self.__class__} has the config param `addition_embed_type` set to 'text_time' which requires the keyword argument `text_embeds` to be passed in `added_cond_kwargs`"
                    )
                text_embeds = added_cond_kwargs.get("text_embeds")
                if "time_ids" not in added_cond_kwargs:
                    raise ValueError(
                        f"{self.__class__} has the config param `addition_embed_type` set to 'text_time' which requires the keyword argument `time_ids` to be passed in `added_cond_kwargs`"
                    )
                time_ids = added_cond_kwargs.get("time_ids")
                time_embeds = self.add_time_proj(time_ids.flatten())
                time_embeds = time_embeds.reshape((text_embeds.shape[0], -1))

                add_embeds = torch.concat([text_embeds, time_embeds], dim=-1)
                add_embeds = add_embeds.to(emb.dtype)
                aug_emb = self.add_embedding(add_embeds)

        emb = emb + aug_emb if aug_emb is not None else emb

        # 2. pre-process
        sample = self.conv_in(sample)

        controlnet_cond = self.controlnet_cond_embedding(controlnet_cond)
        sample = sample + controlnet_cond

        # 3. down
        down_block_res_samples = (sample,)
        for downsample_block in self.down_blocks:
            if hasattr(downsample_block, "has_cross_attention") and downsample_block.has_cross_attention:
                sample, res_samples = downsample_block(
                    hidden_states=sample,
                    temb=emb,
                    encoder_hidden_states=encoder_hidden_states,
                    attention_mask=attention_mask,
                    cross_attention_kwargs=cross_attention_kwargs,
                )
            else:
                sample, res_samples = downsample_block(hidden_states=sample, temb=emb)

            down_block_res_samples += res_samples

        # 4. mid
        if self.mid_block is not None:
            if hasattr(self.mid_block, "has_cross_attention") and self.mid_block.has_cross_attention:
                sample = self.mid_block(
                    sample,
                    emb,
                    encoder_hidden_states=encoder_hidden_states,
                    attention_mask=attention_mask,
                    cross_attention_kwargs=cross_attention_kwargs,
                )
            else:
                sample = self.mid_block(sample, emb)

        # 5. Control net blocks
        controlnet_down_block_res_samples = ()

        for down_block_res_sample, controlnet_block in zip(down_block_res_samples, self.controlnet_down_blocks):
            down_block_res_sample = controlnet_block(down_block_res_sample)
            controlnet_down_block_res_samples = controlnet_down_block_res_samples + (down_block_res_sample,)

        down_block_res_samples = controlnet_down_block_res_samples

        mid_block_res_sample = self.controlnet_mid_block(sample)

        # 6. scaling
        if guess_mode and not self.config.global_pool_conditions:
            # ramp the conditioning from weak on the outermost block to full strength at the mid block
            scales = torch.logspace(-1, 0, len(down_block_res_samples) + 1, device=sample.device)  # 0.1 to 1.0
            scales = scales * conditioning_scale
            down_block_res_samples = [sample * scale for sample, scale in zip(down_block_res_samples, scales)]
            mid_block_res_sample = mid_block_res_sample * scales[-1]  # last one
        else:
            down_block_res_samples = [sample * conditioning_scale for sample in down_block_res_samples]
            mid_block_res_sample = mid_block_res_sample * conditioning_scale

        if self.config.global_pool_conditions:
            down_block_res_samples = [
                torch.mean(sample, dim=(2, 3), keepdim=True) for sample in down_block_res_samples
            ]
            mid_block_res_sample = torch.mean(mid_block_res_sample, dim=(2, 3), keepdim=True)

        if not return_dict:
            return (down_block_res_samples, mid_block_res_sample)

        return ControlNetOutput(
            down_block_res_samples=down_block_res_samples, mid_block_res_sample=mid_block_res_sample
        )


def zero_module(module):
    for p in module.parameters():
        nn.init.zeros_(p)
    return module