
from dataclasses import dataclass
from typing import Any, Dict, List, Optional, Tuple, Union

import torch
import torch.nn as nn
import torch.utils.checkpoint

from ...configuration_utils import ConfigMixin, register_to_config
from ...loaders import PeftAdapterMixin, UNet2DConditionLoadersMixin
from ...loaders.single_file_model import FromOriginalModelMixin
from ...utils import USE_PEFT_BACKEND, BaseOutput, deprecate, logging, scale_lora_layers, unscale_lora_layers
from ..activations import get_activation
from ..attention_processor import (
    ADDED_KV_ATTENTION_PROCESSORS,
    CROSS_ATTENTION_PROCESSORS,
    Attention,
    AttentionProcessor,
    AttnAddedKVProcessor,
    AttnProcessor,
    FusedAttnProcessor2_0,
)
from ..embeddings import (
    GaussianFourierProjection,
    GLIGENTextBoundingboxProjection,
    ImageHintTimeEmbedding,
    ImageProjection,
    ImageTimeEmbedding,
    TextImageProjection,
    TextImageTimeEmbedding,
    TextTimeEmbedding,
    TimestepEmbedding,
    Timesteps,
)
from ..modeling_utils import ModelMixin
from .unet_2d_blocks import get_down_block, get_mid_block, get_up_block


logger = logging.get_logger(__name__)  # pylint: disable=invalid-name


@dataclass
class UNet2DConditionOutput(BaseOutput):
    """
    The output of [`UNet2DConditionModel`].

    Args:
        sample (`torch.Tensor` of shape `(batch_size, num_channels, height, width)`):
            The hidden states output conditioned on `encoder_hidden_states` input. Output of last layer of model.
    Nsample)__name__
__module____qualname____doc__r0   torchTensor__annotations__     c/home/cdr/jupyterlab/.venv/lib/python3.12/site-packages/diffusers/models/unets/unet_2d_condition.pyr/   r/   :   s      FELLr9   r/   c            b           e Zd ZdZdZg dZdgZdgZe	 	 	 	 	 	 	 	 	 	 	 	 	 	 	 	 	 	 	 	 	 	 	 	 	 	 	 	 	 	 	 	 	 	 	 	 	 	 	 	 	 	 	 	 	 	 	 	 dade	e
eeeef   f      deded	ed
ededee   de	e   dee   de
eee   f   dee   de
eee   f   dedededede	e   dede
eee   f   de
eee   ee   f   de	eee         de	e   de	e   de
eee   f   de	e
eee   f      ded ed!e	e   d"e	e   d#e	e   d$e	e   d%ed&ed'ed(ed)ed*e	e   d+e	e   d,e	e   d-e	e   d.ed/ed0e	e   d1ed2ed3e	e   d4e	e   d5ef` fd6       Zdee   dee   de
eee   f   dee   de
eee   f   de
eee   f   de
eee   eee      f   dedede	e
eee   f      fd7Zd)eded
eded*ed8eeef   fd9Zde	e   de
eee   f   de	e   fd:Zd!e	e   ded$e	e   d0e	e   d;ed<efd=Zd"ed5ed#e	e   d
edede	e   de	e   d0e	e   d;efd>Zd1edefd?Zed8eeef   fd@       ZdAe
eeeef   f   fdBZdC ZdbdDe
eeee   f   fdEZdFedGedHedIefdJZdK Z dL Z!dM Z"dNe#jH                  dOe
e#jH                  eef   d8e	e#jH                     fdPZ%dNe#jH                  dQe	e#jH                     d8e	e#jH                     fdRZ&dSe#jH                  dTe#jH                  dUeee'f   d8e	e#jH                     fdVZ(dTe#jH                  dUeee'f   d8e#jH                  fdWZ)	 	 	 	 	 	 	 	 	 	 dcdNe#jH                  dOe
e#jH                  eef   dTe#jH                  dQe	e#jH                     dXe	e#jH                     dYe	e#jH                     dZe	eee'f      dUe	eee#jH                  f      d[e	ee#jH                        d\e	e#jH                     d]e	ee#jH                        d^e	e#jH                     d_ed8e
e*ef   fd`Z+ xZ,S )dUNet2DConditionModela  
    A conditional 2D UNet model that takes a noisy sample, conditional state, and a timestep and returns a sample
    shaped output.

    This model inherits from [`ModelMixin`]. Check the superclass documentation for its generic methods implemented
    for all models (such as downloading or saving).

    Parameters:
        sample_size (`int` or `Tuple[int, int]`, *optional*, defaults to `None`):
            Height and width of input/output sample.
        in_channels (`int`, *optional*, defaults to 4): Number of channels in the input sample.
        out_channels (`int`, *optional*, defaults to 4): Number of channels in the output.
        center_input_sample (`bool`, *optional*, defaults to `False`): Whether to center the input sample.
        flip_sin_to_cos (`bool`, *optional*, defaults to `True`):
            Whether to flip the sin to cos in the time embedding.
        freq_shift (`int`, *optional*, defaults to 0): The frequency shift to apply to the time embedding.
        down_block_types (`Tuple[str]`, *optional*, defaults to `("CrossAttnDownBlock2D", "CrossAttnDownBlock2D", "CrossAttnDownBlock2D", "DownBlock2D")`):
            The tuple of downsample blocks to use.
        mid_block_type (`str`, *optional*, defaults to `"UNetMidBlock2DCrossAttn"`):
            Block type for middle of UNet, it can be one of `UNetMidBlock2DCrossAttn`, `UNetMidBlock2D`, or
            `UNetMidBlock2DSimpleCrossAttn`. If `None`, the mid block layer is skipped.
        up_block_types (`Tuple[str]`, *optional*, defaults to `("UpBlock2D", "CrossAttnUpBlock2D", "CrossAttnUpBlock2D", "CrossAttnUpBlock2D")`):
            The tuple of upsample blocks to use.
        only_cross_attention (`bool` or `Tuple[bool]`, *optional*, defaults to `False`):
            Whether to include self-attention in the basic transformer blocks, see
            [`~models.attention.BasicTransformerBlock`].
        block_out_channels (`Tuple[int]`, *optional*, defaults to `(320, 640, 1280, 1280)`):
            The tuple of output channels for each block.
        layers_per_block (`int`, *optional*, defaults to 2): The number of layers per block.
        downsample_padding (`int`, *optional*, defaults to 1): The padding to use for the downsampling convolution.
        mid_block_scale_factor (`float`, *optional*, defaults to 1.0): The scale factor to use for the mid block.
        dropout (`float`, *optional*, defaults to 0.0): The dropout probability to use.
        act_fn (`str`, *optional*, defaults to `"silu"`): The activation function to use.
        norm_num_groups (`int`, *optional*, defaults to 32): The number of groups to use for the normalization.
            If `None`, normalization and activation layers are skipped in post-processing.
        norm_eps (`float`, *optional*, defaults to 1e-5): The epsilon to use for the normalization.
        cross_attention_dim (`int` or `Tuple[int]`, *optional*, defaults to 1280):
            The dimension of the cross attention features.
        transformer_layers_per_block (`int`, `Tuple[int]`, or `Tuple[Tuple]`, *optional*, defaults to 1):
            The number of transformer blocks of type [`~models.attention.BasicTransformerBlock`]. Only relevant for
            [`~models.unets.unet_2d_blocks.CrossAttnDownBlock2D`], [`~models.unets.unet_2d_blocks.CrossAttnUpBlock2D`],
            [`~models.unets.unet_2d_blocks.UNetMidBlock2DCrossAttn`].
        reverse_transformer_layers_per_block (`Tuple[Tuple[int]]`, *optional*, defaults to `None`):
            The number of transformer blocks of type [`~models.attention.BasicTransformerBlock`], in the upsampling
            blocks of the U-Net. Only relevant if `transformer_layers_per_block` is of type `Tuple[Tuple]` and for
            [`~models.unets.unet_2d_blocks.CrossAttnDownBlock2D`], [`~models.unets.unet_2d_blocks.CrossAttnUpBlock2D`],
            [`~models.unets.unet_2d_blocks.UNetMidBlock2DCrossAttn`].
        encoder_hid_dim (`int`, *optional*, defaults to None):
            If `encoder_hid_dim_type` is defined, `encoder_hidden_states` will be projected from `encoder_hid_dim`
            dimension to `cross_attention_dim`.
        encoder_hid_dim_type (`str`, *optional*, defaults to `None`):
            If given, the `encoder_hidden_states` and potentially other embeddings are down-projected to text
            embeddings of dimension `cross_attention_dim` according to `encoder_hid_dim_type`.
        attention_head_dim (`int`, *optional*, defaults to 8): The dimension of the attention heads.
        num_attention_heads (`int`, *optional*):
            The number of attention heads. If not defined, defaults to `attention_head_dim`
        resnet_time_scale_shift (`str`, *optional*, defaults to `"default"`): Time scale shift config
            for ResNet blocks (see [`~models.resnet.ResnetBlock2D`]). Choose from `default` or `scale_shift`.
        class_embed_type (`str`, *optional*, defaults to `None`):
            The type of class embedding to use which is ultimately summed with the time embeddings. Choose from `None`,
            `"timestep"`, `"identity"`, `"projection"`, or `"simple_projection"`.
        addition_embed_type (`str`, *optional*, defaults to `None`):
            Configures an optional embedding which will be summed with the time embeddings. Choose from `None` or
            "text". "text" will use the `TextTimeEmbedding` layer.
        addition_time_embed_dim (`int`, *optional*, defaults to `None`):
            Dimension for the timestep embeddings.
        num_class_embeds (`int`, *optional*, defaults to `None`):
            Input dimension of the learnable embedding matrix to be projected to `time_embed_dim`, when performing
            class conditioning with `class_embed_type` equal to `None`.
        time_embedding_type (`str`, *optional*, defaults to `positional`):
            The type of position embedding to use for timesteps. Choose from `positional` or `fourier`.
        time_embedding_dim (`int`, *optional*, defaults to `None`):
            An optional override for the dimension of the projected time embedding.
        time_embedding_act_fn (`str`, *optional*, defaults to `None`):
            Optional activation function to use only once on the time embeddings before they are passed to the rest of
            the UNet. Choose from `silu`, `mish`, `gelu`, and `swish`.
        timestep_post_act (`str`, *optional*, defaults to `None`):
            The second activation function to use in timestep embedding. Choose from `silu`, `mish` and `gelu`.
        time_cond_proj_dim (`int`, *optional*, defaults to `None`):
            The dimension of `cond_proj` layer in the timestep embedding.
        conv_in_kernel (`int`, *optional*, defaults to `3`): The kernel size of `conv_in` layer.
        conv_out_kernel (`int`, *optional*, defaults to `3`): The kernel size of `conv_out` layer.
        projection_class_embeddings_input_dim (`int`, *optional*): The dimension of the `class_labels` input when
            `class_embed_type="projection"`. Required when `class_embed_type="projection"`.
        class_embeddings_concat (`bool`, *optional*, defaults to `False`): Whether to concatenate the time
            embeddings with the class embeddings.
        mid_block_only_cross_attention (`bool`, *optional*, defaults to `None`):
            Whether to use cross attention with the mid block when using the `UNetMidBlock2DSimpleCrossAttn`. If
            `only_cross_attention` is given as a single boolean and `mid_block_only_cross_attention` is `None`, the
            `only_cross_attention` value is used as the value for `mid_block_only_cross_attention`. Defaults to `False`
            otherwise.
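
    Example:

        A minimal usage sketch with the default configuration (the tensor shapes below are illustrative
        assumptions, not requirements):

        ```py
        >>> import torch
        >>> from diffusers import UNet2DConditionModel

        >>> unet = UNet2DConditionModel(sample_size=64)
        >>> noisy_sample = torch.randn(1, unet.config.in_channels, 64, 64)
        >>> encoder_hidden_states = torch.randn(1, 77, unet.config.cross_attention_dim)
        >>> denoised = unet(noisy_sample, timestep=10, encoder_hidden_states=encoder_hidden_states).sample
        ```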
    T)BasicTransformerBlockResnetBlock2DCrossAttnUpBlock2D	UpBlock2Dnormr=   sample_sizein_channelsout_channelscenter_input_sampleflip_sin_to_cos
freq_shiftdown_block_typesmid_block_typeup_block_typesonly_cross_attentionblock_out_channelslayers_per_blockdownsample_paddingmid_block_scale_factordropoutact_fnnorm_num_groupsnorm_epscross_attention_dimtransformer_layers_per_block$reverse_transformer_layers_per_blockencoder_hid_dimencoder_hid_dim_typeattention_head_dimnum_attention_headsdual_cross_attentionuse_linear_projectionclass_embed_typeaddition_embed_typeaddition_time_embed_dimnum_class_embedsupcast_attentionresnet_time_scale_shiftresnet_skip_time_actresnet_out_scale_factortime_embedding_typetime_embedding_dimtime_embedding_act_fntimestep_post_acttime_cond_proj_dimconv_in_kernelconv_out_kernel%projection_class_embeddings_input_dimattention_typeclass_embeddings_concatmid_block_only_cross_attentioncross_attention_normaddition_embed_type_num_headsc1                 	   t         E|           || _        |t        d      |xs |}| j	                  ||	|
|||||||
       |)dz
  dz  }1t        j                  ||d   |)|1      | _        | j                  |$||||%      \  }2}3t        |3|2||'|(      | _
        | j                  |||	       | j                  ||||+|2|3
       | j                  ||0||||||+|2	       |&d | _        nt        |&      | _        t        j                   g       | _        t        j                   g       | _        t'        |
t(              r|.|
}.|
gt+        |      z  }
|.d}.t'        |t,              r|ft+        |      z  }t'        |t,              r|ft+        |      z  }t'        |t,              r|ft+        |      z  }t'        |t,              r|gt+        |      z  }t'        |t,              r|gt+        |      z  }|-r|2dz  }4n|2}4|d   }5t/        |      D ]  \  }6}7|5}8||6   }5|6t+        |      dz
  k(  }9t1        |7fi d||6   d||6   d|8d|5d|4d|9 d|d|d|d||6   d||6   d|d|d|d|
|6   d| d|!d|,d|"d |#d!|/d"||6   ||6   n|5d#|}:| j"                  j3                  |:        t5        |fi d|4d|d$   d|d|d|d%|d|d$   d|d$   d|d$   d|d|d&|.d| d|!d|,d|"d!|/d"|d$   d#|| _        d| _        t;        t=        |            };t;        t=        |            }<t;        t=        |            }=t;        t=        |            }>|t;        t=        |            n|}?t;        t=        |
            }
|;d   }5t/        |	      D ]  \  }6}@|6t+        |      dz
  k(  }9|5}A|;|6   }5|;t?        |6dz   t+        |      dz
           }8|9sd'}B| xj8                  dz  c_        nd}BtA        @fi d|=|6   dz   d|?|6   d|8d|5d(Ad|4d)Bd|d|d*|6d|d|>|6   d|<|6   d|d|d|
|6   d| d|!d|,d|"d |#d!|/d"||6   ||6   n|5d#|}C| j$                  j3                  |C        |1t        jB                  |d   ||+      | _"        t        |      | _#        nd | _"        d | _#        |*dz
  dz  }Dt        j                  |d   ||*|D      | _$        | jK                  |,|,       y )-Na#  At the moment it is not possible to define the number of attention heads via `num_attention_heads` because of a naming issue as described in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131. Passing `num_attention_heads` will only be supported in diffusers v0.19.)
rH   rJ   rK   rL   rM   rT   rU   rV   rY   rZ   r*   r   r   )kernel_sizepadding)rL   rF   rG   rf   )rQ   post_act_fncond_proj_dim)rT   rW   )rQ   r`   rl   time_embed_dimtimestep_input_dim)rq   r_   rT   rW   rF   rG   rl   rw   F
num_layersrU   rC   rD   temb_channelsadd_downsample
resnet_epsresnet_act_fnresnet_groupsrT   rZ   rN   r[   r\   rK   ra   rb   rm   rc   rd   rp   rY   rP   output_scale_factorro   Tprev_output_channeladd_upsampleresolution_idx)num_channels
num_groupseps)rm   rT   )&super__init__rB   
ValueError_check_confignnConv2dconv_in_set_time_projr'   time_embedding_set_encoder_hid_proj_set_class_embedding_set_add_embeddingtime_embed_actr   
ModuleListdown_blocks	up_blocks
isinstanceboollenint	enumerater+   appendr,   	mid_blocknum_upsamplerslistreversedminr-   	GroupNormconv_norm_outconv_actconv_out_set_pos_net_if_use_gligen)FselfrB   rC   rD   rE   rF   rG   rH   rI   rJ   rK   rL   rM   rN   rO   rP   rQ   rR   rS   rT   rU   rV   rW   rX   rY   rZ   r[   r\   r]   r^   r_   r`   ra   rb   rc   rd   re   rf   rg   rh   ri   rj   rk   rl   rm   rn   ro   rp   rq   conv_in_paddingrw   rx   blocks_time_embed_dimoutput_channelidown_block_typeinput_channelis_final_block
down_blockreversed_block_out_channelsreversed_num_attention_headsreversed_layers_per_blockreversed_cross_attention_dim%reversed_transformer_layers_per_blockup_block_typer   r   up_blockconv_out_padding	__class__sF                                                                        r:   r   zUNet2DConditionModel.__init__   s   r 	&* v  2G5G 	-)!51- 3)E1U1 3 	 	
 *A-!3yy+A.NTc

 .2-@-@1+!1 .A .
** 0),
 	""  3+ 	# 	
 	!!-2W)1 	" 	
 	*G$; 3++!2W) 	  
	
 !("&D"01F"GD==,r**D1-51E.$8#9C@P<Q#Q )1-2*)3/#6"83?O;P"P(#."4!6=M9N!N)3/#6"83?O;P"P&, 01C8H4II2C8,H+ICP`La+a(" %3Q$6!$2! ,A."+,<"= 	0A*M/2N#&8"9A"==N'+A. .J!-L *	
 , 4 $21 $ % . %8$: %8$: $6 &: '<  &:!%<!" "2#$ )@%&  .'( &:)* )@+, &:-. =Oq<Q<]#5a#8cq/0  1J4 ##J/?	0D '
/
 +2.
  	

 !
 *
 !7
 *Fb)I
 !4B 7
 !4B 7
 "6
 #8
 ,J
 .
 %<
  *!
" "6#
$ "6%
&  2"5'
( )
0   '+84F+G&H#'+H5H,I'J$$(2B)C$D!'+H5H,I'J$ 4; 6785 	.
  $H-A$BC4Q7 ). 9 )	,A}#&8"9A"==N"08;N7AE3GYCZ]^C^8_`M "###q(#$#4Q7!; .SST-U *	
 , %8 4 * $ %  ! . %A$C %A$C &:  '<!" &:!%<#$ "2%& )@'(  .)* &:+, )@-. &:/0 =Oq<Q<]#5a#8cq12  3H6 NN!!(+S)	,X &!#/2T\"D +62DM "&D DM+a/A5		q!<_Vf
 	''~[n'or9   c                 N   t        |      t        |      k7  rt        d| d| d      t        |      t        |      k7  rt        d| d| d      t        |t              s)t        |      t        |      k7  rt        d| d| d      t        |
t              s)t        |
      t        |      k7  rt        d|
 d| d      t        |	t              s)t        |	      t        |      k7  rt        d|	 d| d      t        |t
              r)t        |      t        |      k7  rt        d	| d| d      t        |t              s)t        |      t        |      k7  rt        d
| d| d      t        |t
              r&|#|D ]  }t        |t
              st        d       y y y )Nz\Must provide the same number of `down_block_types` as `up_block_types`. `down_block_types`: z. `up_block_types`: .zbMust provide the same number of `block_out_channels` as `down_block_types`. `block_out_channels`: z. `down_block_types`: zfMust provide the same number of `only_cross_attention` as `down_block_types`. `only_cross_attention`: zdMust provide the same number of `num_attention_heads` as `down_block_types`. `num_attention_heads`: zbMust provide the same number of `attention_head_dim` as `down_block_types`. `attention_head_dim`: zdMust provide the same number of `cross_attention_dim` as `down_block_types`. `cross_attention_dim`: z^Must provide the same number of `layers_per_block` as `down_block_types`. `layers_per_block`: zOMust provide 'reverse_transformer_layers_per_block` if using asymmetrical UNet.)r   r   r   r   r   r   )r   rH   rJ   rK   rL   rM   rT   rU   rV   rY   rZ   layer_number_per_blocks               r:   r   z"UNet2DConditionModel._check_config  s     C$77no  oA  AU  Vd  Ue  ef  g  !"c*:&;;t  vH  uI  I_  `p  _q  qr  s  .5#>R:SWZ[kWl:lx  zN  yO  Oe  fv  ew  wx  y  -s3<O8PTWXhTi8iv  xK  wL  Lb  cs  bt  tu  v  ,c2s;M7NRUVfRg7gt  vH  uI  I_  `p  _q  qr  s  )40S9L5MQTUeQf5fv  xK  wL  Lb  cs  bt  tu  v  *C0S9I5JcRbNc5cp  rB  qC  CY  Zj  Yk  kl  m  2D9>b>j*F x&4d;$%vwwx ?k9r9   returnc                    |dk(  r@|xs |d   dz  }|dz  dk7  rt        d| d      t        |dz  dd|      | _        |}||fS |dk(  r*|xs |d   d	z  }t        |d   ||      | _        |d   }||fS t        | d
      )Nfourierr   r   z2`time_embed_dim` should be divisible by 2, but is r   F)set_W_to_weightlogrF   
positional   zJ does not exist. Please make sure to use one of `fourier` or `positional`.)r   r   	time_projr(   )r   re   rL   rF   rG   rf   rw   rx   s           r:   r   z#UNet2DConditionModel._set_time_proj  s     )+/L3Ea3H13LN!Q& #UVdUeef!ghh6!#UWfDN "0 111 !L0/L3Ea3H13LN&'9!'<ozZDN!3A!6 111	 &''qr r9   c                 X   |+|)d}| j                  |       t        j                  d       ||t        d| d      |dk(  rt	        j
                  ||      | _        y |dk(  rt        |||      | _        y |dk(  rt        ||	      | _        y |t        d
| d      d | _        y )N	text_proj)rX   zMencoder_hid_dim_type defaults to 'text_proj' as `encoder_hid_dim` is defined.zJ`encoder_hid_dim` has to be defined when `encoder_hid_dim_type` is set to r   text_image_proj)text_embed_dimimage_embed_dimrT   
image_proj)r   rT   z`encoder_hid_dim_type`: z? must be None, 'text_proj', 'text_image_proj', or 'image_proj'.)	r   loggerinfor   r   Linearencoder_hid_projr$   r"   )r   rX   rT   rW   s       r:   r   z*UNet2DConditionModel._set_encoder_hid_proj8  s      'O,G#. ##9M#NKKgh"';'G\]q\rrst   ;.$&IIo?R$SD!!%66 %8. 3$7%D!
 "\1$3 /$7%D! "-*+?*@@  A  %)D!r9   rw   rx   c                 j   ||t        j                  ||      | _        y |dk(  rt        |||      | _        y |dk(  rt        j                  ||      | _        y |dk(  r|t        d      t        ||      | _        y |dk(  r)|t        d      t        j                  ||      | _        y d | _        y )Ntimestep)rQ   identity
projectionzX`class_embed_type`: 'projection' requires `projection_class_embeddings_input_dim` be setsimple_projectionz_`class_embed_type`: 'simple_projection' requires `projection_class_embeddings_input_dim` be set)r   	Embeddingclass_embeddingr'   Identityr   r   )r   r]   rQ   r`   rl   rw   rx   s          r:   r   z)UNet2DConditionModel._set_class_embedding`  s     #(8(D#%<<0@.#QD +#45G`f#gD +#%;;~~#ND -4< n  $55Z\j#kD !444< u  $&99-RTb#cD #'D r9   c
                 J   |dk(  r||}
n|}
t        |
|	|      | _        y |dk(  rt        |||	      | _        y |dk(  r$t        |||      | _        t        ||	      | _        y |dk(  rt        ||	      | _        y |dk(  rt        ||	      | _        y |t        d	| d
      y )Ntext)	num_heads
text_image)r   r   rw   	text_timeimage)r   rw   
image_hintz`addition_embed_type`: zK must be None, 'text', 'text_image', 'text_time', 'image', or 'image_hint'.)	r&   add_embeddingr%   r(   add_time_projr'   r#   r!   r   )r   r^   rq   r_   rF   rG   rT   rW   rl   rw   text_time_embedding_from_dims              r:   r   z'UNet2DConditionModel._set_add_embedding  s     &(*/>,/B,!2,nHe"D !L0 "82DWhv"D !K/!*+BOU_!`D!23XZh!iD G+!3Odr!sD L0!7hv!wD ,)*=)>  ?J  K  -r9   c                     |dv rMd}t        |t              r|}nt        |t        t        f      r|d   }|dk(  rdnd}t	        |||      | _        y y )N)gatedzgated-text-imagei   r   r   z	text-onlyz
text-image)positive_lenout_dimfeature_type)r   r   r   tupler    position_net)r   rm   rT   r   r   s        r:   r   z/UNet2DConditionModel._set_pos_net_if_use_gligen  sb    ::L-s32/$?215*8G*C;L ?)3FUa!D ;r9   c                     i }dt         dt        j                  j                  dt        t         t
        f   ffd| j                         D ]  \  }} |||        |S )z
        Returns:
            `dict` of attention processors: A dictionary containing all attention processors used in the model with
            indexed by its weight name.
        namemodule
processorsc                     t        |d      r|j                         ||  d<   |j                         D ]  \  }} |  d| ||        |S )Nget_processor
.processorr   )hasattrr   named_children)r   r   r   sub_namechildfn_recursive_add_processorss        r:   r   zIUNet2DConditionModel.attn_processors.<locals>.fn_recursive_add_processors  sd    v/282F2F2H
dV:./#)#8#8#: U%+tfAhZ,@%TU r9   )strr5   r   Moduler   r   r   )r   r   r   r   r   s       @r:   attn_processorsz$UNet2DConditionModel.attn_processors  sm     
	c 	588?? 	X\]`bt]tXu 	 !//1 	BLD&'fjA	B r9   	processorc           	      T   t        | j                  j                               }t        |t              r,t        |      |k7  rt        dt        |       d| d| d      dt        dt        j                  j                  ffd| j                         D ]  \  }} |||        y)	a4  
        Sets the attention processor to use to compute attention.

        Parameters:
            processor (`dict` of `AttentionProcessor` or only `AttentionProcessor`):
                The instantiated processor class or a dictionary of processor classes that will be set as the processor
                for **all** `Attention` layers.

                If `processor` is a dict, the key needs to define the path to the corresponding cross attention
                processor. This is strongly recommended when setting trainable attention processors.

        z>A dict of processors was passed, but the number of processors z0 does not match the number of attention layers: z. Please make sure to pass z processor classes.r   r   c                     t        |d      rEt        |t              s|j                  |       n#|j                  |j	                  |  d             |j                         D ]  \  }} |  d| ||        y )Nset_processorr   r   )r   r   dictr   popr   )r   r   r   r   r   fn_recursive_attn_processors        r:   r   zLUNet2DConditionModel.set_attn_processor.<locals>.fn_recursive_attn_processor  sx    v/!)T2((3(($z7J)KL#)#8#8#: T%+tfAhZ,@%STr9   N)r   r   keysr   r   r   r   r5   r   r   r   )r   r   countr   r   r   s        @r:   set_attn_processorz'UNet2DConditionModel.set_attn_processor  s     D((--/0i&3y>U+BPQTU^Q_P` a005w6QRWQXXkm 
	Tc 	T588?? 	T !//1 	ALD&'fi@	Ar9   c           	      j   t        d | j                  j                         D              rt               }nmt        d | j                  j                         D              rt	               }n8t        dt        t        | j                  j                                            | j                  |       y)ze
        Disables custom attention processors and sets the default attention implementation.
        c              3   @   K   | ]  }|j                   t        v   y wN)r   r   .0procs     r:   	<genexpr>zBUNet2DConditionModel.set_default_attn_processor.<locals>.<genexpr>  s     i4t~~!>>i   c              3   @   K   | ]  }|j                   t        v   y wr  )r   r   r  s     r:   r  zBUNet2DConditionModel.set_default_attn_processor.<locals>.<genexpr>  s     h$#==hr  zOCannot call `set_default_attn_processor` when attention processors are of type N)	allr   valuesr   r   r   nextiterr   )r   r   s     r:   set_default_attn_processorz/UNet2DConditionModel.set_default_attn_processor  s     i4K_K_KfKfKhii,.Ih$J^J^JeJeJghh%Iabfgklp  mA  mA  mH  mH  mJ  hK  cL  bM  N  		*r9   
slice_sizec                    	
 g 
dt         j                  j                  f
fd| j                         D ]
  } |        t	        
      }|dk(  r
D cg c]  }|dz  	 }}n|dk(  r|dgz  }t        |t              s||gz  n|}t	        |      t	        
      k7  r=t        dt	        |       d| j                   d	t	        
       d
t	        
       d	      t        t	        |            D ]&  }||   }
|   }|||kD  st        d| d| d       dt         j                  j                  dt        t           f	fd	t        t        |            }| j                         D ]  } 	||        yc c}w )a  
        Enable sliced attention computation.

        When this option is enabled, the attention module splits the input tensor in slices to compute attention in
        several steps. This is useful for saving some memory in exchange for a small decrease in speed.

        Args:
            slice_size (`str` or `int` or `list(int)`, *optional*, defaults to `"auto"`):
                When `"auto"`, input to the attention heads is halved, so attention is computed in two steps. If
                `"max"`, maximum amount of memory is saved by running only one slice at a time. If a number is
                provided, uses as many slices as `attention_head_dim // slice_size`. In this case, `attention_head_dim`
                must be a multiple of `slice_size`.
        r   c                     t        | d      rj                  | j                         | j                         D ]
  } |        y Nset_attention_slice)r   r   sliceable_head_dimchildren)r   r   $fn_recursive_retrieve_sliceable_dimssliceable_head_dimss     r:   r  zVUNet2DConditionModel.set_attention_slice.<locals>.fn_recursive_retrieve_sliceable_dims  s@    v45#**6+D+DE* <4U;<r9   autor   maxr*   zYou have provided z, but z has zH different attention layers. Make sure to match `len(slice_size)` to be r   Nzsize z has to be smaller or equal to r  c                     t        | d      r| j                  |j                                | j                         D ]  } ||        y r  )r   r  r   r  )r   r  r    fn_recursive_set_attention_slices      r:   r  zRUNet2DConditionModel.set_attention_slice.<locals>.fn_recursive_set_attention_slice;  sE    v45**:>>+;<* D0
CDr9   )r5   r   r   r  r   r   r   r   configranger   r   r   )r   r  r   num_sliceable_layersdimr   sizereversed_slice_sizer  r  r  s           @@@r:   r  z(UNet2DConditionModel.set_attention_slice  s    !	< 	< mmo 	9F08	9  ##67 /BBs#(BJB5 -3J@J:W[@\)ZL8bl
z?c"566$S_$5VDKK=cReNfMg hQQTUhQiPjjkm 
 s:' 	VAa=D%a(CD3J 5.McURS!TUU		V	DUXX__ 	DRVWZR[ 	D #8J#78mmo 	JF,V5HI	J= Cs   E;s1s2b1b2c                     t        | j                        D ]9  \  }}t        |d|       t        |d|       t        |d|       t        |d|       ; y)aF  Enables the FreeU mechanism from https://huggingface.co/papers/2309.11497.

        The suffixes after the scaling factors represent the stage blocks where they are being applied.

        Please refer to the [official repository](https://github.com/ChenyangSi/FreeU) for combinations of values that
        are known to work well for different pipelines such as Stable Diffusion v1, v2, and Stable Diffusion XL.

        Args:
            s1 (`float`):
                Scaling factor for stage 1 to attenuate the contributions of the skip features. This is done to
                mitigate the "oversmoothing effect" in the enhanced denoising process.
            s2 (`float`):
                Scaling factor for stage 2 to attenuate the contributions of the skip features. This is done to
                mitigate the "oversmoothing effect" in the enhanced denoising process.
            b1 (`float`): Scaling factor for stage 1 to amplify the contributions of backbone features.
            b2 (`float`): Scaling factor for stage 2 to amplify the contributions of backbone features.
        r!  r"  r#  r$  N)r   r   setattr)r   r!  r"  r#  r$  r   upsample_blocks          r:   enable_freeuz!UNet2DConditionModel.enable_freeuF  sQ    $ "+4>>!: 	.A~ND"-ND"-ND"-ND"-		.r9   c                     h d}t        | j                        D ]3  \  }}|D ])  }t        ||      st        ||d      t	        ||d       + 5 y)zDisables the FreeU mechanism.>   r#  r$  r!  r"  N)r   r   r   getattrr&  )r   
freeu_keysr   r'  ks        r:   disable_freeuz"UNet2DConditionModel.disable_freeu^  sW    -
!*4>>!: 	5A~ 5>1-D1Q1]NAt45	5r9   c                 r   d| _         | j                  j                         D ]1  \  }}dt        |j                  j
                        v s(t        d       | j                  | _         | j                         D ]%  }t        |t              s|j                  d       ' | j                  t                      y)u1  
        Enables fused QKV projections. For self-attention modules, all projection matrices (i.e., query, key, value)
        are fused. For cross-attention modules, key and value projection matrices are fused.

        <Tip warning={true}>

        This API is 🧪 experimental.

        </Tip>
        NAddedzQ`fuse_qkv_projections()` is not supported for models having added KV projections.T)fuse)original_attn_processorsr   itemsr   r   r1   r   modulesr   r   fuse_projectionsr   r   )r   _attn_processorr   s       r:   fuse_qkv_projectionsz)UNet2DConditionModel.fuse_qkv_projectionsf  s     )-%!%!5!5!;!;!= 	vA~#n66??@@ !tuu	v )-(<(<%lln 	3F&),''T'2	3 	 5 78r9   c                 T    | j                   | j                  | j                          yy)u   Disables the fused QKV projection if enabled.

        <Tip warning={true}>

        This API is 🧪 experimental.

        </Tip>

        N)r1  r   )r   s    r:   unfuse_qkv_projectionsz+UNet2DConditionModel.unfuse_qkv_projections  s)     ((4##D$A$AB 5r9   r0   r   c                    |}t        j                  |      s|j                  j                  dk(  }|j                  j                  dk(  }t	        |t
              r%|s|rt         j                  nt         j                  }n$|s|rt         j                  nt         j                  }t        j                  |g||j                        }n6t        |j                        dk(  r|d    j                  |j                        }|j                  |j                  d         }| j                  |      }|j                  |j                         }|S )Nmpsnpu)dtypedevicer   r=  )r5   	is_tensorr>  typer   floatfloat32float64int32int64tensorr   shapetoexpandr   r=  )r   r0   r   	timestepsis_mpsis_npur=  t_embs           r:   get_time_embedz#UNet2DConditionModel.get_time_embed  s     	y) ]]''50F]]''50F(E**0F(.&u{{i[fmmTI!Q&!$**6==9I $$V\\!_5	y) v||,r9   class_labelsc                    d }| j                   ~|t        d      | j                  j                  dk(  r-| j	                  |      }|j                  |j                        }| j                  |      j                  |j                        }|S )Nz9class_labels should be provided when num_class_embeds > 0r   r?  )r   r   r  r]   r   rI  r=  )r   r0   rP  	class_embs       r:   get_class_embedz$UNet2DConditionModel.get_class_embed  s    	+# !\]]{{++z9#~~l;  ,V\\B,,\:==FLL=QIr9   embencoder_hidden_statesadded_cond_kwargsc                    d }| j                   j                  dk(  r| j                  |      }|S | j                   j                  dk(  rSd|vrt        | j                   d      |j                  d      }|j                  d|      }| j                  ||      }|S | j                   j                  dk(  rd|vrt        | j                   d      |j                  d      }d|vrt        | j                   d	      |j                  d      }| j                  |j                               }	|	j                  |j                  d
   df      }	t        j                  ||	gd      }
|
j                  |j                        }
| j                  |
      }|S | j                   j                  dk(  r@d|vrt        | j                   d      |j                  d      }| j                  |      }|S | j                   j                  dk(  rTd|vsd|vrt        | j                   d      |j                  d      }|j                  d      }| j                  ||      }|S )Nr   r   image_embedsz has the config param `addition_embed_type` set to 'text_image' which requires the keyword argument `image_embeds` to be passed in `added_cond_kwargs`text_embedsr   z has the config param `addition_embed_type` set to 'text_time' which requires the keyword argument `text_embeds` to be passed in `added_cond_kwargs`time_idsz has the config param `addition_embed_type` set to 'text_time' which requires the keyword argument `time_ids` to be passed in `added_cond_kwargs`r   r   r  r   z has the config param `addition_embed_type` set to 'image' which requires the keyword argument `image_embeds` to be passed in `added_cond_kwargs`r   hintz has the config param `addition_embed_type` set to 'image_hint' which requires the keyword arguments `image_embeds` and `hint` to be passed in `added_cond_kwargs`)r  r^   r   r   r   getr   flattenreshaperH  r5   concatrI  r=  )r   rT  rU  rV  aug_emb
image_embs	text_embsrY  rZ  time_embeds
add_embedsr\  s               r:   get_aug_embedz"UNet2DConditionModel.get_aug_embed  s    ;;**f4(()>?GZ Y [[,,<%66 ~~&  '}  ~  +..~>J)--m=RSI((J?GF E [[,,;$55 ~~&  '{  |  ,//>K!22 ~~&  'x  y  ),,Z8H,,X-=-=-?@K%--{/@/@/CR.HIK{K&@bIJ#syy1J((4G$ # [[,,7%66 ~~&  'x  y  +..~>J((4G  [[,,<%66&HY:Y ~~&  'I  J  +..~>J$((0D((T:Gr9   c                 ,   | j                   ,| j                  j                  dk(  r| j                  |      }|S | j                   Z| j                  j                  dk(  rAd|vrt        | j                   d      |j                  d      }| j                  ||      }|S | j                   Y| j                  j                  dk(  r@d|vrt        | j                   d      |j                  d      }| j                  |      }|S | j                   | j                  j                  dk(  rkd|vrt        | j                   d      t        | d	      r| j                  | j                  |      }|j                  d      }| j                  |      }||f}|S )
Nr   r   rX  z has the config param `encoder_hid_dim_type` set to 'text_image_proj' which requires the keyword argument `image_embeds` to be passed in `added_cond_kwargs`r   z has the config param `encoder_hid_dim_type` set to 'image_proj' which requires the keyword argument `image_embeds` to be passed in `added_cond_kwargs`ip_image_projz has the config param `encoder_hid_dim_type` set to 'ip_image_proj' which requires the keyword argument `image_embeds` to be passed in `added_cond_kwargs`text_encoder_hid_proj)r   r  rX   r   r   r]  r   ri  )r   rU  rV  rX  s       r:   process_encoder_hidden_statesz2UNet2DConditionModel.process_encoder_hidden_states  s      ,1Q1QU`1`$($9$9:O$P!< %$; "".4;;3S3SWh3h%66 ~~&  'C  D  -00@L$($9$9:OQ]$^!* %$) "".4;;3S3SWc3c%66 ~~&  '~    -00@L$($9$9,$G! %$ "".4;;3S3SWf3f%66 ~~&  'A  B  t45$:T:T:`(,(B(BCX(Y%,00@L00>L%:L$I!$$r9   timestep_condattention_maskcross_attention_kwargsdown_block_additional_residualsmid_block_additional_residual$down_intrablock_additional_residualsencoder_attention_maskreturn_dictc                 	   d| j                   z  }d}d}|j                  dd D ]  }||z  dk7  sd} n |2d|j                  |j                        z
  dz  }|j	                  d      }|2d|j                  |j                        z
  dz  }|j	                  d      }| j
                  j                  rd|z  d	z
  }| j                  ||
      }| j                  ||      }| j                  ||      }|5| j
                  j                  rt        j                  ||gd      }n||z   }| j                  |||      }| j
                  j                  dk(  r|\  }}t        j                  ||gd      }|||z   n|}| j                  | j                  |      }| j!                  ||      }| j#                  |      }|J|j%                  dd      8|j'                         }|j)                  d      }d | j*                  di |i|d<   |#|j'                         }|j)                  dd	      }nd	}t,        rt/        | |       |
duxr |	du}|du}|s|
|	t1        dddd       |	}d}|f}| j2                  D ]  }t5        |d      rE|j6                  r9i }|r"t9        |      dkD  r|j)                  d      |d<    |d||||||d|\  }}n1 |||      \  }}|r"t9        |      dkD  r||j)                  d      z  }||z  } |r#d}t;        ||	      D ]  \  } }!| |!z   } || fz   } |}| j<                  t5        | j<                  d      r.| j<                  j6                  r| j=                  ||||||      }n| j=                  ||      }|r>t9        |      dkD  r0|j                  |d   j                  k(  r||j)                  d      z  }|r||
z   }t?        | j@                        D ]  \  }"}#|"t9        | j@                        dz
  k(  }$|t9        |#jB                         d }|dt9        |#jB                          }|$s|r|d   j                  dd }t5        |#d      r|#j6                  r |#||||||||      } |#||||      } | jD                  r"| jE                  |      }| jG                  |      }| jI                  |      }t,        rtK        | |       |s|fS tM        |       S )!aK  
        The [`UNet2DConditionModel`] forward method.

        Args:
            sample (`torch.Tensor`):
                The noisy input tensor with the following shape `(batch, channel, height, width)`.
            timestep (`torch.Tensor` or `float` or `int`): The number of timesteps to denoise an input.
            encoder_hidden_states (`torch.Tensor`):
                The encoder hidden states with shape `(batch, sequence_length, feature_dim)`.
            class_labels (`torch.Tensor`, *optional*, defaults to `None`):
                Optional class labels for conditioning. Their embeddings will be summed with the timestep embeddings.
            timestep_cond: (`torch.Tensor`, *optional*, defaults to `None`):
                Conditional embeddings for timestep. If provided, the embeddings will be summed with the samples passed
                through the `self.time_embedding` layer to obtain the timestep embeddings.
            attention_mask (`torch.Tensor`, *optional*, defaults to `None`):
                An attention mask of shape `(batch, key_tokens)` is applied to `encoder_hidden_states`. If `1` the mask
                is kept, otherwise if `0` it is discarded. Mask will be converted into a bias, which adds large
                negative values to the attention scores corresponding to "discard" tokens.
            cross_attention_kwargs (`dict`, *optional*):
                A kwargs dictionary that if specified is passed along to the `AttentionProcessor` as defined under
                `self.processor` in
                [diffusers.models.attention_processor](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py).
            added_cond_kwargs: (`dict`, *optional*):
                A kwargs dictionary containing additional embeddings that if specified are added to the embeddings that
                are passed along to the UNet blocks.
            down_block_additional_residuals: (`tuple` of `torch.Tensor`, *optional*):
                A tuple of tensors that if specified are added to the residuals of down unet blocks.
            mid_block_additional_residual: (`torch.Tensor`, *optional*):
                A tensor that if specified is added to the residual of the middle unet block.
            down_intrablock_additional_residuals (`tuple` of `torch.Tensor`, *optional*):
                additional residuals to be added within UNet down blocks, for example from T2I-Adapter side model(s)
            encoder_attention_mask (`torch.Tensor`):
                A cross-attention mask of shape `(batch, sequence_length)` is applied to `encoder_hidden_states`. If
                `True` the mask is kept, otherwise if `False` it is discarded. Mask will be converted into a bias,
                which adds large negative values to the attention scores corresponding to "discard" tokens.
            return_dict (`bool`, *optional*, defaults to `True`):
                Whether or not to return a [`~models.unets.unet_2d_condition.UNet2DConditionOutput`] instead of a plain
                tuple.

        Returns:
            [`~models.unets.unet_2d_condition.UNet2DConditionOutput`] or `tuple`:
                If `return_dict` is True, an [`~models.unets.unet_2d_condition.UNet2DConditionOutput`] is returned,
                otherwise a `tuple` is returned where the first element is the sample tensor.
        r   FNr   Tr*   g           ?)r0   r   )r0   rP  r   r[  )rT  rU  rV  r   )rU  rV  gligenobjsscalez2T2I should not use down_block_additional_residualsz1.3.0a?  Passing intrablock residual connections with `down_block_additional_residuals` is deprecated                        and will be removed in diffusers 1.3.0.  `down_block_additional_residuals` should only be used                        for ControlNet. Please make sure use `down_intrablock_additional_residuals` instead. )standard_warnhas_cross_attentionadditional_residuals)hidden_statestembrU  rl  rm  rq  )r|  r}  r8   )rU  rl  rm  rq  )r|  r}  res_hidden_states_tuplerU  rm  upsample_sizerl  rq  )r|  r}  r~  r  )r0   )'r   rH  rI  r=  	unsqueezer  rE   rO  r   rS  rn   r5   catrf  r^   r   rj  r   r]  copyr   r   r   r   r   r   r   rz  r   zipr   r   r   resnetsr   r   r   r   r/   )%r   r0   r   rU  rP  rk  rl  rm  rV  rn  ro  rp  rq  rr  default_overall_up_factorforward_upsample_sizer  r  rN  rT  rR  ra  r\  gligen_args
lora_scaleis_controlnet
is_adapterdown_block_res_samplesdownsample_blockr{  res_samplesnew_down_block_res_samplesdown_block_res_sampledown_block_additional_residualr   r'  r   s%                                        r:   forwardzUNet2DConditionModel.forward  s   @ %&t':':$:! !&<<$ 	C..!3(,%		 %
  ."3"3FLL"AAXMN+55a8N "-&'*@*C*CFLL*Q&QU]%]"%;%E%Ea%H" ;;**Z#%F ##6H#E!!%7((\(R	 {{22iii 0b9Io$$+@Te % 
 ;;**l:#MGTYY~15F&2cGm*%%c*C $ B B"7K\ !C !

 f% "-2H2L2LXW[2\2h%;%@%@%B"044X>K068I8I8I8XK8X/Y"8,
 "-%;%@%@%B"/33GSAJJdJ/5TAqFemqFq9E
 ;CHgHsDn $ 4S0J"( $ 0 0 	2')>?DTDhDh')$#&J"Ka"OCgCkCklmCn()?@&6 '"(*?#1+A+A' +'# '7VRU&V##&J"Ka"OBFFqIIF"k1"+	2. )+&IL&(GJ cE%'E )>@^(^%-GK`Jb-b*	c &@" >>%t~~'<=$..BdBd*?#1+A+A (  4 <=ALL$H$K$Q$QQ>BB1EE;;F "+4>>!: 	A~#dnn"5"99N0#n6L6L2M1M1OPK%;<Zs>CYCY?Z>Z%[" "&; 6r : @ @ D~'<=.BdBd'"(,7*?+A"/#1+A	 ("(,7"/	/	> ''/F]]6*Fv&j19$F33r9   )0Nr   r   FTr   )CrossAttnDownBlock2Dr  r  DownBlock2DUNetMidBlock2DCrossAttn)r@   r?   r?   r?   F)i@  i     r  r   r*   r*   g        silu    gh㈵>r  r*   NNN   NFFNNNNFdefaultFru  r   NNNNr
   r
   Nr  FNN@   )r  )
NNNNNNNNNT)-r1   r2   r3   r4    _supports_gradient_checkpointing_no_split_modules _skip_layerwise_casting_patterns_repeated_blocksr   r   r	   r   r   r   r   rB  r   r   r   r   r   r   r   propertyr   r   r   r   r  r   r  r(  r-  r7  r9  r5   r6   rO  rS  r   rf  rj  r/   r  __classcell__)r   s   @r:   r<   r<   G   s   [z (,$e(.x$/0 >B$) $(
 )B%t9>)?34"#())+6:MNLP)-.256@D%*&+*.-115*.!&'0%*),#/,0/3+/,0 ?C'(-9=.2-/mypeCsCx$89:yp yp 	yp
 "yp yp yp  *yp !yp c
yp  $D%+$56!yp" "#J#yp$  U3Z0%yp&  'yp( !&)yp* +yp, -yp. "#/yp0 1yp2 #3c
?33yp4 ',CsU5\,I&J5yp6 /7uU3Z7H.I7yp8 "#9yp: 'sm;yp< "#uSz/2=yp> &eCsO&<=?yp@ #AypB  $CypD #3-EypF &c]GypH "*#IypJ #3-KypL MypN "%OypP #QypR "'SypT !UypV %SMWypX  (}YypZ $C=[yp\ %SM]yp^ _yp` aypb 08}cypd eypf "&gyph )1iypj 'smkypl (+myp ypv	2x*2x c
2x $D%+$56	2x
 "#J2x  U3Z02x #3c
?32x ',CsU5:=N,N&O2x /32x  2x &eCsO&<=2xh2 2  2 	2
 2  2 
sCx28&)&sm&) #3c
?3&) "#	&)P#("3-#( #( #3-	#(
 08}#( #(  #(J( ( (+( "*#	(
 ( ( &c]( "#( 08}( (T SV  c+=&=!>  . AE2Dd3PbKbFc2c,d  AD+?JeCd3i4G.H ?JB.u .% .U . .0592Cll.3ELL%4L.M	%,,	6ell (5<<BX ]efkfrfr]s  2<<28=2Y]^acf^fYg2	%,,	2h"%%*\\"%FJ3PS8n"%	"%R 040415;??CIM@DNR9= P4P4 eS01P4  %||	P4
 u||,P4  -P4 !.P4 !)c3h 8P4 $Dell):$;<P4 *2%2E)FP4 (0'=P4 /7uU\\7J.KP4 !) 6P4 P4 
$e+	,P4r9   r<   )<dataclassesr   typingr   r   r   r   r   r	   r5   torch.nnr   torch.utils.checkpointconfiguration_utilsr   r   loadersr   r   loaders.single_file_modelr   utilsr   r   r   r   r   r   activationsr   attention_processorr   r   r   r   r   r   r   
embeddingsr   r    r!   r"   r#   r$   r%   r&   r'   r(   modeling_utilsr)   unet_2d_blocksr+   r,   r-   
get_loggerr1   r   r/   r<   r8   r9   r:   <module>r     s    " : :    B D ? m m (     (  
		H	% 	 J 	  	 X435PRbX4r9   