from typing import Dict, Optional, Tuple, Union

import torch
import torch.nn as nn

from ...configuration_utils import ConfigMixin, register_to_config
from ...loaders import PeftAdapterMixin
from ...loaders.single_file_model import FromOriginalModelMixin
from ...utils import deprecate
from ...utils.accelerate_utils import apply_forward_hook
from ..attention_processor import (
    ADDED_KV_ATTENTION_PROCESSORS,
    CROSS_ATTENTION_PROCESSORS,
    Attention,
    AttentionProcessor,
    AttnAddedKVProcessor,
    AttnProcessor,
    FusedAttnProcessor2_0,
)
from ..modeling_outputs import AutoencoderKLOutput
from ..modeling_utils import ModelMixin
from .vae import Decoder, DecoderOutput, DiagonalGaussianDistribution, Encoder


class AutoencoderKL(ModelMixin, ConfigMixin, FromOriginalModelMixin, PeftAdapterMixin):
    r"""
    A VAE model with KL loss for encoding images into latents and decoding latent representations into images.

    This model inherits from [`ModelMixin`]. Check the superclass documentation for its generic methods implemented
    for all models (such as downloading or saving).

    Parameters:
        in_channels (int, *optional*, defaults to 3): Number of channels in the input image.
        out_channels (int, *optional*, defaults to 3): Number of channels in the output.
        down_block_types (`Tuple[str]`, *optional*, defaults to `("DownEncoderBlock2D",)`):
            Tuple of downsample block types.
        up_block_types (`Tuple[str]`, *optional*, defaults to `("UpDecoderBlock2D",)`):
            Tuple of upsample block types.
        block_out_channels (`Tuple[int]`, *optional*, defaults to `(64,)`):
            Tuple of block output channels.
        act_fn (`str`, *optional*, defaults to `"silu"`): The activation function to use.
        latent_channels (`int`, *optional*, defaults to 4): Number of channels in the latent space.
        sample_size (`int`, *optional*, defaults to `32`): Sample input size.
        scaling_factor (`float`, *optional*, defaults to 0.18215):
            The component-wise standard deviation of the trained latent space computed using the first batch of the
            training set. This is used to scale the latent space to have unit variance when training the diffusion
            model. The latents are scaled with the formula `z = z * scaling_factor` before being passed to the
            diffusion model. When decoding, the latents are scaled back to the original scale with the formula: `z = 1
            / scaling_factor * z`. For more details, refer to sections 4.3.2 and D.1 of the [High-Resolution Image
            Synthesis with Latent Diffusion Models](https://huggingface.co/papers/2112.10752) paper.
        force_upcast (`bool`, *optional*, defaults to `True`):
            If enabled, it will force the VAE to run in float32 for high image resolution pipelines, such as SD-XL.
            The VAE can be fine-tuned / trained to a lower range without losing too much precision, in which case
            `force_upcast` can be set to `False` - see: https://huggingface.co/madebyollin/sdxl-vae-fp16-fix
        mid_block_add_attention (`bool`, *optional*, defaults to `True`):
            If enabled, the mid_block of the Encoder and Decoder will have attention blocks. If set to `False`, the
            mid_block will only have resnet blocks.
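
    Example:
        A minimal encode/decode round trip (a sketch: the checkpoint id is illustrative, and the scaling
        follows the `z = z * scaling_factor` formula documented above):

        ```py
        >>> import torch
        >>> from diffusers import AutoencoderKL

        >>> vae = AutoencoderKL.from_pretrained("runwayml/stable-diffusion-v1-5", subfolder="vae")
        >>> image = torch.randn(1, 3, 256, 256)

        >>> posterior = vae.encode(image).latent_dist
        >>> z = posterior.sample() * vae.config.scaling_factor
        >>> reconstruction = vae.decode(z / vae.config.scaling_factor).sample
        ```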
    TBasicTransformerBlockResnetBlock2Din_channelsout_channelsdown_block_typesup_block_typesblock_out_channelslayers_per_blockact_fnlatent_channelsnorm_num_groupssample_sizescaling_factorshift_factorlatents_meanlatents_stdforce_upcastuse_quant_convuse_post_quant_convmid_block_add_attentionc                    t         |           t        |||||||	d|	      | _        t	        ||||||	||      | _        |rt        j                  d|z  d|z  d      nd | _        |rt        j                  ||d      nd | _	        d| _
        d| _        | j                  j                  | _        t        | j                  j                  t         t"        f      r| j                  j                  d   n| j                  j                  }
t%        |
dt'        | j                  j(                        dz
  z  z        | _        d| _        y )	NT)	r!   r"   r#   r%   r&   r'   r)   double_zr2   )r!   r"   r$   r%   r&   r)   r'   r2   r   r   Fr   g      ?)super__init__r   encoderr   decodernnConv2d
quant_convpost_quant_convuse_slicing
use_tilingconfigr*   tile_sample_min_size
isinstancelisttupleintlenr%   tile_latent_min_sizetile_overlap_factor)selfr!   r"   r#   r$   r%   r&   r'   r(   r)   r*   r+   r,   r-   r.   r/   r0   r1   r2   	__class__s                      g/home/cdr/jupyterlab/.venv/lib/python3.12/site-packages/diffusers/models/autoencoders/autoencoder_kl.pyr6   zAutoencoderKL.__init__L   s9   , 	 #(-1-+$;

 '%)1-+$;	
 Uc"))A$7_9LaPhlQdryy/1Mjn  %)KK$;$;! $++11D%=A KK##A&(( 	
 %(qSA_A_=`cd=d7e(f$g!#'     r>   c                     || _         y)a  
        Enable tiled VAE decoding. When this option is enabled, the VAE will split the input tensor into tiles to
        compute decoding and encoding in several steps. This is useful for saving a large amount of memory and to allow
        processing larger images.
        """
        self.use_tiling = use_tiling
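    # A minimal sketch of how the tiling / slicing switches below are used (the `vae`
    # instance and the input shape are illustrative assumptions, not values from this file):
    #
    #   vae.enable_tiling()   # inputs larger than tile_sample_min_size are split into tiles
    #   vae.enable_slicing()  # batched inputs are encoded/decoded one sample at a time
    #   out = vae(torch.randn(2, 3, 1024, 1024)).sample
    #   vae.disable_tiling()
    #   vae.disable_slicing()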
    def disable_tiling(self):
        r"""
        Disable tiled VAE decoding. If `enable_tiling` was previously enabled, this method will go back to computing
        decoding in one step.
        """
        self.enable_tiling(False)

    def enable_slicing(self):
        r"""
        Enable sliced VAE decoding. When this option is enabled, the VAE will split the input tensor in slices to
        compute decoding in several steps. This is useful to save some memory and allow larger batch sizes.
        """
        self.use_slicing = True

    def disable_slicing(self):
        r"""
        Disable sliced VAE decoding. If `enable_slicing` was previously enabled, this method will go back to computing
        decoding in one step.
        """
        self.use_slicing = False

    @property
    # Copied from diffusers.models.unets.unet_2d_condition.UNet2DConditionModel.attn_processors
    def attn_processors(self) -> Dict[str, AttentionProcessor]:
        r"""
        Returns:
            `dict` of attention processors: A dictionary containing all attention processors used in the model,
            indexed by its weight name.
        """
        # set recursively
        processors = {}

        def fn_recursive_add_processors(name: str, module: torch.nn.Module, processors: Dict[str, AttentionProcessor]):
            if hasattr(module, "get_processor"):
                processors[f"{name}.processor"] = module.get_processor()

            for sub_name, child in module.named_children():
                fn_recursive_add_processors(f"{name}.{sub_name}", child, processors)

            return processors

        for name, module in self.named_children():
            fn_recursive_add_processors(name, module, processors)

        return processors

    # Copied from diffusers.models.unets.unet_2d_condition.UNet2DConditionModel.set_attn_processor
    def set_attn_processor(self, processor: Union[AttentionProcessor, Dict[str, AttentionProcessor]]):
        r"""
        Sets the attention processor to use to compute attention.

        Parameters:
            processor (`dict` of `AttentionProcessor` or only `AttentionProcessor`):
                The instantiated processor class or a dictionary of processor classes that will be set as the processor
                for **all** `Attention` layers.

                If `processor` is a dict, the key needs to define the path to the corresponding cross attention
                processor. This is strongly recommended when setting trainable attention processors.

        """
        count = len(self.attn_processors.keys())

        if isinstance(processor, dict) and len(processor) != count:
            raise ValueError(
                f"A dict of processors was passed, but the number of processors {len(processor)} does not match the"
                f" number of attention layers: {count}. Please make sure to pass {count} processor classes."
            )

        def fn_recursive_attn_processor(name: str, module: torch.nn.Module, processor):
            if hasattr(module, "set_processor"):
                if not isinstance(processor, dict):
                    module.set_processor(processor)
                else:
                    module.set_processor(processor.pop(f"{name}.processor"))

            for sub_name, child in module.named_children():
                fn_recursive_attn_processor(f"{name}.{sub_name}", child, processor)

        for name, module in self.named_children():
            fn_recursive_attn_processor(name, module, processor)
    # Copied from diffusers.models.unets.unet_2d_condition.UNet2DConditionModel.set_default_attn_processor
    def set_default_attn_processor(self):
        """
        Disables custom attention processors and sets the default attention implementation.
        """
        if all(proc.__class__ in ADDED_KV_ATTENTION_PROCESSORS for proc in self.attn_processors.values()):
            processor = AttnAddedKVProcessor()
        elif all(proc.__class__ in CROSS_ATTENTION_PROCESSORS for proc in self.attn_processors.values()):
            processor = AttnProcessor()
        else:
            raise ValueError(
                f"Cannot call `set_default_attn_processor` when attention processors are of type {next(iter(self.attn_processors.values()))}"
            )

        self.set_attn_processor(processor)

    def _encode(self, x: torch.Tensor) -> torch.Tensor:
        batch_size, num_channels, height, width = x.shape

        if self.use_tiling and (width > self.tile_sample_min_size or height > self.tile_sample_min_size):
            return self._tiled_encode(x)

        enc = self.encoder(x)
        if self.quant_conv is not None:
            enc = self.quant_conv(enc)

        return enc

    @apply_forward_hook
    def encode(
        self, x: torch.Tensor, return_dict: bool = True
    ) -> Union[AutoencoderKLOutput, Tuple[DiagonalGaussianDistribution]]:
        r"""
        Encode a batch of images into latents.

        Args:
            x (`torch.Tensor`): Input batch of images.
            return_dict (`bool`, *optional*, defaults to `True`):
                Whether to return a [`~models.autoencoder_kl.AutoencoderKLOutput`] instead of a plain tuple.

        Returns:
                The latent representations of the encoded images. If `return_dict` is True, a
                [`~models.autoencoder_kl.AutoencoderKLOutput`] is returned, otherwise a plain `tuple` is returned.
        """
        if self.use_slicing and x.shape[0] > 1:
            encoded_slices = [self._encode(x_slice) for x_slice in x.split(1)]
            h = torch.cat(encoded_slices)
        else:
            h = self._encode(x)

        posterior = DiagonalGaussianDistribution(h)

        if not return_dict:
            return (posterior,)

        return AutoencoderKLOutput(latent_dist=posterior)

    def _decode(self, z: torch.Tensor, return_dict: bool = True) -> Union[DecoderOutput, torch.Tensor]:
        if self.use_tiling and (z.shape[-1] > self.tile_latent_min_size or z.shape[-2] > self.tile_latent_min_size):
            return self.tiled_decode(z, return_dict=return_dict)

        if self.post_quant_conv is not None:
            z = self.post_quant_conv(z)

        dec = self.decoder(z)

        if not return_dict:
            return (dec,)

        return DecoderOutput(sample=dec)

    @apply_forward_hook
    def decode(
        self, z: torch.FloatTensor, return_dict: bool = True, generator=None
    ) -> Union[DecoderOutput, torch.FloatTensor]:
        r"""
        Decode a batch of images.

        Args:
            z (`torch.Tensor`): Input batch of latent vectors.
            return_dict (`bool`, *optional*, defaults to `True`):
                Whether to return a [`~models.vae.DecoderOutput`] instead of a plain tuple.

        Returns:
            [`~models.vae.DecoderOutput`] or `tuple`:
                If return_dict is True, a [`~models.vae.DecoderOutput`] is returned, otherwise a plain `tuple` is
                returned.

        """
        if self.use_slicing and z.shape[0] > 1:
            decoded_slices = [self._decode(z_slice).sample for z_slice in z.split(1)]
            decoded = torch.cat(decoded_slices)
        else:
            decoded = self._decode(z).sample

        if not return_dict:
            return (decoded,)

        return DecoderOutput(sample=decoded)

    def blend_v(self, a: torch.Tensor, b: torch.Tensor, blend_extent: int) -> torch.Tensor:
        # Crossfade the bottom rows of tile `a` into the top rows of tile `b`.
        blend_extent = min(a.shape[2], b.shape[2], blend_extent)
        for y in range(blend_extent):
            b[:, :, y, :] = a[:, :, -blend_extent + y, :] * (1 - y / blend_extent) + b[:, :, y, :] * (y / blend_extent)
        return b

    def blend_h(self, a: torch.Tensor, b: torch.Tensor, blend_extent: int) -> torch.Tensor:
        # Crossfade the right columns of tile `a` into the left columns of tile `b`.
        blend_extent = min(a.shape[3], b.shape[3], blend_extent)
        for x in range(blend_extent):
            b[:, :, :, x] = a[:, :, :, -blend_extent + x] * (1 - x / blend_extent) + b[:, :, :, x] * (x / blend_extent)
        return b
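    # blend_v / blend_h above crossfade the overlapping band between neighbouring tiles
    # linearly. For blend_extent = 4 (an illustrative value), row y of the current tile
    # is weighted y/4 while the matching row of the previous tile is weighted 1 - y/4:
    #
    #   y:        0     1     2     3
    #   previous: 1.00  0.75  0.50  0.25
    #   current:  0.00  0.25  0.50  0.75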
    def _tiled_encode(self, x: torch.Tensor) -> torch.Tensor:
        r"""Encode a batch of images using a tiled encoder.

        When this option is enabled, the VAE will split the input tensor into tiles to compute encoding in several
        steps. This is useful to keep memory use constant regardless of image size. The end result of tiled encoding is
        different from non-tiled encoding because each tile uses a different encoder. To avoid tiling artifacts, the
        tiles overlap and are blended together to form a smooth output. You may still see tile-sized changes in the
        output, but they should be much less noticeable.

        Args:
            x (`torch.Tensor`): Input batch of images.

        Returns:
            `torch.Tensor`:
                The latent representation of the encoded images.
        """
        overlap_size = int(self.tile_sample_min_size * (1 - self.tile_overlap_factor))
        blend_extent = int(self.tile_latent_min_size * self.tile_overlap_factor)
        row_limit = self.tile_latent_min_size - blend_extent

        # Split the image into overlapping tiles and encode them separately.
        rows = []
        for i in range(0, x.shape[2], overlap_size):
            row = []
            for j in range(0, x.shape[3], overlap_size):
                tile = x[:, :, i : i + self.tile_sample_min_size, j : j + self.tile_sample_min_size]
                tile = self.encoder(tile)
                if self.config.use_quant_conv:
                    tile = self.quant_conv(tile)
                row.append(tile)
            rows.append(row)
        result_rows = []
        for i, row in enumerate(rows):
            result_row = []
            for j, tile in enumerate(row):
                # blend the above tile and the left tile
                # to the current tile and add the current tile to the result row
                if i > 0:
                    tile = self.blend_v(rows[i - 1][j], tile, blend_extent)
                if j > 0:
                    tile = self.blend_h(row[j - 1], tile, blend_extent)
                result_row.append(tile[:, :, :row_limit, :row_limit])
            result_rows.append(torch.cat(result_row, dim=3))

        enc = torch.cat(result_rows, dim=2)
        return enc

    def tiled_encode(self, x: torch.Tensor, return_dict: bool = True) -> AutoencoderKLOutput:
        r"""Encode a batch of images using a tiled encoder.

        When this option is enabled, the VAE will split the input tensor into tiles to compute encoding in several
        steps. This is useful to keep memory use constant regardless of image size. The end result of tiled encoding is
        different from non-tiled encoding because each tile uses a different encoder. To avoid tiling artifacts, the
        tiles overlap and are blended together to form a smooth output. You may still see tile-sized changes in the
        output, but they should be much less noticeable.

        Args:
            x (`torch.Tensor`): Input batch of images.
            return_dict (`bool`, *optional*, defaults to `True`):
                Whether or not to return a [`~models.autoencoder_kl.AutoencoderKLOutput`] instead of a plain tuple.

        Returns:
            [`~models.autoencoder_kl.AutoencoderKLOutput`] or `tuple`:
                If return_dict is True, a [`~models.autoencoder_kl.AutoencoderKLOutput`] is returned, otherwise a plain
                `tuple` is returned.
        """
        deprecation_message = (
            "The tiled_encode implementation supporting the `return_dict` parameter is deprecated. In the future, the"
            " implementation of this method will be replaced with that of `_tiled_encode` and you will no longer be"
            " able to pass `return_dict`. You will also have to create a `DiagonalGaussianDistribution()` from the"
            " returned value."
        )
        deprecate("tiled_encode", "1.0.0", deprecation_message, standard_warn=False)

        overlap_size = int(self.tile_sample_min_size * (1 - self.tile_overlap_factor))
        blend_extent = int(self.tile_latent_min_size * self.tile_overlap_factor)
        row_limit = self.tile_latent_min_size - blend_extent

        # Split the image into overlapping tiles and encode them separately.
        rows = []
        for i in range(0, x.shape[2], overlap_size):
            row = []
            for j in range(0, x.shape[3], overlap_size):
                tile = x[:, :, i : i + self.tile_sample_min_size, j : j + self.tile_sample_min_size]
                tile = self.encoder(tile)
                if self.config.use_quant_conv:
                    tile = self.quant_conv(tile)
                row.append(tile)
            rows.append(row)
        result_rows = []
        for i, row in enumerate(rows):
            result_row = []
            for j, tile in enumerate(row):
                # blend the above tile and the left tile
                # to the current tile and add the current tile to the result row
                if i > 0:
                    tile = self.blend_v(rows[i - 1][j], tile, blend_extent)
                if j > 0:
                    tile = self.blend_h(row[j - 1], tile, blend_extent)
                result_row.append(tile[:, :, :row_limit, :row_limit])
            result_rows.append(torch.cat(result_row, dim=3))

        moments = torch.cat(result_rows, dim=2)
        posterior = DiagonalGaussianDistribution(moments)

        if not return_dict:
            return (posterior,)

        return AutoencoderKLOutput(latent_dist=posterior)

    def tiled_decode(self, z: torch.Tensor, return_dict: bool = True) -> Union[DecoderOutput, torch.Tensor]:
        r"""
        Decode a batch of images using a tiled decoder.

        Args:
            z (`torch.Tensor`): Input batch of latent vectors.
            return_dict (`bool`, *optional*, defaults to `True`):
                Whether or not to return a [`~models.vae.DecoderOutput`] instead of a plain tuple.

        Returns:
            [`~models.vae.DecoderOutput`] or `tuple`:
                If return_dict is True, a [`~models.vae.DecoderOutput`] is returned, otherwise a plain `tuple` is
                returned.
        """
        overlap_size = int(self.tile_latent_min_size * (1 - self.tile_overlap_factor))
        blend_extent = int(self.tile_sample_min_size * self.tile_overlap_factor)
        row_limit = self.tile_sample_min_size - blend_extent

        # Split z into overlapping tiles and decode them separately.
        # The tiles have an overlap to avoid seams between tiles.
        rows = []
        for i in range(0, z.shape[2], overlap_size):
            row = []
            for j in range(0, z.shape[3], overlap_size):
                tile = z[:, :, i : i + self.tile_latent_min_size, j : j + self.tile_latent_min_size]
                if self.config.use_post_quant_conv:
                    tile = self.post_quant_conv(tile)
                decoded = self.decoder(tile)
                row.append(decoded)
            rows.append(row)
        result_rows = []
        for i, row in enumerate(rows):
            result_row = []
            for j, tile in enumerate(row):
                # blend the above tile and the left tile
                # to the current tile and add the current tile to the result row
                if i > 0:
                    tile = self.blend_v(rows[i - 1][j], tile, blend_extent)
                if j > 0:
                    tile = self.blend_h(row[j - 1], tile, blend_extent)
                result_row.append(tile[:, :, :row_limit, :row_limit])
            result_rows.append(torch.cat(result_row, dim=3))

        dec = torch.cat(result_rows, dim=2)
        if not return_dict:
            return (dec,)

        return DecoderOutput(sample=dec)

    def forward(
        self,
        sample: torch.Tensor,
        sample_posterior: bool = False,
        return_dict: bool = True,
        generator: Optional[torch.Generator] = None,
    ) -> Union[DecoderOutput, torch.Tensor]:
        r"""
            sample (`torch.Tensor`): Input sample.
            sample_posterior (`bool`, *optional*, defaults to `False`):
                Whether to sample from the posterior.
            return_dict (`bool`, *optional*, defaults to `True`):
                Whether or not to return a [`DecoderOutput`] instead of a plain tuple.
        )r   r   )r   r   r   moder   r   )	rH   r   r   r   r   r   r   r   r   s	            rJ   forwardzAutoencoderKL.forward  sf     KKN..	  9 5A Akk!n##6MC((rK   c                 r   d| _         | j                  j                         D ]1  \  }}dt        |j                  j
                        v s(t        d       | j                  | _         | j                         D ]%  }t        |t              s|j                  d       ' | j                  t                      y)u1  
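    # A minimal sketch of the projection-fusion toggle below (the `vae` instance and the
    # usage pattern are illustrative assumptions, not values from this file):
    #
    #   vae.fuse_qkv_projections()    # fuse q/k/v (self-attn) and k/v (cross-attn) projections
    #   ...run inference...
    #   vae.unfuse_qkv_projections()  # restore the original attention processors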
    def fuse_qkv_projections(self):
        """
        Enables fused QKV projections. For self-attention modules, all projection matrices (i.e., query, key, value)
        are fused. For cross-attention modules, key and value projection matrices are fused.

        <Tip warning={true}>

        This API is 🧪 experimental.

        </Tip>
        """
        self.original_attn_processors = None

        for _, attn_processor in self.attn_processors.items():
            if "Added" in str(attn_processor.__class__.__name__):
                raise ValueError("`fuse_qkv_projections()` is not supported for models having added KV projections.")

        self.original_attn_processors = self.attn_processors

        for module in self.modules():
            if isinstance(module, Attention):
                module.fuse_projections(fuse=True)

        self.set_attn_processor(FusedAttnProcessor2_0())

    def unfuse_qkv_projections(self):
        """Disables the fused QKV projection if enabled.

        <Tip warning={true}>

        This API is 🧪 experimental.

        </Tip>

        """
        if self.original_attn_processors is not None:
            self.set_attn_processor(self.original_attn_processors)