
from dataclasses import dataclass
from typing import Optional, Tuple, Union

import torch

from ...configuration_utils import ConfigMixin, register_to_config
from ...utils import BaseOutput
from ...utils.accelerate_utils import apply_forward_hook
from ..modeling_utils import ModelMixin
from .vae import DecoderOutput, DecoderTiny, EncoderTiny


@dataclass
class AutoencoderTinyOutput(BaseOutput):
    """
    Output of AutoencoderTiny encoding method.

    Args:
        latents (`torch.Tensor`): Encoded outputs of the `Encoder`.

    """

    latents: torch.Tensor


class AutoencoderTiny(ModelMixin, ConfigMixin):
    r"""
    A tiny distilled VAE model for encoding images into latents and decoding latent representations into images.

    [`AutoencoderTiny`] is a wrapper around the original implementation of `TAESD`.

    This model inherits from [`ModelMixin`]. Check the superclass documentation for its generic methods implemented for
    all models (such as downloading or saving).

    Parameters:
        in_channels (`int`, *optional*, defaults to 3): Number of channels in the input image.
        out_channels (`int`, *optional*, defaults to 3): Number of channels in the output.
        encoder_block_out_channels (`Tuple[int]`, *optional*, defaults to `(64, 64, 64, 64)`):
            Tuple of integers representing the number of output channels for each encoder block. The length of the
            tuple should be equal to the number of encoder blocks.
        decoder_block_out_channels (`Tuple[int]`, *optional*, defaults to `(64, 64, 64, 64)`):
            Tuple of integers representing the number of output channels for each decoder block. The length of the
            tuple should be equal to the number of decoder blocks.
        act_fn (`str`, *optional*, defaults to `"relu"`):
            Activation function to be used throughout the model.
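        upsample_fn (`str`, *optional*, defaults to `"nearest"`):
            Interpolation mode used for upsampling in the decoder.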
        latent_channels (`int`, *optional*, defaults to 4):
            Number of channels in the latent representation. The latent space acts as a compressed representation of
            the input image.
        upsampling_scaling_factor (`int`, *optional*, defaults to 2):
            Scaling factor for upsampling in the decoder. It determines the size of the output image during the
            upsampling process.
        num_encoder_blocks (`Tuple[int]`, *optional*, defaults to `(1, 3, 3, 3)`):
            Tuple of integers representing the number of encoder blocks at each stage of the encoding process. The
            length of the tuple should be equal to the number of stages in the encoder. Each stage has a different
            number of encoder blocks.
        num_decoder_blocks (`Tuple[int]`, *optional*, defaults to `(3, 3, 3, 1)`):
            Tuple of integers representing the number of decoder blocks at each stage of the decoding process. The
            length of the tuple should be equal to the number of stages in the decoder. Each stage has a different
            number of decoder blocks.
        latent_magnitude (`int`, *optional*, defaults to 3):
            Magnitude of the latent representation. This parameter scales the latent representation values to control
            the extent of information preservation.
        latent_shift (`float`, *optional*, defaults to 0.5):
            Shift applied to the latent representation. This parameter controls the center of the latent space.
        scaling_factor (`float`, *optional*, defaults to 1.0):
            The component-wise standard deviation of the trained latent space computed using the first batch of the
            training set. This is used to scale the latent space to have unit variance when training the diffusion
            model. The latents are scaled with the formula `z = z * scaling_factor` before being passed to the
            diffusion model. When decoding, the latents are scaled back to the original scale with the formula: `z = 1
            / scaling_factor * z`. For more details, refer to sections 4.3.2 and D.1 of the [High-Resolution Image
            Synthesis with Latent Diffusion Models](https://huggingface.co/papers/2112.10752) paper. For this
            Autoencoder, however, no such scaling factor was used, hence the value of 1.0 as the default.
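        shift_factor (`float`, *optional*, defaults to 0.0):
            Shift applied to the latents together with `scaling_factor`. No shift was used for this Autoencoder,
            hence the default of 0.0.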
        force_upcast (`bool`, *optional*, defaults to `False`):
            If enabled, it will force the VAE to run in float32 for high image resolution pipelines, such as SD-XL. VAE
            can be fine-tuned / trained to a lower range without losing too much precision, in which case
            `force_upcast` can be set to `False` (see this fp16-friendly
            [AutoEncoder](https://huggingface.co/madebyollin/sdxl-vae-fp16-fix)).
    Tin_channelsout_channelsencoder_block_out_channels.decoder_block_out_channelsact_fnupsample_fnlatent_channelsupsampling_scaling_factornum_encoder_blocksnum_decoder_blockslatent_magnitudelatent_shiftforce_upcastscaling_factorshift_factorc           	         t         |           t        |      t        |	      k7  rt        d      t        |      t        |
      k7  rt        d      t	        |||	||      | _        t        |||
||||      | _        || _        || _	        || _
        d| _        d| _        d|z  | _        d| _        d| _        | j                  | j                  z  | _        | j#                  |	       | j#                  d
       y )NzQ`encoder_block_out_channels` should have the same length as `num_encoder_blocks`.zQ`decoder_block_out_channels` should have the same length as `num_decoder_blocks`.)r!   r"   
num_blocksblock_out_channelsr%   )r!   r"   r1   r2   r(   r%   r&   Fr   g      ?i   )r2   )r-   )super__init__len
ValueErrorr   encoderr   decoderr+   r,   r.   use_slicing
use_tilingspatial_scale_factortile_overlap_factortile_sample_min_sizetile_latent_min_sizer	   )selfr!   r"   r#   r$   r%   r&   r'   r(   r)   r*   r+   r,   r-   r.   r/   	__class__s                   r   r4   zAutoencoderTiny.__init__a   s   & 	)*c2D.EEpqq)*c2D.EEpqq"#()9
 #'%)9&?#
 !1(,  %&|O!#( $'!$($=$=AZAZ$Z!3MNU3r   xreturnc                     |j                  d| j                  z        j                  | j                        j	                  dd      S )zraw latents -> [0, 1]r   r   r   )divr+   addr,   clampr?   rA   s     r   scale_latentszAutoencoderTiny.scale_latents   s;    uuQ.../33D4E4EFLLQPQRRr   c                 p    |j                  | j                        j                  d| j                  z        S )z[0, 1] -> raw latentsr   )subr,   mulr+   rG   s     r   unscale_latentszAutoencoderTiny.unscale_latents   s-    uuT&&'++A0E0E,EFFr   Nc                     d| _         y)z
    def enable_slicing(self) -> None:
        r"""
        Enable sliced VAE decoding. When this option is enabled, the VAE will split the input tensor in slices to
        compute decoding in several steps. This is useful to save some memory and allow larger batch sizes.
        """
        self.use_slicing = True
    def disable_slicing(self) -> None:
        r"""
        Disable sliced VAE decoding. If `enable_slicing` was previously enabled, this method will go back to computing
        decoding in one step.
        """
        self.use_slicing = False
    def enable_tiling(self, use_tiling: bool = True) -> None:
        r"""
        Enable tiled VAE decoding. When this option is enabled, the VAE will split the input tensor into tiles to
        compute decoding and encoding in several steps. This is useful for saving a large amount of memory and to allow
        processing larger images.
        """
        self.use_tiling = use_tiling
    def disable_tiling(self) -> None:
        r"""
        Disable tiled VAE decoding. If `enable_tiling` was previously enabled, this method will go back to computing
        decoding in one step.
        """
        self.enable_tiling(False)

    def _tiled_encode(self, x: torch.Tensor) -> torch.Tensor:
        r"""Encode a batch of images using a tiled encoder.

        When this option is enabled, the VAE will split the input tensor into tiles to compute encoding in several
        steps. This is useful to keep memory use constant regardless of image size. To avoid tiling artifacts, the
        tiles overlap and are blended together to form a smooth output.

        Args:
            x (`torch.Tensor`): Input batch of images.

        Returns:
            `torch.Tensor`: Encoded batch of images.
        """
        # scale of encoder output relative to input
        sf = self.spatial_scale_factor
        tile_size = self.tile_sample_min_size

        # number of pixels to blend and to traverse between tiles
        blend_size = int(tile_size * self.tile_overlap_factor)
        traverse_size = tile_size - blend_size

        # tiles index (up/left)
        ti = range(0, x.shape[-2], traverse_size)
        tj = range(0, x.shape[-1], traverse_size)

        # mask for blending
        blend_masks = torch.stack(
            torch.meshgrid([torch.arange(tile_size / sf) / (blend_size / sf - 1)] * 2, indexing="ij")
        )
        blend_masks = blend_masks.clamp(0, 1).to(x.device)

        # output array
        out = torch.zeros(x.shape[0], 4, x.shape[-2] // sf, x.shape[-1] // sf, device=x.device)
        for i in ti:
            for j in tj:
                tile_in = x[..., i : i + tile_size, j : j + tile_size]
                # tile result
                tile_out = out[..., i // sf : (i + tile_size) // sf, j // sf : (j + tile_size) // sf]
                tile = self.encoder(tile_in)
                h, w = tile.shape[-2], tile.shape[-1]
                # blend tile result into output
                blend_mask_i = torch.ones_like(blend_masks[0]) if i == 0 else blend_masks[0]
                blend_mask_j = torch.ones_like(blend_masks[1]) if j == 0 else blend_masks[1]
                blend_mask = blend_mask_i * blend_mask_j
                tile, blend_mask = tile[..., :h, :w], blend_mask[..., :h, :w]
                tile_out.copy_(blend_mask * tile + (1 - blend_mask) * tile_out)
        return out

    def _tiled_decode(self, x: torch.Tensor) -> torch.Tensor:
        r"""Decode a batch of latents using a tiled decoder.

        When this option is enabled, the VAE will split the input tensor into tiles to compute decoding in several
        steps. This is useful to keep memory use constant regardless of image size. To avoid tiling artifacts, the
        tiles overlap and are blended together to form a smooth output.

        Args:
            x (`torch.Tensor`): Input batch of latents.

        Returns:
            `torch.Tensor`: Decoded batch of images.
        """
        # scale of decoder output relative to input
        sf = self.spatial_scale_factor
        tile_size = self.tile_latent_min_size

        # number of pixels to blend and to traverse between tiles
        blend_size = int(tile_size * self.tile_overlap_factor)
        traverse_size = tile_size - blend_size

        # tiles index (up/left)
        ti = range(0, x.shape[-2], traverse_size)
        tj = range(0, x.shape[-1], traverse_size)

        # mask for blending
        blend_masks = torch.stack(
            torch.meshgrid([torch.arange(tile_size * sf) / (blend_size * sf - 1)] * 2, indexing="ij")
        )
        blend_masks = blend_masks.clamp(0, 1).to(x.device)

        # output array
        out = torch.zeros(x.shape[0], 3, x.shape[-2] * sf, x.shape[-1] * sf, device=x.device)
        for i in ti:
            for j in tj:
                tile_in = x[..., i : i + tile_size, j : j + tile_size]
                # tile result
                tile_out = out[..., i * sf : (i + tile_size) * sf, j * sf : (j + tile_size) * sf]
                tile = self.decoder(tile_in)
                # blend tile result into output
                h, w = tile.shape[-2], tile.shape[-1]
                blend_mask_i = torch.ones_like(blend_masks[0]) if i == 0 else blend_masks[0]
                blend_mask_j = torch.ones_like(blend_masks[1]) if j == 0 else blend_masks[1]
                blend_mask = (blend_mask_i * blend_mask_j)[..., :h, :w]
                tile_out.copy_(blend_mask * tile + (1 - blend_mask) * tile_out)
        return out
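    # Worked numbers for the overlap blending above (illustrative comment, not
    # part of the original file, assuming the defaults tile_sample_min_size=512,
    # tile_overlap_factor=0.125, and a spatial scale factor of 8): image tiles
    # span 512 px and start 512 - 64 = 448 px apart, so adjacent tiles share a
    # 64-px band; in latent space `blend_masks` ramps from 0 to 1 across the
    # first 64 / 8 = 8 positions of each axis, cross-fading the overlap instead
    # of leaving visible seams.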
Qijipipqris^et""7+DLLQXDYYF  YYv&F.2ooT''*4<<PQ?F9$V44   5B>	generatorc                    | j                   rr|j                  d   dkD  r`|j                  d      D cg c]0  }| j                  r| j	                  |      n| j                  |      2 }}t        j                  |      }n.| j                  r| j	                  |      n| j                  |      }|s|fS t        |      S c c}w )Nr   r   sample)	r9   rc   r   r:   r   r8   r   r   r   )r?   rA   r   r   r   r   s         r   decodezAutoencoderTiny.decode0  s     
Qijipipqris^et""7+DLLQXDYYF  YYv&F.2ooT''*4<<PQ?F9F++r   r   c                 0   | j                  |      j                  }| j                  |      j                  d      j	                         j                         }| j                  |dz        }| j                  |      j                  }|s|fS t        |      S )z
    def forward(
        self,
        sample: torch.Tensor,
        return_dict: bool = True,
    ) -> Union[DecoderOutput, Tuple[torch.Tensor]]:
        r"""
        Args:
            sample (`torch.Tensor`): Input sample.
            return_dict (`bool`, *optional*, defaults to `True`):
                Whether or not to return a [`DecoderOutput`] instead of a plain tuple.
        """
        enc = self.encode(sample).latents

        # scale latents to be in [0, 1], then quantize latents to a byte tensor,
        # as if we were storing the latents in an RGBA uint8 image.
        scaled_enc = self.scale_latents(enc).mul_(255).round_().byte()

        # unquantize latents back into [0, 1], then unscale latents back to their original range,
        # as if we were loading the latents from an RGBA uint8 image.
        unscaled_enc = self.unscale_latents(scaled_enc / 255.0)

        dec = self.decode(unscaled_enc).sample

        if not return_dict:
            return (dec,)
        return DecoderOutput(sample=dec)
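# A minimal usage sketch (illustrative, not part of the original file). The
# "madebyollin/taesd" checkpoint name comes from the TAESD model card linked in
# the class docstring; any image batch with the expected 3 input channels works
# the same way.
#
#     import torch
#     from diffusers import AutoencoderTiny
#
#     vae = AutoencoderTiny.from_pretrained("madebyollin/taesd")
#     image = torch.randn(1, 3, 512, 512)          # dummy image batch
#     latents = vae.encode(image).latents          # -> (1, 4, 64, 64)
#     reconstruction = vae.decode(latents).sample  # -> (1, 3, 512, 512)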