
from dataclasses import dataclass
from typing import Optional, Tuple, Union

import numpy as np
import torch

from ...configuration_utils import ConfigMixin, register_to_config
from ...utils import BaseOutput
from ...utils.torch_utils import randn_tensor
from ..scheduling_utils import SchedulerMixin


@dataclass
class KarrasVeOutput(BaseOutput):
    """
    Output class for the scheduler's step function output.

    Args:
        prev_sample (`torch.Tensor` of shape `(batch_size, num_channels, height, width)` for images):
            Computed sample (x_{t-1}) of previous timestep. `prev_sample` should be used as next model input in the
            denoising loop.
        derivative (`torch.Tensor` of shape `(batch_size, num_channels, height, width)` for images):
            Derivative of predicted original image sample (x_0).
        pred_original_sample (`torch.Tensor` of shape `(batch_size, num_channels, height, width)` for images):
            The predicted denoised sample (x_{0}) based on the model output from the current timestep.
            `pred_original_sample` can be used to preview progress or for guidance.
    """

    prev_sample: torch.Tensor
    derivative: torch.Tensor
    pred_original_sample: Optional[torch.Tensor] = None


class KarrasVeScheduler(SchedulerMixin, ConfigMixin):
    """
    A stochastic scheduler tailored to variance-expanding models.

    This model inherits from [`SchedulerMixin`] and [`ConfigMixin`]. Check the superclass documentation for the generic
    methods the library implements for all schedulers such as loading and saving.

    <Tip>

    For more details on the parameters, see [Appendix E](https://huggingface.co/papers/2206.00364). The grid search
    values used to find the optimal `{s_noise, s_churn, s_min, s_max}` for a specific model are described in Table 5 of
    the paper.

    </Tip>

    Args:
        sigma_min (`float`, defaults to 0.02):
            The minimum noise magnitude.
        sigma_max (`float`, defaults to 100):
            The maximum noise magnitude.
        s_noise (`float`, defaults to 1.007):
            The amount of additional noise to counteract loss of detail during sampling. A reasonable range is [1.000,
            1.011].
        s_churn (`float`, defaults to 80):
            The parameter controlling the overall amount of stochasticity. A reasonable range is [0, 100].
        s_min (`float`, defaults to 0.05):
            The start value of the sigma range in which noise is added (enabling stochasticity). A reasonable range is
            [0, 10].
        s_max (`float`, defaults to 50):
            The end value of the sigma range in which noise is added. A reasonable range is [0.2, 80].
    """

    order = 2

    @register_to_config
    def __init__(
        self,
        sigma_min: float = 0.02,
        sigma_max: float = 100,
        s_noise: float = 1.007,
        s_churn: float = 80,
        s_min: float = 0.05,
        s_max: float = 50,
    ):
        # standard deviation of the initial noise distribution
        self.init_noise_sigma = sigma_max

        # setable values
        self.num_inference_steps: Optional[int] = None
        self.timesteps: Optional[torch.Tensor] = None
        self.schedule: Optional[torch.Tensor] = None  # sigma(t_i)

    def scale_model_input(self, sample: torch.Tensor, timestep: Optional[int] = None) -> torch.Tensor:
        """
        Ensures interchangeability with schedulers that need to scale the denoising model input depending on the
        current timestep.

        Args:
            sample (`torch.Tensor`):
                The input sample.
            timestep (`int`, *optional*):
                The current timestep in the diffusion chain.

        Returns:
            `torch.Tensor`:
                A scaled input sample.
        """
        return sample

    def set_timesteps(self, num_inference_steps: int, device: Union[str, torch.device] = None):
        """
        Sets the discrete timesteps used for the diffusion chain (to be run before inference).

        Args:
            num_inference_steps (`int`):
                The number of diffusion steps used when generating samples with a pre-trained model.
            device (`str` or `torch.device`, *optional*):
                The device to which the timesteps should be moved. If `None`, the timesteps are not moved.
        """
        self.num_inference_steps = num_inference_steps
        timesteps = np.arange(0, self.num_inference_steps)[::-1].copy()
        self.timesteps = torch.from_numpy(timesteps).to(device)
        schedule = [
            (
                self.config.sigma_max**2
                * (self.config.sigma_min**2 / self.config.sigma_max**2) ** (i / (num_inference_steps - 1))
            )
            for i in self.timesteps
        ]
        self.schedule = torch.tensor(schedule, dtype=torch.float32, device=device)

    def add_noise_to_input(
        self, sample: torch.Tensor, sigma: float, generator: Optional[torch.Generator] = None
    ) -> Tuple[torch.Tensor, float]:
        """
        Explicit Langevin-like "churn" step of adding noise to the sample according to a `gamma_i ≥ 0` to reach a
        higher noise level `sigma_hat = sigma_i + gamma_i * sigma_i`.

        Args:
            sample (`torch.Tensor`):
                The input sample.
            sigma (`float`):
                The current noise level.
            generator (`torch.Generator`, *optional*):
                A random number generator.
        """
        if self.config.s_min <= sigma <= self.config.s_max:
            gamma = min(self.config.s_churn / self.num_inference_steps, 2**0.5 - 1)
        else:
            gamma = 0

        # sample eps ~ N(0, S_noise^2 * I)
        eps = self.config.s_noise * randn_tensor(sample.shape, generator=generator).to(sample.device)
        sigma_hat = sigma + gamma * sigma
        sample_hat = sample + ((sigma_hat**2 - sigma**2) ** 0.5 * eps)

        return sample_hat, sigma_hat

    def step(
        self,
        model_output: torch.Tensor,
        sigma_hat: float,
        sigma_prev: float,
        sample_hat: torch.Tensor,
        return_dict: bool = True,
    ) -> Union[KarrasVeOutput, Tuple]:
        """
        Predict the sample from the previous timestep by reversing the SDE. This function propagates the diffusion
        process from the learned model outputs (most often the predicted noise).

        Args:
            model_output (`torch.Tensor`):
                The direct output from the learned diffusion model.
            sigma_hat (`float`):
                The increased noise level produced by `add_noise_to_input`.
            sigma_prev (`float`):
                The noise level of the previous (next lower) timestep.
            sample_hat (`torch.Tensor`):
                The sample after the stochastic "churn" step.
            return_dict (`bool`, *optional*, defaults to `True`):
                Whether or not to return a [`~schedulers.deprecated.scheduling_karras_ve.KarrasVeOutput`] or `tuple`.

        Returns:
            [`~schedulers.deprecated.scheduling_karras_ve.KarrasVeOutput`] or `tuple`:
                If `return_dict` is `True`, [`~schedulers.deprecated.scheduling_karras_ve.KarrasVeOutput`] is
                returned, otherwise a tuple is returned where the first element is the sample tensor.
        """
        pred_original_sample = sample_hat + sigma_hat * model_output
        derivative = (sample_hat - pred_original_sample) / sigma_hat
        # Euler step from sigma_hat down to sigma_prev
        sample_prev = sample_hat + (sigma_prev - sigma_hat) * derivative

        if not return_dict:
            return (sample_prev, derivative)

        return KarrasVeOutput(
            prev_sample=sample_prev, derivative=derivative, pred_original_sample=pred_original_sample
        )

    def step_correct(
        self,
        model_output: torch.Tensor,
        sigma_hat: float,
        sigma_prev: float,
        sample_hat: torch.Tensor,
        sample_prev: torch.Tensor,
        derivative: torch.Tensor,
        return_dict: bool = True,
    ) -> Union[KarrasVeOutput, Tuple]:
        """
        Corrects the predicted sample based on the `model_output` of the network.

        Args:
            model_output (`torch.Tensor`):
                The direct output from the learned diffusion model, evaluated at `sample_prev`.
            sigma_hat (`float`):
                The increased noise level used for the first-order step.
            sigma_prev (`float`):
                The noise level of the previous (next lower) timestep.
            sample_hat (`torch.Tensor`):
                The sample after the stochastic "churn" step.
            sample_prev (`torch.Tensor`):
                The sample produced by the first-order `step`.
            derivative (`torch.Tensor`):
                The derivative estimate from the first-order `step`.
            return_dict (`bool`, *optional*, defaults to `True`):
                Whether or not to return a [`~schedulers.deprecated.scheduling_karras_ve.KarrasVeOutput`] or `tuple`.

        Returns:
            [`~schedulers.deprecated.scheduling_karras_ve.KarrasVeOutput`] or `tuple`:
                The corrected sample in the diffusion chain together with the derivative estimate.
        """
        pred_original_sample = sample_prev + sigma_prev * model_output
        derivative_corr = (sample_prev - pred_original_sample) / sigma_prev
        # second-order (Heun-like) correction: average the two derivative estimates
        sample_prev = sample_hat + (sigma_prev - sigma_hat) * (0.5 * derivative + 0.5 * derivative_corr)

        if not return_dict:
            return (sample_prev, derivative)

        return KarrasVeOutput(
            prev_sample=sample_prev, derivative=derivative, pred_original_sample=pred_original_sample
        )

    def add_noise(self, original_samples, noise, timesteps):
        raise NotImplementedError()
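

# ---------------------------------------------------------------------------
# Illustrative usage sketch: one way to wire `add_noise_to_input`, `step`, and
# `step_correct` into a Karras et al. (2022, Algorithm 2) style sampling loop.
# The `dummy_denoiser` below is a placeholder assumption standing in for a
# trained network; in practice a pipeline such as `KarrasVePipeline` drives a
# real UNet through this same sequence of calls.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    scheduler = KarrasVeScheduler()
    scheduler.set_timesteps(num_inference_steps=10)

    def dummy_denoiser(x: torch.Tensor, sigma: float) -> torch.Tensor:
        # Placeholder for D(x; sigma): a real model would predict the noise here.
        return torch.zeros_like(x)

    generator = torch.manual_seed(0)
    # Start from pure noise scaled to the initial noise level.
    sample = randn_tensor((1, 3, 8, 8), generator=generator) * scheduler.init_noise_sigma

    for t in scheduler.timesteps:
        sigma = scheduler.schedule[t].item()
        sigma_prev = scheduler.schedule[t - 1].item() if t > 0 else 0.0

        # 1. Temporarily increase the noise level (stochastic "churn").
        sample_hat, sigma_hat = scheduler.add_noise_to_input(sample, sigma, generator=generator)

        # 2. First-order (Euler) step from sigma_hat down to sigma_prev.
        model_output = dummy_denoiser(sample_hat, sigma_hat)
        step_output = scheduler.step(model_output, sigma_hat, sigma_prev, sample_hat)

        # 3. Second-order correction, skipped on the final step (sigma_prev == 0).
        if sigma_prev != 0:
            model_output = dummy_denoiser(step_output.prev_sample, sigma_prev)
            step_output = scheduler.step_correct(
                model_output,
                sigma_hat,
                sigma_prev,
                sample_hat,
                step_output.prev_sample,
                step_output.derivative,
            )

        sample = step_output.prev_sample

    print("final sample stats:", sample.mean().item(), sample.std().item())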