
from dataclasses import dataclass
from typing import Optional, Tuple, Union

import flax
import jax
import jax.numpy as jnp
from jax import random

from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .scheduling_utils_flax import FlaxSchedulerMixin


@flax.struct.dataclass
class KarrasVeSchedulerState:
    # setable values
    num_inference_steps: Optional[int] = None
    timesteps: Optional[jnp.ndarray] = None
    schedule: Optional[jnp.ndarray] = None  # sigma(t_i)

    @classmethod
    def create(cls):
        return cls()


@dataclass
class FlaxKarrasVeOutput(BaseOutput):
    """
    Output class for the scheduler's step function output.

    Args:
        prev_sample (`jnp.ndarray` of shape `(batch_size, num_channels, height, width)` for images):
            Computed sample (x_{t-1}) of previous timestep. `prev_sample` should be used as next model input in the
            denoising loop.
        derivative (`jnp.ndarray` of shape `(batch_size, num_channels, height, width)` for images):
            Derivative of predicted original image sample (x_0).
        state (`KarrasVeSchedulerState`): the `FlaxKarrasVeScheduler` state data class.
    """

    prev_sample: jnp.ndarray
    derivative: jnp.ndarray
    state: KarrasVeSchedulerState


class FlaxKarrasVeScheduler(FlaxSchedulerMixin, ConfigMixin):
    """
    Stochastic sampling from Karras et al. [1] tailored to the Variance-Expanding (VE) models [2]. Use Algorithm 2 and
    the VE column of Table 1 from [1] for reference.

    [1] Karras, Tero, et al. "Elucidating the Design Space of Diffusion-Based Generative Models."
    https://huggingface.co/papers/2206.00364 [2] Song, Yang, et al. "Score-based generative modeling through stochastic
    differential equations." https://huggingface.co/papers/2011.13456

    [`~ConfigMixin`] takes care of storing all config attributes that are passed in the scheduler's `__init__`
    function, such as `num_train_timesteps`. They can be accessed via `scheduler.config.num_train_timesteps`.
    [`SchedulerMixin`] provides general loading and saving functionality via the [`SchedulerMixin.save_pretrained`] and
    [`~SchedulerMixin.from_pretrained`] functions.

    For more details on the parameters, see the original paper's Appendix E.: "Elucidating the Design Space of
    Diffusion-Based Generative Models." https://huggingface.co/papers/2206.00364. The grid search values used to find
    the optimal {s_noise, s_churn, s_min, s_max} for a specific model are described in Table 5 of the paper.
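
    Examples:

    An illustrative sketch of the state-based API (the 50-step count is arbitrary and no denoising model is involved
    here):

    ```py
    >>> from diffusers.schedulers.scheduling_karras_ve_flax import FlaxKarrasVeScheduler

    >>> scheduler = FlaxKarrasVeScheduler()
    >>> state = scheduler.create_state()
    >>> state = scheduler.set_timesteps(state, num_inference_steps=50)
    >>> state.schedule.shape  # one noise level per timestep
    (50,)
    ```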

    Args:
        sigma_min (`float`): minimum noise magnitude
        sigma_max (`float`): maximum noise magnitude
        s_noise (`float`): the amount of additional noise to counteract loss of detail during sampling.
            A reasonable range is [1.000, 1.011].
        s_churn (`float`): the parameter controlling the overall amount of stochasticity.
            A reasonable range is [0, 100].
        s_min (`float`): the start value of the sigma range where we add noise (enable stochasticity).
            A reasonable range is [0, 10].
        s_max (`float`): the end value of the sigma range where we add noise.
            A reasonable range is [0.2, 80].
    """

    @property
    def has_state(self):
        return True

    @register_to_config
    def __init__(
        self,
        sigma_min: float = 0.02,
        sigma_max: float = 100,
        s_noise: float = 1.007,
        s_churn: float = 80,
        s_min: float = 0.05,
        s_max: float = 50,
    ):
        pass

    def create_state(self):
        return KarrasVeSchedulerState.create()

    def set_timesteps(
        self, state: KarrasVeSchedulerState, num_inference_steps: int, shape: Tuple = ()
    ) -> KarrasVeSchedulerState:
        """
        Sets the continuous timesteps used for the diffusion chain. Supporting function to be run before inference.

        Args:
            state (`KarrasVeSchedulerState`):
                the `FlaxKarrasVeScheduler` state data class.
            num_inference_steps (`int`):
                the number of diffusion steps used when generating samples with a pre-trained model.

        """
        timesteps = jnp.arange(0, num_inference_steps)[::-1].copy()
        # Noise levels from the VE column of Table 1 in Karras et al. (indexed by timestep value).
        schedule = [
            (
                self.config.sigma_max**2
                * (self.config.sigma_min**2 / self.config.sigma_max**2) ** (i / (num_inference_steps - 1))
            )
            for i in timesteps
        ]

        return state.replace(
            num_inference_steps=num_inference_steps,
            schedule=jnp.array(schedule, dtype=jnp.float32),
            timesteps=timesteps,
        )

    def add_noise_to_input(
        self,
        state: KarrasVeSchedulerState,
        sample: jnp.ndarray,
        sigma: float,
        key: jax.Array,
    ) -> Tuple[jnp.ndarray, float]:
        """
        Explicit Langevin-like "churn" step of adding noise to the sample according to a factor gamma_i ≥ 0 to reach a
        higher noise level sigma_hat = sigma_i + gamma_i*sigma_i.
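
        The churn factor is `gamma_i = min(s_churn / num_inference_steps, sqrt(2) - 1)` whenever `s_min <= sigma_i <=
        s_max`, and `gamma_i = 0` otherwise.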

        Args:
            state (`KarrasVeSchedulerState`): the `FlaxKarrasVeScheduler` state data class.
            sample (`jnp.ndarray`): the current sample in the diffusion chain.
            sigma (`float`): the current noise level `sigma_i`.
            key (`jax.Array`): a PRNG key used to draw the additional noise.
        """
        if self.config.s_min <= sigma <= self.config.s_max:
            gamma = min(self.config.s_churn / state.num_inference_steps, 2**0.5 - 1)
        else:
            gamma = 0

        # sample eps ~ N(0, S_noise^2 * I)
        key = random.split(key, num=1)
        eps = self.config.s_noise * random.normal(key=key, shape=sample.shape)

        sigma_hat = sigma + gamma * sigma
        sample_hat = sample + ((sigma_hat**2 - sigma**2) ** 0.5 * eps)

        return sample_hat, sigma_hat

    def step(
        self,
        state: KarrasVeSchedulerState,
        model_output: jnp.ndarray,
        sigma_hat: float,
        sigma_prev: float,
        sample_hat: jnp.ndarray,
        return_dict: bool = True,
    ) -> Union[FlaxKarrasVeOutput, Tuple]:
        """
        Predict the sample at the previous timestep by reversing the SDE. Core function to propagate the diffusion
        process from the learned model outputs (most often the predicted noise).

        Args:
            state (`KarrasVeSchedulerState`): the `FlaxKarrasVeScheduler` state data class.
            model_output (`jnp.ndarray`): direct output from learned diffusion model.
            sigma_hat (`float`): noise level after the stochastic "churn" step (see `add_noise_to_input`).
            sigma_prev (`float`): noise level of the next, lower timestep in the schedule.
            sample_hat (`jnp.ndarray`): sample after noise has been added to reach `sigma_hat`.
            return_dict (`bool`): option for returning tuple rather than FlaxKarrasVeOutput class

        Returns:
            [`~schedulers.scheduling_karras_ve_flax.FlaxKarrasVeOutput`] or `tuple`: Updated sample in the diffusion
            chain and derivative. [`~schedulers.scheduling_karras_ve_flax.FlaxKarrasVeOutput`] if `return_dict` is
            True, otherwise a `tuple`. When returning a tuple, the first element is the sample tensor.

        """
        # Euler step of Algorithm 2 in Karras et al.: estimate the denoised sample, form the ODE derivative and
        # move from sigma_hat down to sigma_prev.
        pred_original_sample = sample_hat + sigma_hat * model_output
        derivative = (sample_hat - pred_original_sample) / sigma_hat
        sample_prev = sample_hat + (sigma_prev - sigma_hat) * derivative

        if not return_dict:
            return (sample_prev, derivative, state)

        return FlaxKarrasVeOutput(prev_sample=sample_prev, derivative=derivative, state=state)

    def step_correct(
        self,
        state: KarrasVeSchedulerState,
        model_output: jnp.ndarray,
        sigma_hat: float,
        sigma_prev: float,
        sample_hat: jnp.ndarray,
        sample_prev: jnp.ndarray,
        derivative: jnp.ndarray,
        return_dict: bool = True,
    ) -> Union[FlaxKarrasVeOutput, Tuple]:
        """
        Correct the predicted sample based on the `model_output` of the network, i.e. the second-order (Heun-type)
        correction of Algorithm 2 in Karras et al.

        Args:
            state (`KarrasVeSchedulerState`): the `FlaxKarrasVeScheduler` state data class.
            model_output (`jnp.ndarray`): direct output from the learned diffusion model, evaluated at `sample_prev`.
            sigma_hat (`float`): noise level after the stochastic "churn" step.
            sigma_prev (`float`): noise level of the next, lower timestep in the schedule.
            sample_hat (`jnp.ndarray`): sample after noise has been added to reach `sigma_hat`.
            sample_prev (`jnp.ndarray`): the Euler-step estimate of the sample at `sigma_prev` returned by `step`.
            derivative (`jnp.ndarray`): the derivative returned by `step`.
            return_dict (`bool`): option for returning tuple rather than FlaxKarrasVeOutput class

        Returns:
            [`~schedulers.scheduling_karras_ve_flax.FlaxKarrasVeOutput`] or `tuple`: the corrected sample and the
            derivative from the preceding Euler step. [`~schedulers.scheduling_karras_ve_flax.FlaxKarrasVeOutput`] if
            `return_dict` is True, otherwise a `tuple`. When returning a tuple, the first element is the sample tensor.

        """
        # Second-order (Heun) correction: re-estimate the derivative at sample_prev and average it with the
        # derivative of the preceding Euler step before redoing the update from sigma_hat to sigma_prev.
        pred_original_sample = sample_prev + sigma_prev * model_output
        derivative_corr = (sample_prev - pred_original_sample) / sigma_prev
        sample_prev = sample_hat + (sigma_prev - sigma_hat) * (0.5 * derivative + 0.5 * derivative_corr)

        if not return_dict:
            return (sample_prev, derivative, state)

        return FlaxKarrasVeOutput(prev_sample=sample_prev, derivative=derivative, state=state)

    def add_noise(self, state: KarrasVeSchedulerState, original_samples, noise, timesteps):
        raise NotImplementedError()
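

# ---------------------------------------------------------------------------
# Illustrative usage sketch (not part of the scheduler API above). It mirrors
# the sampling loop of the PyTorch Karras VE pipeline, but a zero tensor
# stands in for a real denoising model so the snippet stays self-contained.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    scheduler = FlaxKarrasVeScheduler()
    state = scheduler.create_state()
    state = scheduler.set_timesteps(state, num_inference_steps=10)

    key = random.PRNGKey(0)
    key, init_key = random.split(key)
    # Start from pure noise scaled by the maximum noise magnitude.
    sample = random.normal(init_key, shape=(1, 3, 32, 32)) * scheduler.config.sigma_max

    for t in state.timesteps:
        sigma = state.schedule[t]
        sigma_prev = state.schedule[t - 1] if t > 0 else 0.0

        # A stochastic "churn" step would call scheduler.add_noise_to_input(state, sample, sigma, key) here;
        # this sketch keeps the noise level unchanged instead.
        sample_hat, sigma_hat = sample, sigma

        # Euler step (a trained model would produce model_output from sample_hat and sigma_hat).
        model_output = jnp.zeros_like(sample_hat)
        output = scheduler.step(state, model_output, sigma_hat, sigma_prev, sample_hat)

        if sigma_prev > 0:
            # Second-order correction, skipped on the final step where sigma_prev == 0.
            model_output = jnp.zeros_like(output.prev_sample)
            output = scheduler.step_correct(
                state,
                model_output,
                sigma_hat,
                sigma_prev,
                sample_hat,
                output.prev_sample,
                output.derivative,
            )
        sample = output.prev_sample

    print("final sample shape:", sample.shape)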