
import inspect
from typing import Any, Callable, Dict, List, Optional, Union

import numpy as np
import torch
from transformers import (
    ClapFeatureExtractor,
    ClapModel,
    ClapTextModelWithProjection,
    RobertaTokenizer,
    RobertaTokenizerFast,
    SpeechT5HifiGan,
)

from ...models import AutoencoderKL, UNet2DConditionModel
from ...schedulers import KarrasDiffusionSchedulers
from ...utils import (
    is_accelerate_available,
    is_accelerate_version,
    is_librosa_available,
    logging,
    replace_example_docstring,
)
from ...utils.torch_utils import empty_device_cache, get_device, randn_tensor
from ..pipeline_utils import AudioPipelineOutput, DeprecatedPipelineMixin, DiffusionPipeline, StableDiffusionMixin


if is_librosa_available():
    import librosa

from ...utils import is_torch_xla_available


if is_torch_xla_available():
    import torch_xla.core.xla_model as xm

    XLA_AVAILABLE = True
else:
    XLA_AVAILABLE = False

logger = logging.get_logger(__name__)  # pylint: disable=invalid-name

EXAMPLE_DOC_STRING = """
    Examples:
        ```py
        >>> from diffusers import MusicLDMPipeline
        >>> import torch
        >>> import scipy

        >>> repo_id = "ucsd-reach/musicldm"
        >>> pipe = MusicLDMPipeline.from_pretrained(repo_id, torch_dtype=torch.float16)
        >>> pipe = pipe.to("cuda")

        >>> prompt = "Techno music with a strong, upbeat tempo and high melodic riffs"
        >>> audio = pipe(prompt, num_inference_steps=10, audio_length_in_s=5.0).audios[0]

        >>> # save the audio sample as a .wav file
        >>> scipy.io.wavfile.write("techno.wav", rate=16000, data=audio)
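
        >>> # additional sketch: generate a few candidate waveforms per prompt and let the
        >>> # CLAP text-audio model keep the best-ranked ones (requires `librosa`; see
        >>> # `num_waveforms_per_prompt` in `__call__` below)
        >>> audios = pipe(prompt, num_inference_steps=10, audio_length_in_s=5.0, num_waveforms_per_prompt=3).audios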
        ```
"""


class MusicLDMPipeline(DeprecatedPipelineMixin, DiffusionPipeline, StableDiffusionMixin):
    _last_supported_version = "0.33.1"
    r"""
    Pipeline for text-to-audio generation using MusicLDM.

    This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods
    implemented for all pipelines (downloading, saving, running on a particular device, etc.).

    Args:
        vae ([`AutoencoderKL`]):
            Variational Auto-Encoder (VAE) model to encode and decode images to and from latent representations.
        text_encoder ([`~transformers.ClapModel`]):
            Frozen text-audio embedding model (`ClapTextModel`), specifically the
            [laion/clap-htsat-unfused](https://huggingface.co/laion/clap-htsat-unfused) variant.
        tokenizer ([`PreTrainedTokenizer`]):
            A [`~transformers.RobertaTokenizer`] to tokenize text.
        feature_extractor ([`~transformers.ClapFeatureExtractor`]):
            Feature extractor to compute mel-spectrograms from audio waveforms.
        unet ([`UNet2DConditionModel`]):
            A `UNet2DConditionModel` to denoise the encoded audio latents.
        scheduler ([`SchedulerMixin`]):
            A scheduler to be used in combination with `unet` to denoise the encoded audio latents. Can be one of
            [`DDIMScheduler`], [`LMSDiscreteScheduler`], or [`PNDMScheduler`].
        vocoder ([`~transformers.SpeechT5HifiGan`]):
            Vocoder of class `SpeechT5HifiGan`.
    """

    def __init__(
        self,
        vae: AutoencoderKL,
        text_encoder: Union[ClapTextModelWithProjection, ClapModel],
        tokenizer: Union[RobertaTokenizer, RobertaTokenizerFast],
        feature_extractor: Optional[ClapFeatureExtractor],
        unet: UNet2DConditionModel,
        scheduler: KarrasDiffusionSchedulers,
        vocoder: SpeechT5HifiGan,
    ):
        super().__init__()

        self.register_modules(
            vae=vae,
            text_encoder=text_encoder,
            tokenizer=tokenizer,
            feature_extractor=feature_extractor,
            unet=unet,
            scheduler=scheduler,
            vocoder=vocoder,
        )
        self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1) if getattr(self, "vae", None) else 8

    def _encode_prompt(
        self,
        prompt,
        device,
        num_waveforms_per_prompt,
        do_classifier_free_guidance,
        negative_prompt=None,
        prompt_embeds: Optional[torch.Tensor] = None,
        negative_prompt_embeds: Optional[torch.Tensor] = None,
    ):
        r"""
        Encodes the prompt into text encoder hidden states.

        Args:
            prompt (`str` or `List[str]`, *optional*):
                prompt to be encoded
            device (`torch.device`):
                torch device
            num_waveforms_per_prompt (`int`):
                number of waveforms that should be generated per prompt
            do_classifier_free_guidance (`bool`):
                whether to use classifier free guidance or not
            negative_prompt (`str` or `List[str]`, *optional*):
                The prompt or prompts not to guide the audio generation. If not defined, one has to pass
                `negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is
                less than `1`).
            prompt_embeds (`torch.Tensor`, *optional*):
                Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not
                provided, text embeddings will be generated from `prompt` input argument.
            negative_prompt_embeds (`torch.Tensor`, *optional*):
                Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt
                weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input
                argument.
        """
        if prompt is not None and isinstance(prompt, str):
            batch_size = 1
        elif prompt is not None and isinstance(prompt, list):
            batch_size = len(prompt)
        else:
            batch_size = prompt_embeds.shape[0]

        if prompt_embeds is None:
            text_inputs = self.tokenizer(
                prompt,
                padding="max_length",
                max_length=self.tokenizer.model_max_length,
                truncation=True,
                return_tensors="pt",
            )
            text_input_ids = text_inputs.input_ids
            attention_mask = text_inputs.attention_mask
            untruncated_ids = self.tokenizer(prompt, padding="longest", return_tensors="pt").input_ids

            if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal(
                text_input_ids, untruncated_ids
            ):
                removed_text = self.tokenizer.batch_decode(
                    untruncated_ids[:, self.tokenizer.model_max_length - 1 : -1]
                )
                logger.warning(
                    "The following part of your input was truncated because CLAP can only handle sequences up to"
                    f" {self.tokenizer.model_max_length} tokens: {removed_text}"
                )

            prompt_embeds = self.text_encoder.get_text_features(
                text_input_ids.to(device),
                attention_mask=attention_mask.to(device),
            )

        prompt_embeds = prompt_embeds.to(dtype=self.text_encoder.text_model.dtype, device=device)

        bs_embed, seq_len = prompt_embeds.shape
        # duplicate text embeddings for each generation per prompt, using mps friendly method
        prompt_embeds = prompt_embeds.repeat(1, num_waveforms_per_prompt)
        prompt_embeds = prompt_embeds.view(bs_embed * num_waveforms_per_prompt, seq_len)

        # get unconditional embeddings for classifier free guidance
        if do_classifier_free_guidance and negative_prompt_embeds is None:
            uncond_tokens: List[str]
            if negative_prompt is None:
                uncond_tokens = [""] * batch_size
            elif type(prompt) is not type(negative_prompt):
                raise TypeError(
                    f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !="
                    f" {type(prompt)}."
                )
            elif isinstance(negative_prompt, str):
                uncond_tokens = [negative_prompt]
            elif batch_size != len(negative_prompt):
                raise ValueError(
                    f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:"
                    f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"
                    " the batch size of `prompt`."
                )
            else:
                uncond_tokens = negative_prompt

            max_length = prompt_embeds.shape[1]
            uncond_input = self.tokenizer(
                uncond_tokens,
                padding="max_length",
                max_length=max_length,
                truncation=True,
                return_tensors="pt",
            )

            uncond_input_ids = uncond_input.input_ids.to(device)
            attention_mask = uncond_input.attention_mask.to(device)

            negative_prompt_embeds = self.text_encoder.get_text_features(
                uncond_input_ids,
                attention_mask=attention_mask,
            )

        if do_classifier_free_guidance:
            seq_len = negative_prompt_embeds.shape[1]

            negative_prompt_embeds = negative_prompt_embeds.to(dtype=self.text_encoder.text_model.dtype, device=device)

            negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_waveforms_per_prompt)
            negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_waveforms_per_prompt, seq_len)

            # For classifier free guidance, we need to do two forward passes.
            # Here we concatenate the unconditional and text embeddings into a single batch
            # to avoid doing two forward passes
            prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds])

        return prompt_embeds

    def mel_spectrogram_to_waveform(self, mel_spectrogram):
        if mel_spectrogram.dim() == 4:
            mel_spectrogram = mel_spectrogram.squeeze(1)

        waveform = self.vocoder(mel_spectrogram)
        # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
        waveform = waveform.cpu().float()
        return waveform

    def score_waveforms(self, text, audio, num_waveforms_per_prompt, device, dtype):
        if not is_librosa_available():
            logger.info(
                "Automatic scoring of the generated audio waveforms against the input prompt text requires the"
                " `librosa` package to resample the generated waveforms. Returning the audios in the order they were"
                " generated. To enable automatic scoring, install `librosa` with: `pip install librosa`."
            )
            return audio
        inputs = self.tokenizer(text, return_tensors="pt", padding=True)
        resampled_audio = librosa.resample(
            audio.numpy(), orig_sr=self.vocoder.config.sampling_rate, target_sr=self.feature_extractor.sampling_rate
        )
        inputs["input_features"] = self.feature_extractor(
            list(resampled_audio), return_tensors="pt", sampling_rate=self.feature_extractor.sampling_rate
        ).input_features.type(dtype)
        inputs = inputs.to(device)

        # compute the audio-text similarity score using the CLAP model
        logits_per_text = self.text_encoder(**inputs).logits_per_text
        # sort by the highest matching generations per prompt
        indices = torch.argsort(logits_per_text, dim=1, descending=True)[:, :num_waveforms_per_prompt]
        audio = torch.index_select(audio, 0, indices.reshape(-1).cpu())
        return audio

    def prepare_extra_step_kwargs(self, generator, eta):
        # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
        # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
        accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())
        extra_step_kwargs = {}
        if accepts_eta:
            extra_step_kwargs["eta"] = eta

        # check if the scheduler accepts generator
        accepts_generator = "generator" in set(inspect.signature(self.scheduler.step).parameters.keys())
        if accepts_generator:
            extra_step_kwargs["generator"] = generator
        return extra_step_kwargs

    def check_inputs(
        self,
        prompt,
        audio_length_in_s,
        vocoder_upsample_factor,
        callback_steps,
        negative_prompt=None,
        prompt_embeds=None,
        negative_prompt_embeds=None,
    ):
        min_audio_length_in_s = vocoder_upsample_factor * self.vae_scale_factor
        if audio_length_in_s < min_audio_length_in_s:
            raise ValueError(
                f"`audio_length_in_s` has to be a positive value greater than or equal to {min_audio_length_in_s}, but"
                f" is {audio_length_in_s}."
            )

        if self.vocoder.config.model_in_dim % self.vae_scale_factor != 0:
            raise ValueError(
                f"The number of frequency bins in the vocoder's log-mel spectrogram has to be divisible by the VAE"
                f" scale factor, but got {self.vocoder.config.model_in_dim} bins and a scale factor of"
                f" {self.vae_scale_factor}."
            )

        if (callback_steps is None) or (
            callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0)
        ):
            raise ValueError(
                f"`callback_steps` has to be a positive integer but is {callback_steps} of type"
                f" {type(callback_steps)}."
            )

        if prompt is not None and prompt_embeds is not None:
            raise ValueError(
                f"Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to"
                " only forward one of the two."
            )
        elif prompt is None and prompt_embeds is None:
            raise ValueError(
                "Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined."
            )
        elif prompt is not None and (not isinstance(prompt, str) and not isinstance(prompt, list)):
            raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}")

        if negative_prompt is not None and negative_prompt_embeds is not None:
            raise ValueError(
                f"Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`:"
                f" {negative_prompt_embeds}. Please make sure to only forward one of the two."
            )

        if prompt_embeds is not None and negative_prompt_embeds is not None:
            if prompt_embeds.shape != negative_prompt_embeds.shape:
                raise ValueError(
                    "`prompt_embeds` and `negative_prompt_embeds` must have the same shape when passed directly, but"
                    f" got: `prompt_embeds` {prompt_embeds.shape} != `negative_prompt_embeds`"
                    f" {negative_prompt_embeds.shape}."
                )

    def prepare_latents(self, batch_size, num_channels_latents, height, dtype, device, generator, latents=None):
        shape = (
            batch_size,
            num_channels_latents,
            int(height) // self.vae_scale_factor,
            int(self.vocoder.config.model_in_dim) // self.vae_scale_factor,
        )
        if isinstance(generator, list) and len(generator) != batch_size:
            raise ValueError(
                f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
                f" size of {batch_size}. Make sure the batch size matches the length of the generators."
            )

        if latents is None:
            latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
        else:
            latents = latents.to(device)

        # scale the initial noise by the standard deviation required by the scheduler
        latents = latents * self.scheduler.init_noise_sigma
        return latents

    def enable_model_cpu_offload(self, gpu_id=0):
        r"""
        Offloads all models to CPU using accelerate, reducing memory usage with a low impact on performance. Compared
        to `enable_sequential_cpu_offload`, this method moves one whole model at a time to the accelerator when its
        `forward` method is called, and the model remains in accelerator until the next model runs. Memory savings are
        lower than with `enable_sequential_cpu_offload`, but performance is much better due to the iterative execution
        of the `unet`.
        """
        if is_accelerate_available() and is_accelerate_version(">=", "0.17.0.dev0"):
            from accelerate import cpu_offload_with_hook
        else:
            raise ImportError("`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.")

        device_type = get_device()
        device = torch.device(f"{device_type}:{gpu_id}")

        if self.device.type != "cpu":
            self.to("cpu", silence_dtype_warnings=True)
            empty_device_cache()

        model_sequence = [
            self.text_encoder.text_model,
            self.text_encoder.text_projection,
            self.unet,
            self.vae,
            self.vocoder,
            self.text_encoder,
        ]

        hook = None
        for cpu_offloaded_model in model_sequence:
            _, hook = cpu_offload_with_hook(cpu_offloaded_model, device, prev_module_hook=hook)

        # We'll offload the last model manually.
        self.final_offload_hook = hook

    @torch.no_grad()
    @replace_example_docstring(EXAMPLE_DOC_STRING)
    def __call__(
        self,
        prompt: Union[str, List[str]] = None,
        audio_length_in_s: Optional[float] = None,
        num_inference_steps: int = 200,
        guidance_scale: float = 2.0,
        negative_prompt: Optional[Union[str, List[str]]] = None,
        num_waveforms_per_prompt: Optional[int] = 1,
        eta: float = 0.0,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        latents: Optional[torch.Tensor] = None,
        prompt_embeds: Optional[torch.Tensor] = None,
        negative_prompt_embeds: Optional[torch.Tensor] = None,
        return_dict: bool = True,
        callback: Optional[Callable[[int, int, torch.Tensor], None]] = None,
        callback_steps: Optional[int] = 1,
        cross_attention_kwargs: Optional[Dict[str, Any]] = None,
        output_type: Optional[str] = "np",
    ):
        r"""
        The call function to the pipeline for generation.

        Args:
            prompt (`str` or `List[str]`, *optional*):
                The prompt or prompts to guide audio generation. If not defined, you need to pass `prompt_embeds`.
            audio_length_in_s (`int`, *optional*, defaults to 10.24):
                The length of the generated audio sample in seconds.
            num_inference_steps (`int`, *optional*, defaults to 200):
                The number of denoising steps. More denoising steps usually lead to a higher quality audio at the
                expense of slower inference.
            guidance_scale (`float`, *optional*, defaults to 2.0):
                A higher guidance scale value encourages the model to generate audio that is closely linked to the text
                `prompt` at the expense of lower sound quality. Guidance scale is enabled when `guidance_scale > 1`.
            negative_prompt (`str` or `List[str]`, *optional*):
                The prompt or prompts to guide what to not include in audio generation. If not defined, you need to
                pass `negative_prompt_embeds` instead. Ignored when not using guidance (`guidance_scale < 1`).
            num_waveforms_per_prompt (`int`, *optional*, defaults to 1):
                The number of waveforms to generate per prompt. If `num_waveforms_per_prompt > 1`, the text encoding
                model is a joint text-audio model ([`~transformers.ClapModel`]), and the tokenizer is a
                `[~transformers.ClapProcessor]`, then automatic scoring will be performed between the generated outputs
                and the input text. This scoring ranks the generated waveforms based on their cosine similarity to text
                input in the joint text-audio embedding space.
            eta (`float`, *optional*, defaults to 0.0):
                Corresponds to parameter eta (η) from the [DDIM](https://huggingface.co/papers/2010.02502) paper. Only
                applies to the [`~schedulers.DDIMScheduler`], and is ignored in other schedulers.
            generator (`torch.Generator` or `List[torch.Generator]`, *optional*):
                A [`torch.Generator`](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make
                generation deterministic.
            latents (`torch.Tensor`, *optional*):
                Pre-generated noisy latents sampled from a Gaussian distribution, to be used as inputs for image
                generation. Can be used to tweak the same generation with different prompts. If not provided, a latents
                tensor is generated by sampling using the supplied random `generator`.
            prompt_embeds (`torch.Tensor`, *optional*):
                Pre-generated text embeddings. Can be used to easily tweak text inputs (prompt weighting). If not
                provided, text embeddings are generated from the `prompt` input argument.
            negative_prompt_embeds (`torch.Tensor`, *optional*):
                Pre-generated negative text embeddings. Can be used to easily tweak text inputs (prompt weighting). If
                not provided, `negative_prompt_embeds` are generated from the `negative_prompt` input argument.
            return_dict (`bool`, *optional*, defaults to `True`):
                Whether or not to return a [`~pipelines.AudioPipelineOutput`] instead of a plain tuple.
            callback (`Callable`, *optional*):
                A function that calls every `callback_steps` steps during inference. The function is called with the
                following arguments: `callback(step: int, timestep: int, latents: torch.Tensor)`.
            callback_steps (`int`, *optional*, defaults to 1):
                The frequency at which the `callback` function is called. If not specified, the callback is called at
                every step.
            cross_attention_kwargs (`dict`, *optional*):
                A kwargs dictionary that if specified is passed along to the [`AttentionProcessor`] as defined in
                [`self.processor`](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py).
            output_type (`str`, *optional*, defaults to `"np"`):
                The output format of the generated audio. Choose between `"np"` to return a NumPy `np.ndarray` or
                `"pt"` to return a PyTorch `torch.Tensor` object. Set to `"latent"` to return the latent diffusion
                model (LDM) output.

        Examples:

        Returns:
            [`~pipelines.AudioPipelineOutput`] or `tuple`:
                If `return_dict` is `True`, [`~pipelines.AudioPipelineOutput`] is returned, otherwise a `tuple` is
                returned where the first element is a list with the generated audio.
        """
        # 0. Convert audio input length from seconds to spectrogram height
        vocoder_upsample_factor = np.prod(self.vocoder.config.upsample_rates) / self.vocoder.config.sampling_rate

        if audio_length_in_s is None:
            audio_length_in_s = self.unet.config.sample_size * self.vae_scale_factor * vocoder_upsample_factor

        height = int(audio_length_in_s / vocoder_upsample_factor)

        original_waveform_length = int(audio_length_in_s * self.vocoder.config.sampling_rate)
        if height % self.vae_scale_factor != 0:
            height = int(np.ceil(height / self.vae_scale_factor)) * self.vae_scale_factor
            logger.info(
                f"Audio length in seconds {audio_length_in_s} is increased to"
                f" {height * vocoder_upsample_factor} so that it can be handled by the model. It will be cut to"
                f" {audio_length_in_s} after the denoising process."
            )

        # 1. Check inputs. Raise error if not correct
        self.check_inputs(
            prompt,
            audio_length_in_s,
            vocoder_upsample_factor,
            callback_steps,
            negative_prompt,
            prompt_embeds,
            negative_prompt_embeds,
        )

        # 2. Define call parameters
        if prompt is not None and isinstance(prompt, str):
            batch_size = 1
        elif prompt is not None and isinstance(prompt, list):
            batch_size = len(prompt)
        else:
            batch_size = prompt_embeds.shape[0]

        device = self._execution_device
        # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
        # of the Imagen paper: https://huggingface.co/papers/2205.11487 . `guidance_scale = 1`
        # corresponds to doing no classifier free guidance.
        do_classifier_free_guidance = guidance_scale > 1.0

        # 3. Encode input prompt
        prompt_embeds = self._encode_prompt(
            prompt,
            device,
            num_waveforms_per_prompt,
            do_classifier_free_guidance,
            negative_prompt,
            prompt_embeds=prompt_embeds,
            negative_prompt_embeds=negative_prompt_embeds,
        )

        # 4. Prepare timesteps
        self.scheduler.set_timesteps(num_inference_steps, device=device)
        timesteps = self.scheduler.timesteps

        # 5. Prepare latent variables
        num_channels_latents = self.unet.config.in_channels
        latents = self.prepare_latents(
            batch_size * num_waveforms_per_prompt,
            num_channels_latents,
            height,
            prompt_embeds.dtype,
            device,
            generator,
            latents,
        )

        # 6. Prepare extra step kwargs
        extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta)

        # 7. Denoising loop
        num_warmup_steps = len(timesteps) - num_inference_steps * self.scheduler.order
        with self.progress_bar(total=num_inference_steps) as progress_bar:
            for i, t in enumerate(timesteps):
                # expand the latents if we are doing classifier free guidance
                latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
                latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)

                # predict the noise residual
                noise_pred = self.unet(
                    latent_model_input,
                    t,
                    encoder_hidden_states=None,
                    class_labels=prompt_embeds,
                    cross_attention_kwargs=cross_attention_kwargs,
                    return_dict=False,
                )[0]

                # perform guidance
                if do_classifier_free_guidance:
                    noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
                    noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)

                # compute the previous noisy sample x_t -> x_t-1
                latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs).prev_sample

                # call the callback, if provided
                if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0):
                    progress_bar.update()
                    if callback is not None and i % callback_steps == 0:
                        step_idx = i // getattr(self.scheduler, "order", 1)
                        callback(step_idx, t, latents)

                if XLA_AVAILABLE:
                    xm.mark_step()

        self.maybe_free_model_hooks()

        # 8. Post-processing
        if not output_type == "latent":
            latents = 1 / self.vae.config.scaling_factor * latents
            mel_spectrogram = self.vae.decode(latents).sample
        else:
            return AudioPipelineOutput(audios=latents)

        audio = self.mel_spectrogram_to_waveform(mel_spectrogram)

        audio = audio[:, :original_waveform_length]

        # 9. Automatic scoring
        if num_waveforms_per_prompt > 1 and prompt is not None:
            audio = self.score_waveforms(
                text=prompt,
                audio=audio,
                num_waveforms_per_prompt=num_waveforms_per_prompt,
                device=device,
                dtype=prompt_embeds.dtype,
            )

        if output_type == "np":
            audio = audio.numpy()

        if not return_dict:
            return (audio,)

        return AudioPipelineOutput(audios=audio)
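

# Minimal smoke-test sketch (not part of the upstream module): it assumes the public
# "ucsd-reach/musicldm" checkpoint and a CUDA device are available, and simply mirrors
# EXAMPLE_DOC_STRING above. Adjust the dtype/device for your hardware before running.
if __name__ == "__main__":
    import scipy.io.wavfile

    pipe = MusicLDMPipeline.from_pretrained("ucsd-reach/musicldm", torch_dtype=torch.float16).to("cuda")
    audio = pipe(
        "Techno music with a strong, upbeat tempo and high melodic riffs",
        num_inference_steps=10,
        audio_length_in_s=5.0,
    ).audios[0]
    # MusicLDM's vocoder produces 16 kHz waveforms
    scipy.io.wavfile.write("techno.wav", rate=16000, data=audio)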