
import inspect
from typing import Callable, Dict, List, Optional, Tuple, Union

import torch
from transformers import T5EncoderModel, T5Tokenizer

from ...callbacks import MultiPipelineCallbacks, PipelineCallback
from ...image_processor import VaeImageProcessor
from ...models import AutoencoderKL, CogView3PlusTransformer2DModel
from ...pipelines.pipeline_utils import DiffusionPipeline
from ...schedulers import CogVideoXDDIMScheduler, CogVideoXDPMScheduler
from ...utils import is_torch_xla_available, logging, replace_example_docstring
from ...utils.torch_utils import randn_tensor
from .pipeline_output import CogView3PipelineOutput


if is_torch_xla_available():
    import torch_xla.core.xla_model as xm

    XLA_AVAILABLE = True
else:
    XLA_AVAILABLE = False

logger = logging.get_logger(__name__)


EXAMPLE_DOC_STRING = """
    Examples:
        ```python
        >>> import torch
        >>> from diffusers import CogView3PlusPipeline

        >>> pipe = CogView3PlusPipeline.from_pretrained("THUDM/CogView3-Plus-3B", torch_dtype=torch.bfloat16)
        >>> pipe.to("cuda")

        >>> prompt = "A photo of an astronaut riding a horse on mars"
        >>> image = pipe(prompt).images[0]
        >>> image.save("output.png")
        ```
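
        A second, illustrative call that passes the main generation controls documented in
        `__call__` below (values shown are the documented defaults; adjust as needed):

        ```python
        >>> image = pipe(
        ...     prompt,
        ...     guidance_scale=5.0,
        ...     num_inference_steps=50,
        ...     height=1024,
        ...     width=1024,
        ... ).images[0]
        ```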
"""


def retrieve_timesteps(
    scheduler,
    num_inference_steps: Optional[int] = None,
    device: Optional[Union[str, torch.device]] = None,
    timesteps: Optional[List[int]] = None,
    sigmas: Optional[List[float]] = None,
    **kwargs,
):
    r"""
    Calls the scheduler's `set_timesteps` method and retrieves timesteps from the scheduler after the call. Handles
    custom timesteps. Any kwargs will be supplied to `scheduler.set_timesteps`.

    Args:
        scheduler (`SchedulerMixin`):
            The scheduler to get timesteps from.
        num_inference_steps (`int`):
            The number of diffusion steps used when generating samples with a pre-trained model. If used, `timesteps`
            must be `None`.
        device (`str` or `torch.device`, *optional*):
            The device to which the timesteps should be moved. If `None`, the timesteps are not moved.
        timesteps (`List[int]`, *optional*):
            Custom timesteps used to override the timestep spacing strategy of the scheduler. If `timesteps` is passed,
            `num_inference_steps` and `sigmas` must be `None`.
        sigmas (`List[float]`, *optional*):
            Custom sigmas used to override the timestep spacing strategy of the scheduler. If `sigmas` is passed,
            `num_inference_steps` and `timesteps` must be `None`.

    Returns:
        `Tuple[torch.Tensor, int]`: A tuple where the first element is the timestep schedule from the scheduler and the
        second element is the number of inference steps.
    """
    if timesteps is not None and sigmas is not None:
        raise ValueError("Only one of `timesteps` or `sigmas` can be passed. Please choose one to set custom values")
    if timesteps is not None:
        accepts_timesteps = "timesteps" in set(inspect.signature(scheduler.set_timesteps).parameters.keys())
        if not accepts_timesteps:
            raise ValueError(
                f"The current scheduler class {scheduler.__class__}'s `set_timesteps` does not support custom"
                " timestep schedules. Please check whether you are using the correct scheduler."
            )
        scheduler.set_timesteps(timesteps=timesteps, device=device, **kwargs)
        timesteps = scheduler.timesteps
        num_inference_steps = len(timesteps)
    elif sigmas is not None:
        accept_sigmas = "sigmas" in set(inspect.signature(scheduler.set_timesteps).parameters.keys())
        if not accept_sigmas:
            raise ValueError(
                f"The current scheduler class {scheduler.__class__}'s `set_timesteps` does not support custom"
                " sigmas schedules. Please check whether you are using the correct scheduler."
            )
        scheduler.set_timesteps(sigmas=sigmas, device=device, **kwargs)
        timesteps = scheduler.timesteps
        num_inference_steps = len(timesteps)
    else:
        scheduler.set_timesteps(num_inference_steps, device=device, **kwargs)
        timesteps = scheduler.timesteps
    return timesteps, num_inference_steps
deeef   f
 fd	Z	 	 	 	 	 d5deeee   f   dededeej(                     deej*                     f
dZ	 	 	 	 	 	 	 	 d6deeee   f   deeeee   f      dededeej0                     deej0                     dedeej(                     deej*                     fdZd7dZd Z	 	 d8dZed        Zed        Zed        Z ed        Z! ejD                          e#e$      d
d
d
d
d d
d!dd"d
d
d
d
d
d#d$dd
d%gdfdeeeee   f      deeeee   f      d&ee   d'ee   d(ed)eee      d*e%ded+e%d,eeejL                  eejL                     f      d%eejN                     deejN                     deejN                     d-ee(eef      d.e(eef   d/ed0ed1eee)eee*gd
f   e+e,f      d2ee   ded3ee-e(f   f*d4              Z. xZ/S )9CogView3PlusPipelinea  
    Pipeline for text-to-image generation using CogView3Plus.

    This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods the
    library implements for all the pipelines (such as downloading or saving, running on a particular device, etc.)

    Args:
        vae ([`AutoencoderKL`]):
            Variational Auto-Encoder (VAE) Model to encode and decode images to and from latent representations.
        text_encoder ([`T5EncoderModel`]):
            Frozen text-encoder. CogView3Plus uses
            [T5](https://huggingface.co/docs/transformers/model_doc/t5#transformers.T5EncoderModel); specifically the
            [t5-v1_1-xxl](https://huggingface.co/PixArt-alpha/PixArt-alpha/tree/main/t5-v1_1-xxl) variant.
        tokenizer (`T5Tokenizer`):
            Tokenizer of class
            [T5Tokenizer](https://huggingface.co/docs/transformers/model_doc/t5#transformers.T5Tokenizer).
        transformer ([`CogView3PlusTransformer2DModel`]):
            A text conditioned `CogView3PlusTransformer2DModel` to denoise the encoded image latents.
        scheduler ([`SchedulerMixin`]):
            A scheduler to be used in combination with `transformer` to denoise the encoded image latents.
    """

    _optional_components = []
    model_cpu_offload_seq = "text_encoder->transformer->vae"

    _callback_tensor_inputs = [
        "latents",
        "prompt_embeds",
        "negative_prompt_embeds",
    ]

    def __init__(
        self,
        tokenizer: T5Tokenizer,
        text_encoder: T5EncoderModel,
        vae: AutoencoderKL,
        transformer: CogView3PlusTransformer2DModel,
        scheduler: Union[CogVideoXDDIMScheduler, CogVideoXDPMScheduler],
    ):
        super().__init__()

        self.register_modules(
            tokenizer=tokenizer, text_encoder=text_encoder, vae=vae, transformer=transformer, scheduler=scheduler
        )
        self.vae_scale_factor = (
            2 ** (len(self.vae.config.block_out_channels) - 1) if getattr(self, "vae", None) else 8
        )
        self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor)

    def _get_t5_prompt_embeds(
        self,
        prompt: Union[str, List[str]] = None,
        num_images_per_prompt: int = 1,
        max_sequence_length: int = 226,
        device: Optional[torch.device] = None,
        dtype: Optional[torch.dtype] = None,
    ):
        device = device or self._execution_device
        dtype = dtype or self.text_encoder.dtype

        prompt = [prompt] if isinstance(prompt, str) else prompt
        batch_size = len(prompt)

        text_inputs = self.tokenizer(
            prompt,
            padding="max_length",
            max_length=max_sequence_length,
            truncation=True,
            add_special_tokens=True,
            return_tensors="pt",
        )
        text_input_ids = text_inputs.input_ids
        untruncated_ids = self.tokenizer(prompt, padding="longest", return_tensors="pt").input_ids

        if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal(text_input_ids, untruncated_ids):
            removed_text = self.tokenizer.batch_decode(untruncated_ids[:, max_sequence_length - 1 : -1])
            logger.warning(
                "The following part of your input was truncated because `max_sequence_length` is set to "
                f" {max_sequence_length} tokens: {removed_text}"
            )

        prompt_embeds = self.text_encoder(text_input_ids.to(device))[0]
        prompt_embeds = prompt_embeds.to(dtype=dtype, device=device)

        # duplicate text embeddings for each generation per prompt, using mps friendly method
        _, seq_len, _ = prompt_embeds.shape
        prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1)
        prompt_embeds = prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1)

        return prompt_embeds

    def encode_prompt(
        self,
        prompt: Union[str, List[str]],
        negative_prompt: Optional[Union[str, List[str]]] = None,
        do_classifier_free_guidance: bool = True,
        num_images_per_prompt: int = 1,
        prompt_embeds: Optional[torch.Tensor] = None,
        negative_prompt_embeds: Optional[torch.Tensor] = None,
        max_sequence_length: int = 224,
        device: Optional[torch.device] = None,
        dtype: Optional[torch.dtype] = None,
    ):
        r"""
        Encodes the prompt into text encoder hidden states.

        Args:
            prompt (`str` or `List[str]`, *optional*):
                prompt to be encoded
            negative_prompt (`str` or `List[str]`, *optional*):
                The prompt or prompts not to guide the image generation. If not defined, one has to pass
                `negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is
                less than `1`).
            do_classifier_free_guidance (`bool`, *optional*, defaults to `True`):
                Whether to use classifier free guidance or not.
            num_images_per_prompt (`int`, *optional*, defaults to 1):
                Number of images that should be generated per prompt.
            prompt_embeds (`torch.Tensor`, *optional*):
                Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not
                provided, text embeddings will be generated from `prompt` input argument.
            negative_prompt_embeds (`torch.Tensor`, *optional*):
                Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt
                weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input
                argument.
            max_sequence_length (`int`, defaults to `224`):
                Maximum sequence length in encoded prompt. Can be set to other values but may lead to poorer results.
            device: (`torch.device`, *optional*):
                torch device
            dtype: (`torch.dtype`, *optional*):
                torch dtype
        """
        device = device or self._execution_device

        prompt = [prompt] if isinstance(prompt, str) else prompt
        if prompt is not None:
            batch_size = len(prompt)
        else:
            batch_size = prompt_embeds.shape[0]

        if prompt_embeds is None:
            prompt_embeds = self._get_t5_prompt_embeds(
                prompt=prompt,
                num_images_per_prompt=num_images_per_prompt,
                max_sequence_length=max_sequence_length,
                device=device,
                dtype=dtype,
            )

        if do_classifier_free_guidance and negative_prompt is None:
            negative_prompt_embeds = prompt_embeds.new_zeros(prompt_embeds.shape)

        if do_classifier_free_guidance and negative_prompt_embeds is None:
            negative_prompt = batch_size * [negative_prompt] if isinstance(negative_prompt, str) else negative_prompt

            if prompt is not None and type(prompt) is not type(negative_prompt):
                raise TypeError(
                    f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !="
                    f" {type(prompt)}."
                )
            elif batch_size != len(negative_prompt):
                raise ValueError(
                    f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:"
                    f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"
                    " the batch size of `prompt`."
                )

            negative_prompt_embeds = self._get_t5_prompt_embeds(
                prompt=negative_prompt,
                num_images_per_prompt=num_images_per_prompt,
                max_sequence_length=max_sequence_length,
                device=device,
                dtype=dtype,
            )

        return prompt_embeds, negative_prompt_embeds

    def prepare_latents(self, batch_size, num_channels_latents, height, width, dtype, device, generator, latents=None):
        shape = (
            batch_size,
            num_channels_latents,
            int(height) // self.vae_scale_factor,
            int(width) // self.vae_scale_factor,
        )
        if isinstance(generator, list) and len(generator) != batch_size:
            raise ValueError(
                f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
                f" size of {batch_size}. Make sure the batch size matches the length of the generators."
            )

        if latents is None:
            latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
        else:
            latents = latents.to(device)

        # scale the initial noise by the standard deviation required by the scheduler
        latents = latents * self.scheduler.init_noise_sigma
        return latents

    def prepare_extra_step_kwargs(self, generator, eta):
        # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
        # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
        # eta corresponds to η in the DDIM paper (https://huggingface.co/papers/2010.02502)
        # and should be between [0, 1]
        accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())
        extra_step_kwargs = {}
        if accepts_eta:
            extra_step_kwargs["eta"] = eta

        # check if the scheduler accepts generator
        accepts_generator = "generator" in set(inspect.signature(self.scheduler.step).parameters.keys())
        if accepts_generator:
            extra_step_kwargs["generator"] = generator
        return extra_step_kwargs

    def check_inputs(
        self,
        prompt,
        height,
        width,
        negative_prompt,
        callback_on_step_end_tensor_inputs,
        prompt_embeds=None,
        negative_prompt_embeds=None,
    ):
        if height % 8 != 0 or width % 8 != 0:
            raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.")

        if callback_on_step_end_tensor_inputs is not None and not all(
            k in self._callback_tensor_inputs for k in callback_on_step_end_tensor_inputs
        ):
            raise ValueError(
                f"`callback_on_step_end_tensor_inputs` has to be in {self._callback_tensor_inputs}, but found"
                f" {[k for k in callback_on_step_end_tensor_inputs if k not in self._callback_tensor_inputs]}"
            )
        if prompt is not None and prompt_embeds is not None:
            raise ValueError(
                f"Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to"
                " only forward one of the two."
            )
        elif prompt is None and prompt_embeds is None:
            raise ValueError(
                "Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined."
            )
        elif prompt is not None and not isinstance(prompt, (str, list)):
            raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}")

        if prompt is not None and negative_prompt_embeds is not None:
            raise ValueError(
                f"Cannot forward both `prompt`: {prompt} and `negative_prompt_embeds`: {negative_prompt_embeds}."
                " Please make sure to only forward one of the two."
            )

        if negative_prompt is not None and negative_prompt_embeds is not None:
            raise ValueError(
                f"Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`:"
                f" {negative_prompt_embeds}. Please make sure to only forward one of the two."
            )

        if prompt_embeds is not None and negative_prompt_embeds is not None:
            if prompt_embeds.shape != negative_prompt_embeds.shape:
                raise ValueError(
                    "`prompt_embeds` and `negative_prompt_embeds` must have the same shape when passed directly, but"
                    f" got: `prompt_embeds` {prompt_embeds.shape} != `negative_prompt_embeds`"
                    f" {negative_prompt_embeds.shape}."
                )

    @property
    def guidance_scale(self):
        return self._guidance_scale

    @property
    def do_classifier_free_guidance(self):
        return self._guidance_scale > 1

    @property
    def num_timesteps(self):
        return self._num_timesteps

    @property
    def interrupt(self):
        return self._interrupt

    @torch.no_grad()
    @replace_example_docstring(EXAMPLE_DOC_STRING)
    def __call__(
        self,
        prompt: Optional[Union[str, List[str]]] = None,
        negative_prompt: Optional[Union[str, List[str]]] = None,
        height: Optional[int] = None,
        width: Optional[int] = None,
        num_inference_steps: int = 50,
        timesteps: Optional[List[int]] = None,
        guidance_scale: float = 5.0,
        num_images_per_prompt: int = 1,
        eta: float = 0.0,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        latents: Optional[torch.FloatTensor] = None,
        prompt_embeds: Optional[torch.FloatTensor] = None,
        negative_prompt_embeds: Optional[torch.FloatTensor] = None,
        original_size: Optional[Tuple[int, int]] = None,
        crops_coords_top_left: Tuple[int, int] = (0, 0),
        output_type: str = "pil",
        return_dict: bool = True,
        callback_on_step_end: Optional[
            Union[Callable[[int, int, Dict], None], PipelineCallback, MultiPipelineCallbacks]
        ] = None,
        callback_on_step_end_tensor_inputs: List[str] = ["latents"],
        max_sequence_length: int = 224,
    ) -> Union[CogView3PipelineOutput, Tuple]:
        r"""
        Function invoked when calling the pipeline for generation.

        Args:
            prompt (`str` or `List[str]`, *optional*):
                The prompt or prompts to guide the image generation. If not defined, one has to pass `prompt_embeds`.
            negative_prompt (`str` or `List[str]`, *optional*):
                The prompt or prompts not to guide the image generation. If not defined, one has to pass
                `negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is
                less than `1`).
            height (`int`, *optional*, defaults to self.transformer.config.sample_size * self.vae_scale_factor):
                The height in pixels of the generated image. If not provided, it is set to 1024.
            width (`int`, *optional*, defaults to self.transformer.config.sample_size * self.vae_scale_factor):
                The width in pixels of the generated image. If not provided, it is set to 1024.
            num_inference_steps (`int`, *optional*, defaults to `50`):
                The number of denoising steps. More denoising steps usually lead to a higher quality image at the
                expense of slower inference.
            timesteps (`List[int]`, *optional*):
                Custom timesteps to use for the denoising process with schedulers which support a `timesteps` argument
                in their `set_timesteps` method. If not defined, the default behavior when `num_inference_steps` is
                passed will be used. Must be in descending order.
            guidance_scale (`float`, *optional*, defaults to `5.0`):
                Guidance scale as defined in [Classifier-Free Diffusion
                Guidance](https://huggingface.co/papers/2207.12598). `guidance_scale` is defined as `w` of equation 2
                of [Imagen Paper](https://huggingface.co/papers/2205.11487). Guidance scale is enabled by setting
                `guidance_scale > 1`. A higher guidance scale encourages the model to generate images that are closely
                linked to the text `prompt`, usually at the expense of lower image quality.
            num_images_per_prompt (`int`, *optional*, defaults to `1`):
                The number of images to generate per prompt.
            generator (`torch.Generator` or `List[torch.Generator]`, *optional*):
                One or a list of [torch generator(s)](https://pytorch.org/docs/stable/generated/torch.Generator.html)
                to make generation deterministic.
            latents (`torch.FloatTensor`, *optional*):
                Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image
                generation. Can be used to tweak the same generation with different prompts. If not provided, a latents
                tensor will be generated by sampling using the supplied random `generator`.
            prompt_embeds (`torch.FloatTensor`, *optional*):
                Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not
                provided, text embeddings will be generated from `prompt` input argument.
            negative_prompt_embeds (`torch.FloatTensor`, *optional*):
                Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt
                weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input
                argument.
            original_size (`Tuple[int]`, *optional*, defaults to (1024, 1024)):
                If `original_size` is not the same as `target_size` the image will appear to be down- or upsampled.
                `original_size` defaults to `(height, width)` if not specified. Part of SDXL's micro-conditioning as
                explained in section 2.2 of
                [https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952).
            crops_coords_top_left (`Tuple[int]`, *optional*, defaults to (0, 0)):
                `crops_coords_top_left` can be used to generate an image that appears to be "cropped" from the position
                `crops_coords_top_left` downwards. Favorable, well-centered images are usually achieved by setting
                `crops_coords_top_left` to (0, 0). Part of SDXL's micro-conditioning as explained in section 2.2 of
                [https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952).
            output_type (`str`, *optional*, defaults to `"pil"`):
                The output format of the generated image. Choose between
                [PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`.
            return_dict (`bool`, *optional*, defaults to `True`):
                Whether or not to return a [`~pipelines.cogview3.pipeline_cogview3plus.CogView3PipelineOutput`] instead
                of a plain tuple.
            attention_kwargs (`dict`, *optional*):
                A kwargs dictionary that if specified is passed along to the `AttentionProcessor` as defined under
                `self.processor` in
                [diffusers.models.attention_processor](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py).
            callback_on_step_end (`Callable`, *optional*):
                A function that is called at the end of each denoising step during inference. The function is called
                with the following arguments: `callback_on_step_end(self: DiffusionPipeline, step: int, timestep: int,
                callback_kwargs: Dict)`. `callback_kwargs` will include a list of all tensors as specified by
                `callback_on_step_end_tensor_inputs`.
            callback_on_step_end_tensor_inputs (`List`, *optional*):
                The list of tensor inputs for the `callback_on_step_end` function. The tensors specified in the list
                will be passed as `callback_kwargs` argument. You will only be able to include variables listed in the
                `._callback_tensor_inputs` attribute of your pipeline class.
            max_sequence_length (`int`, defaults to `224`):
                Maximum sequence length in encoded prompt. Can be set to other values but may lead to poorer results.

        Examples:

        Returns:
            [`~pipelines.cogview3.pipeline_cogview3plus.CogView3PipelineOutput`] or `tuple`:
            [`~pipelines.cogview3.pipeline_cogview3plus.CogView3PipelineOutput`] if `return_dict` is True, otherwise a
            `tuple`. When returning a tuple, the first element is a list with the generated images.
        """
        if isinstance(callback_on_step_end, (PipelineCallback, MultiPipelineCallbacks)):
            callback_on_step_end_tensor_inputs = callback_on_step_end.tensor_inputs

        height = height or self.transformer.config.sample_size * self.vae_scale_factor
        width = width or self.transformer.config.sample_size * self.vae_scale_factor

        original_size = original_size or (height, width)
        target_size = (height, width)

        # 1. Check inputs. Raise error if not correct
        self.check_inputs(
            prompt,
            height,
            width,
            negative_prompt,
            callback_on_step_end_tensor_inputs,
            prompt_embeds,
            negative_prompt_embeds,
        )
        self._guidance_scale = guidance_scale
        self._interrupt = False

        # 2. Default call parameters
        if prompt is not None and isinstance(prompt, str):
            batch_size = 1
        elif prompt is not None and isinstance(prompt, list):
            batch_size = len(prompt)
        else:
            batch_size = prompt_embeds.shape[0]

        device = self._execution_device

        # here `guidance_scale` is defined analog to the guidance weight `w` of equation 2 of the
        # Imagen paper (https://huggingface.co/papers/2205.11487); `guidance_scale = 1` corresponds
        # to doing no classifier free guidance.
        do_classifier_free_guidance = guidance_scale > 1.0

        # 3. Encode input prompt
        prompt_embeds, negative_prompt_embeds = self.encode_prompt(
            prompt,
            negative_prompt,
            do_classifier_free_guidance,
            num_images_per_prompt=num_images_per_prompt,
            prompt_embeds=prompt_embeds,
            negative_prompt_embeds=negative_prompt_embeds,
            max_sequence_length=max_sequence_length,
            device=device,
        )
        if do_classifier_free_guidance:
            prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds], dim=0)

        # 4. Prepare timesteps
        timesteps, num_inference_steps = retrieve_timesteps(self.scheduler, num_inference_steps, device, timesteps)
        self._num_timesteps = len(timesteps)

        # 5. Prepare latents
        latent_channels = self.transformer.config.in_channels
        latents = self.prepare_latents(
            batch_size * num_images_per_prompt,
            latent_channels,
            height,
            width,
            prompt_embeds.dtype,
            device,
            generator,
            latents,
        )

        # 6. Prepare extra step kwargs
        extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta)

        # 7. Prepare additional timestep conditions
        original_size = torch.tensor([original_size], dtype=prompt_embeds.dtype)
        target_size = torch.tensor([target_size], dtype=prompt_embeds.dtype)
        crops_coords_top_left = torch.tensor([crops_coords_top_left], dtype=prompt_embeds.dtype)

        if do_classifier_free_guidance:
            original_size = torch.cat([original_size, original_size])
            target_size = torch.cat([target_size, target_size])
            crops_coords_top_left = torch.cat([crops_coords_top_left, crops_coords_top_left])

        original_size = original_size.to(device).repeat(batch_size * num_images_per_prompt, 1)
        target_size = target_size.to(device).repeat(batch_size * num_images_per_prompt, 1)
        crops_coords_top_left = crops_coords_top_left.to(device).repeat(batch_size * num_images_per_prompt, 1)

        # 8. Denoising loop
        num_warmup_steps = max(len(timesteps) - num_inference_steps * self.scheduler.order, 0)

        with self.progress_bar(total=num_inference_steps) as progress_bar:
            # for DPM-solver++
            old_pred_original_sample = None
            for i, t in enumerate(timesteps):
                if self.interrupt:
                    continue

                latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
                latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)

                # broadcast to batch dimension in a way that's compatible with ONNX/Core ML
                timestep = t.expand(latent_model_input.shape[0])

                # predict noise model_output
                noise_pred = self.transformer(
                    hidden_states=latent_model_input,
                    encoder_hidden_states=prompt_embeds,
                    timestep=timestep,
                    original_size=original_size,
                    target_size=target_size,
                    crop_coords=crops_coords_top_left,
                    return_dict=False,
                )[0]
                noise_pred = noise_pred.float()

                # perform guidance
                if do_classifier_free_guidance:
                    noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
                    noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)

                # compute the previous noisy sample x_t -> x_t-1
                if not isinstance(self.scheduler, CogVideoXDPMScheduler):
                    latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs, return_dict=False)[0]
                else:
                    latents, old_pred_original_sample = self.scheduler.step(
                        noise_pred,
                        old_pred_original_sample,
                        t,
                        timesteps[i - 1] if i > 0 else None,
                        latents,
                        **extra_step_kwargs,
                        return_dict=False,
                    )
                latents = latents.to(prompt_embeds.dtype)

                # call the callback, if provided
                if callback_on_step_end is not None:
                    callback_kwargs = {}
                    for k in callback_on_step_end_tensor_inputs:
                        callback_kwargs[k] = locals()[k]
                    callback_outputs = callback_on_step_end(self, i, t, callback_kwargs)

                    latents = callback_outputs.pop("latents", latents)
                    prompt_embeds = callback_outputs.pop("prompt_embeds", prompt_embeds)
                    negative_prompt_embeds = callback_outputs.pop("negative_prompt_embeds", negative_prompt_embeds)

                if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0):
                    progress_bar.update()

                if XLA_AVAILABLE:
                    xm.mark_step()

        if not output_type == "latent":
            image = self.vae.decode(latents / self.vae.config.scaling_factor, return_dict=False, generator=generator)[0]
        else:
            image = latents

        image = self.image_processor.postprocess(image, output_type=output_type)

        # Offload all models
        self.maybe_free_model_hooks()

        if not return_dict:
            return (image,)

        return CogView3PipelineOutput(images=image)