
"""PyTorch optimization for diffusion models."""

import math
from enum import Enum
from typing import Optional, Union

from torch.optim import Optimizer
from torch.optim.lr_scheduler import LambdaLR

from .utils import logging


logger = logging.get_logger(__name__)


class SchedulerType(Enum):
    LINEAR = "linear"
    COSINE = "cosine"
    COSINE_WITH_RESTARTS = "cosine_with_restarts"
    POLYNOMIAL = "polynomial"
    CONSTANT = "constant"
    CONSTANT_WITH_WARMUP = "constant_with_warmup"
    PIECEWISE_CONSTANT = "piecewise_constant"


def get_constant_schedule(optimizer: Optimizer, last_epoch: int = -1) -> LambdaLR:
    """
    Create a schedule with a constant learning rate, using the learning rate set in optimizer.

    Args:
        optimizer ([`~torch.optim.Optimizer`]):
            The optimizer for which to schedule the learning rate.
        last_epoch (`int`, *optional*, defaults to -1):
            The index of the last epoch when resuming training.

    Return:
        `torch.optim.lr_scheduler.LambdaLR` with the appropriate schedule.
    """
    return LambdaLR(optimizer, lambda _: 1, last_epoch=last_epoch)
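

# Example (illustrative sketch, not part of the library API; assumes only that torch is
# installed): the constant schedule multiplies the optimizer's base lr by 1 at every
# step, so `scheduler.step()` never changes the learning rate.
#
#   import torch
#   optimizer = torch.optim.SGD([torch.zeros(1, requires_grad=True)], lr=1e-3)
#   scheduler = get_constant_schedule(optimizer)
#   optimizer.step()
#   scheduler.step()
#   assert scheduler.get_last_lr() == [1e-3]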


def get_constant_schedule_with_warmup(optimizer: Optimizer, num_warmup_steps: int, last_epoch: int = -1) -> LambdaLR:
    """
    Create a schedule with a constant learning rate preceded by a warmup period during which the learning rate
    increases linearly between 0 and the initial lr set in the optimizer.

    Args:
        optimizer ([`~torch.optim.Optimizer`]):
            The optimizer for which to schedule the learning rate.
        num_warmup_steps (`int`):
            The number of steps for the warmup phase.
        last_epoch (`int`, *optional*, defaults to -1):
            The index of the last epoch when resuming training.

    Return:
        `torch.optim.lr_scheduler.LambdaLR` with the appropriate schedule.
    """

    def lr_lambda(current_step: int):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1.0, num_warmup_steps))
        return 1.0

    return LambdaLR(optimizer, lr_lambda, last_epoch=last_epoch)
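

# Example (illustrative sketch, not part of the module): with `num_warmup_steps=4` the
# multiplier applied to the base lr is 0.0, 0.25, 0.5, 0.75 for steps 0-3 and 1.0 from
# step 4 onward, so the optimizer's base lr is reached exactly when warmup ends.
#
#   import torch
#   opt = torch.optim.AdamW([torch.zeros(1, requires_grad=True)], lr=1e-4)
#   sched = get_constant_schedule_with_warmup(opt, num_warmup_steps=4)
#   multipliers = []
#   for _ in range(6):
#       multipliers.append(sched.get_last_lr()[0] / 1e-4)
#       opt.step()
#       sched.step()
#   # multipliers -> [0.0, 0.25, 0.5, 0.75, 1.0, 1.0]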


def get_piecewise_constant_schedule(optimizer: Optimizer, step_rules: str, last_epoch: int = -1) -> LambdaLR:
    """
    Create a schedule with a piecewise constant learning rate, using the learning rate set in optimizer.

    Args:
        optimizer ([`~torch.optim.Optimizer`]):
            The optimizer for which to schedule the learning rate.
        step_rules (`string`):
            The rules for the learning rate. ex: rule_steps="1:10,0.1:20,0.01:30,0.005" means that the learning rate
            is multiplied by 1 for the first 10 steps, by 0.1 for the next 20 steps, by 0.01 for the next 30 steps
            and by 0.005 for all remaining steps.
        last_epoch (`int`, *optional*, defaults to -1):
            The index of the last epoch when resuming training.

    Return:
        `torch.optim.lr_scheduler.LambdaLR` with the appropriate schedule.
    """

    rules_dict = {}
    rule_list = step_rules.split(",")
    for rule_str in rule_list[:-1]:
        value_str, steps_str = rule_str.split(":")
        steps = int(steps_str)
        value = float(value_str)
        rules_dict[steps] = value
    last_lr_multiple = float(rule_list[-1])

    def create_rules_function(rules_dict, last_lr_multiple):
        def rule_func(steps: int) -> float:
            sorted_steps = sorted(rules_dict.keys())
            for i, sorted_step in enumerate(sorted_steps):
                if steps < sorted_step:
                    return rules_dict[sorted_steps[i]]
            return last_lr_multiple

        return rule_func

    rules_func = create_rules_function(rules_dict, last_lr_multiple)

    return LambdaLR(optimizer, rules_func, last_epoch=last_epoch)


def get_linear_schedule_with_warmup(
    optimizer: Optimizer, num_warmup_steps: int, num_training_steps: int, last_epoch: int = -1
) -> LambdaLR:
    """
    Create a schedule with a learning rate that decreases linearly from the initial lr set in the optimizer to 0, after
    a warmup period during which it increases linearly from 0 to the initial lr set in the optimizer.

    Args:
        optimizer ([`~torch.optim.Optimizer`]):
            The optimizer for which to schedule the learning rate.
        num_warmup_steps (`int`):
            The number of steps for the warmup phase.
        num_training_steps (`int`):
            The total number of training steps.
        last_epoch (`int`, *optional*, defaults to -1):
            The index of the last epoch when resuming training.

    Return:
        `torch.optim.lr_scheduler.LambdaLR` with the appropriate schedule.
    """

    def lr_lambda(current_step: int):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1, num_warmup_steps))
        return max(
            0.0, float(num_training_steps - current_step) / float(max(1, num_training_steps - num_warmup_steps))
        )

    return LambdaLR(optimizer, lr_lambda, last_epoch)
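

# Worked example (illustrative, not part of the module): with num_warmup_steps=100 and
# num_training_steps=1000, the multiplier ramps linearly from 0 to 1 over the first 100
# steps, then decays linearly back to 0 over the remaining 900 steps; e.g. step 550 gives
# (1000 - 550) / (1000 - 100) = 0.5.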


def get_cosine_schedule_with_warmup(
    optimizer: Optimizer, num_warmup_steps: int, num_training_steps: int, num_cycles: float = 0.5, last_epoch: int = -1
) -> LambdaLR:
    """
    Create a schedule with a learning rate that decreases following the values of the cosine function between the
    initial lr set in the optimizer to 0, after a warmup period during which it increases linearly between 0 and the
    initial lr set in the optimizer.

    Args:
        optimizer ([`~torch.optim.Optimizer`]):
            The optimizer for which to schedule the learning rate.
        num_warmup_steps (`int`):
            The number of steps for the warmup phase.
        num_training_steps (`int`):
            The total number of training steps.
        num_cycles (`float`, *optional*, defaults to 0.5):
            The number of periods of the cosine function in a schedule (the default is to just decrease from the max
            value to 0 following a half-cosine).
        last_epoch (`int`, *optional*, defaults to -1):
            The index of the last epoch when resuming training.

    Return:
        `torch.optim.lr_scheduler.LambdaLR` with the appropriate schedule.
    """

    def lr_lambda(current_step):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1, num_warmup_steps))
        progress = float(current_step - num_warmup_steps) / float(max(1, num_training_steps - num_warmup_steps))
        return max(0.0, 0.5 * (1.0 + math.cos(math.pi * float(num_cycles) * 2.0 * progress)))

    return LambdaLR(optimizer, lr_lambda, last_epoch)
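

# Worked example (illustrative, not part of the module): after warmup the multiplier is
# 0.5 * (1 + cos(pi * 2 * num_cycles * progress)) with progress in [0, 1]. For the
# default num_cycles=0.5 this is a single half-cosine: 1.0 right after warmup, 0.5 at
# the midpoint of the decay phase, and 0.0 at num_training_steps.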


def get_cosine_with_hard_restarts_schedule_with_warmup(
    optimizer: Optimizer, num_warmup_steps: int, num_training_steps: int, num_cycles: int = 1, last_epoch: int = -1
) -> LambdaLR:
    """
    Create a schedule with a learning rate that decreases following the values of the cosine function between the
    initial lr set in the optimizer to 0, with several hard restarts, after a warmup period during which it increases
    linearly between 0 and the initial lr set in the optimizer.

    Args:
        optimizer ([`~torch.optim.Optimizer`]):
            The optimizer for which to schedule the learning rate.
        num_warmup_steps (`int`):
            The number of steps for the warmup phase.
        num_training_steps (`int`):
            The total number of training steps.
        num_cycles (`int`, *optional*, defaults to 1):
            The number of hard restarts to use.
        last_epoch (`int`, *optional*, defaults to -1):
            The index of the last epoch when resuming training.

    Return:
        `torch.optim.lr_scheduler.LambdaLR` with the appropriate schedule.
    """

    def lr_lambda(current_step):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1, num_warmup_steps))
        progress = float(current_step - num_warmup_steps) / float(max(1, num_training_steps - num_warmup_steps))
        if progress >= 1.0:
            return 0.0
        return max(0.0, 0.5 * (1.0 + math.cos(math.pi * ((float(num_cycles) * progress) % 1.0))))

    return LambdaLR(optimizer, lr_lambda, last_epoch)
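

# Worked example (illustrative, not part of the module): with num_cycles=2 the decay
# phase is split into two identical cosine waves, each falling from 1.0 to 0.0; the
# multiplier jumps back to 1.0 at the start of the second wave (the "hard restart")
# because of the modulo in (num_cycles * progress) % 1.0.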


def get_polynomial_decay_schedule_with_warmup(
    optimizer: Optimizer,
    num_warmup_steps: int,
    num_training_steps: int,
    lr_end: float = 1e-7,
    power: float = 1.0,
    last_epoch: int = -1,
) -> LambdaLR:
    """
    Create a schedule with a learning rate that decreases as a polynomial decay from the initial lr set in the
    optimizer to end lr defined by *lr_end*, after a warmup period during which it increases linearly from 0 to the
    initial lr set in the optimizer.

    Args:
        optimizer ([`~torch.optim.Optimizer`]):
            The optimizer for which to schedule the learning rate.
        num_warmup_steps (`int`):
            The number of steps for the warmup phase.
        num_training_steps (`int`):
            The total number of training steps.
        lr_end (`float`, *optional*, defaults to 1e-7):
            The end LR.
        power (`float`, *optional*, defaults to 1.0):
            Power factor.
        last_epoch (`int`, *optional*, defaults to -1):
            The index of the last epoch when resuming training.

    Note: *power* defaults to 1.0 as in the fairseq implementation, which in turn is based on the original BERT
    implementation at
    https://github.com/google-research/bert/blob/f39e881b169b9d53bea03d2d341b31707a6c052b/optimization.py#L37

    Return:
        `torch.optim.lr_scheduler.LambdaLR` with the appropriate schedule.
    """

    lr_init = optimizer.defaults["lr"]
    if not (lr_init > lr_end):
        raise ValueError(f"lr_end ({lr_end}) must be smaller than initial lr ({lr_init})")

    def lr_lambda(current_step: int):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1, num_warmup_steps))
        elif current_step > num_training_steps:
            return lr_end / lr_init  # as LambdaLR multiplies by lr_init
        else:
            lr_range = lr_init - lr_end
            decay_steps = num_training_steps - num_warmup_steps
            pct_remaining = 1 - (current_step - num_warmup_steps) / decay_steps
            decay = lr_range * pct_remaining**power + lr_end
            return decay / lr_init  # as LambdaLR multiplies by lr_init

    return LambdaLR(optimizer, lr_lambda, last_epoch)
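

# Worked example (illustrative, not part of the module): with lr_init=1e-3, lr_end=1e-7,
# power=1.0, num_warmup_steps=0 and num_training_steps=1000, the lr decays linearly from
# 1e-3 to 1e-7; halfway through, pct_remaining=0.5 and
# lr = (1e-3 - 1e-7) * 0.5 + 1e-7, roughly 5.0e-4 (the lr_lambda returns this value
# divided by lr_init, since LambdaLR multiplies by the initial lr).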


TYPE_TO_SCHEDULER_FUNCTION = {
    SchedulerType.LINEAR: get_linear_schedule_with_warmup,
    SchedulerType.COSINE: get_cosine_schedule_with_warmup,
    SchedulerType.COSINE_WITH_RESTARTS: get_cosine_with_hard_restarts_schedule_with_warmup,
    SchedulerType.POLYNOMIAL: get_polynomial_decay_schedule_with_warmup,
    SchedulerType.CONSTANT: get_constant_schedule,
    SchedulerType.CONSTANT_WITH_WARMUP: get_constant_schedule_with_warmup,
    SchedulerType.PIECEWISE_CONSTANT: get_piecewise_constant_schedule,
}


def get_scheduler(
    name: Union[str, SchedulerType],
    optimizer: Optimizer,
    step_rules: Optional[str] = None,
    num_warmup_steps: Optional[int] = None,
    num_training_steps: Optional[int] = None,
    num_cycles: int = 1,
    power: float = 1.0,
    last_epoch: int = -1,
) -> LambdaLR:
    """
    Unified API to get any scheduler from its name.

    Args:
        name (`str` or `SchedulerType`):
            The name of the scheduler to use.
        optimizer (`torch.optim.Optimizer`):
            The optimizer that will be used during training.
        step_rules (`str`, *optional*):
            A string representing the step rules to use. This is only used by the `PIECEWISE_CONSTANT` scheduler.
        num_warmup_steps (`int`, *optional*):
            The number of warmup steps to do. This is not required by all schedulers (hence the argument being
            optional), the function will raise an error if it's unset and the scheduler type requires it.
        num_training_steps (`int`, *optional*):
            The number of training steps to do. This is not required by all schedulers (hence the argument being
            optional), the function will raise an error if it's unset and the scheduler type requires it.
        num_cycles (`int`, *optional*):
            The number of hard restarts used in the `COSINE_WITH_RESTARTS` scheduler.
        power (`float`, *optional*, defaults to 1.0):
            Power factor. See the `POLYNOMIAL` scheduler.
        last_epoch (`int`, *optional*, defaults to -1):
            The index of the last epoch when resuming training.
    """
    name = SchedulerType(name)
    schedule_func = TYPE_TO_SCHEDULER_FUNCTION[name]

    if name == SchedulerType.CONSTANT:
        return schedule_func(optimizer, last_epoch=last_epoch)

    if name == SchedulerType.PIECEWISE_CONSTANT:
        return schedule_func(optimizer, step_rules=step_rules, last_epoch=last_epoch)

    # All other schedulers require `num_warmup_steps`.
    if num_warmup_steps is None:
        raise ValueError(f"{name} requires `num_warmup_steps`, please provide that argument.")

    if name == SchedulerType.CONSTANT_WITH_WARMUP:
        return schedule_func(optimizer, num_warmup_steps=num_warmup_steps, last_epoch=last_epoch)

    # All other schedulers require `num_training_steps`.
    if num_training_steps is None:
        raise ValueError(f"{name} requires `num_training_steps`, please provide that argument.")

    if name == SchedulerType.COSINE_WITH_RESTARTS:
        return schedule_func(
            optimizer, num_warmup_steps=num_warmup_steps, num_training_steps=num_training_steps,
            num_cycles=num_cycles, last_epoch=last_epoch,
        )

    if name == SchedulerType.POLYNOMIAL:
        return schedule_func(
            optimizer, num_warmup_steps=num_warmup_steps, num_training_steps=num_training_steps,
            power=power, last_epoch=last_epoch,
        )

    return schedule_func(
        optimizer, num_warmup_steps=num_warmup_steps, num_training_steps=num_training_steps, last_epoch=last_epoch
    )
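

# Example usage (illustrative sketch, not part of the module; assumes only that torch is
# installed): a typical training loop pairs `get_scheduler` with an optimizer and calls
# `scheduler.step()` once per optimizer step.
#
#   import torch
#   model = torch.nn.Linear(4, 4)
#   optimizer = torch.optim.AdamW(model.parameters(), lr=1e-4)
#   lr_scheduler = get_scheduler(
#       "cosine", optimizer=optimizer, num_warmup_steps=500, num_training_steps=10_000
#   )
#   for step in range(10_000):
#       ...  # forward/backward pass
#       optimizer.step()
#       lr_scheduler.step()
#       optimizer.zero_grad()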