from typing import List, Optional, Tuple, Union

import torch
from torch import Tensor
from .optimizer import (Optimizer, params_t, _use_grad_for_differentiable, _get_value,
                        _stack_if_compiling, _dispatch_sqrt, _default_to_fused_or_foreach,
                        _capturable_doc, _differentiable_doc, _foreach_doc, _fused_doc,
                        _maximize_doc)
from torch.utils._foreach_utils import _get_fused_kernels_supported_devices

__all__ = ['Adam', 'adam']


class Adam(Optimizer):
eeef dededede	e dededede	e f fddZ
 fddZdd ZedddZ  ZS )r   MbP?g?g+?:0yE>r   FN)foreachmaximize
capturabledifferentiablefusedparamslrbetasepsweight_decayamsgradr   r   r   r   r   c                   s6  d|kst d| t|tr|r|	st dd|ks#t d| d|d   kr/dk s9n t d|d  d|d   krEdk sOn t d	|d  d|ksZt d
| t||||||||	|
|d
}t || |r|
rwtdd| _t  t	 fdd| j
D std  d|rtdd S d S )N        zInvalid learning rate: Elr as a Tensor is not supported for capturable=False and foreach=TruezInvalid epsilon value: r   g      ?z#Invalid beta parameter at index 0: r   z#Invalid beta parameter at index 1: zInvalid weight_decay value: )
r    r!   r"   r#   r$   r   r   r   r   r   z)`fused` does not support `differentiable`Tc                 3   s4    | ]}|d  D ]}|j j v ot|V  qqdS )r   N)devicetypetorchZis_floating_point).0ZpgpZfused_supported_devices _/var/www/html/eduruby.in/lip-sync/lip-sync-env/lib/python3.10/site-packages/torch/optim/adam.py	<genexpr>8   s    z Adam.__init__.<locals>.<genexpr>zX`fused=True` requires all the params to be floating point Tensors of supported devices: .z0`fused` and `foreach` cannot be `True` together.)
ValueError
isinstancer   dictsuper__init__RuntimeErrorZ_step_supports_amp_scalingr   allparam_groups)selfr   r    r!   r"   r#   r$   r   r   r   r   r   defaults	__class__r,   r.   r5      sB   
zAdam.__init__c                    s   t  | | jD ]&}|dd |dd |dd  |dd |dd |dd  q	t| j }t|dkoEt	|d d	 }|sX|D ]}t
t|d	 |d	< qJd S d S )
Nr$   Fr   r   r   r   r   r   step)r4   __setstate__r8   
setdefaultliststatevalueslenr)   	is_tensortensorfloat)r9   rA   groupZstate_valuesZstep_is_tensorsr;   r-   r.   r>   A   s   
zAdam.__setstate__c           
      C   sN  |d D ]}|j d ur|| |j jrtd||j  | j| }	t|	dkrd|d s1|d r;tjdtj|j	dnt
d|	d	< tj|tjd
|	d< tj|tjd
|	d< |d rdtj|tjd
|	d< ||	d  ||	d  |d r}||	d  |d r|	d	 jrtd|d rt|d r|d std||	d	  qd S )Nr   zJAdam does not support sparse gradients, please consider SparseAdam insteadr   r   r   r-   )Zdtyper'   r%   r=   )Zmemory_formatexp_avg
exp_avg_sqr$   max_exp_avg_sqr   zB`requires_grad` is not supported for `step` in differentiable moder   r    r&   )gradappendZ	is_sparser6   rA   rC   r)   ZzerosrF   r'   rE   Z
zeros_likeZpreserve_formatZrequires_gradrD   )
r9   rG   params_with_gradgradsexp_avgsexp_avg_sqsmax_exp_avg_sqsstate_stepsr+   rA   r-   r-   r.   _init_groupP   s:   



zAdam._init_groupc                 C   s   |    d}|dur!t  | }W d   n1 sw   Y  | jD ]R}g }g }g }g }g }g }	|d \}
}| |||||||	 t||||||	f|d |
||d |d |d |d |d |d	 |d
 |d t| ddt| ddd q$|S )zPerforms a single optimization step.

        Args:
            closure (Callable, optional): A closure that reevaluates the model
                and returns the loss.
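
        Example (an illustrative sketch; ``model``, ``loss_fn``, ``input`` and ``target``
        are assumed to be defined by the surrounding training code)::

            >>> optimizer = Adam(model.parameters(), lr=1e-3)
            >>> def closure():
            ...     optimizer.zero_grad()
            ...     loss = loss_fn(model(input), target)
            ...     loss.backward()
            ...     return loss
            >>> optimizer.step(closure)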
        """
        self._cuda_graph_capture_health_check()

        loss = None
        if closure is not None:
            with torch.enable_grad():
                loss = closure()

        for group in self.param_groups:
            params_with_grad = []
            grads = []
            exp_avgs = []
            exp_avg_sqs = []
            max_exp_avg_sqs = []
            state_steps = []
            beta1, beta2 = group['betas']

            self._init_group(group, params_with_grad, grads, exp_avgs, exp_avg_sqs,
                             max_exp_avg_sqs, state_steps)

            adam(params_with_grad,
                 grads,
                 exp_avgs,
                 exp_avg_sqs,
                 max_exp_avg_sqs,
                 state_steps,
                 amsgrad=group['amsgrad'],
                 beta1=beta1,
                 beta2=beta2,
                 lr=group['lr'],
                 weight_decay=group['weight_decay'],
                 eps=group['eps'],
                 maximize=group['maximize'],
                 foreach=group['foreach'],
                 capturable=group['capturable'],
                 differentiable=group['differentiable'],
                 fused=group['fused'],
                 grad_scale=getattr(self, "grad_scale", None),
                 found_inf=getattr(self, "found_inf", None))

        return loss
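

# The class docstring below is assembled from the shared ``*_doc`` snippets imported from
# .optimizer (``_foreach_doc``, ``_maximize_doc``, ``_capturable_doc``, ``_differentiable_doc``,
# ``_fused_doc``), so the descriptions of these flags stay consistent with the other
# optimizers that reuse them.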
Adam.__doc__ = r"""Implements Adam algorithm.

    .. math::
       \begin{aligned}
            &\rule{110mm}{0.4pt}                                                                 \\
            &\textbf{input}      : \gamma \text{ (lr)}, \beta_1, \beta_2
                \text{ (betas)},\theta_0 \text{ (params)},f(\theta) \text{ (objective)}          \\
            &\hspace{13mm}      \lambda \text{ (weight decay)},  \: \textit{amsgrad},
                \:\textit{maximize}                                                              \\
            &\textbf{initialize} :  m_0 \leftarrow 0 \text{ ( first moment)},
                v_0\leftarrow 0 \text{ (second moment)},\: \widehat{v_0}^{max}\leftarrow 0\\[-1.ex]
            &\rule{110mm}{0.4pt}                                                                 \\
            &\textbf{for} \: t=1 \: \textbf{to} \: \ldots \: \textbf{do}                         \\

            &\hspace{5mm}\textbf{if} \: \textit{maximize}:                                       \\
            &\hspace{10mm}g_t           \leftarrow   -\nabla_{\theta} f_t (\theta_{t-1})         \\
            &\hspace{5mm}\textbf{else}                                                           \\
            &\hspace{10mm}g_t           \leftarrow   \nabla_{\theta} f_t (\theta_{t-1})          \\
            &\hspace{5mm}\textbf{if} \: \lambda \neq 0                                           \\
            &\hspace{10mm} g_t \leftarrow g_t + \lambda  \theta_{t-1}                            \\
            &\hspace{5mm}m_t           \leftarrow   \beta_1 m_{t-1} + (1 - \beta_1) g_t          \\
            &\hspace{5mm}v_t           \leftarrow   \beta_2 v_{t-1} + (1-\beta_2) g^2_t          \\
            &\hspace{5mm}\widehat{m_t} \leftarrow   m_t/\big(1-\beta_1^t \big)                   \\
            &\hspace{5mm}\widehat{v_t} \leftarrow   v_t/\big(1-\beta_2^t \big)                   \\
            &\hspace{5mm}\textbf{if} \: amsgrad                                                  \\
            &\hspace{10mm}\widehat{v_t}^{max} \leftarrow \mathrm{max}(\widehat{v_t}^{max},
                \widehat{v_t})                                                                   \\
            &\hspace{10mm}\theta_t \leftarrow \theta_{t-1} - \gamma \widehat{m_t}/
                \big(\sqrt{\widehat{v_t}^{max}} + \epsilon \big)                                 \\
            &\hspace{5mm}\textbf{else}                                                           \\
            &\hspace{10mm}\theta_t \leftarrow \theta_{t-1} - \gamma \widehat{m_t}/
                \big(\sqrt{\widehat{v_t}} + \epsilon \big)                                       \\
            &\rule{110mm}{0.4pt}                                                          \\[-1.ex]
            &\bf{return} \:  \theta_t                                                     \\[-1.ex]
            &\rule{110mm}{0.4pt}                                                          \\[-1.ex]
       \end{aligned}

    For further details regarding the algorithm we refer to `Adam: A Method for Stochastic Optimization`_.
    """ + fr"""
    Args:
        params (iterable): iterable of parameters to optimize or dicts defining
            parameter groups
        lr (float, Tensor, optional): learning rate (default: 1e-3). A tensor LR
            is not yet supported for all our implementations. Please use a float
            LR if you are not also specifying fused=True or capturable=True.
        betas (Tuple[float, float], optional): coefficients used for computing
            running averages of gradient and its square (default: (0.9, 0.999))
        eps (float, optional): term added to the denominator to improve
            numerical stability (default: 1e-8)
        weight_decay (float, optional): weight decay (L2 penalty) (default: 0)
        amsgrad (bool, optional): whether to use the AMSGrad variant of this
            algorithm from the paper `On the Convergence of Adam and Beyond`_
            (default: False)
        {_foreach_doc}
        {_maximize_doc}
        {_capturable_doc}
        {_differentiable_doc}
        {_fused_doc}
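
    Example (an illustrative sketch; ``model``, ``loss_fn``, ``input`` and ``target`` are
    assumed to be defined by the surrounding training loop)::

        >>> optimizer = torch.optim.Adam(model.parameters(), lr=1e-3)
        >>> optimizer.zero_grad()
        >>> loss_fn(model(input), target).backward()
        >>> optimizer.step()
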
    .. _Adam\: A Method for Stochastic Optimization:
        https://arxiv.org/abs/1412.6980
    .. _On the Convergence of Adam and Beyond:
        https://openreview.net/forum?id=ryQu7f-RZ

    """


def adam(params: List[Tensor],
         grads: List[Tensor],
         exp_avgs: List[Tensor],
         exp_avg_sqs: List[Tensor],
         max_exp_avg_sqs: List[Tensor],
         state_steps: List[Tensor],
         # These would normally be keyword-only, but TorchScript does not support
         # keyword-only arguments with defaults, so they stay positional with defaults.
         foreach: Optional[bool] = None,
         capturable: bool = False,
         differentiable: bool = False,
         fused: Optional[bool] = None,
         grad_scale: Optional[Tensor] = None,
         found_inf: Optional[Tensor] = None,
         *,
         amsgrad: bool,
         beta1: float,
         beta2: float,
         lr: Union[float, Tensor],
         weight_decay: float,
         eps: float,
         maximize: bool):
    r"""Functional API that performs Adam algorithm computation.
    See :class:`~torch.optim.Adam` for details.
    """
    # Default to the foreach implementation only when neither foreach nor fused was
    # requested explicitly; fall back to the single-tensor path for a Tensor lr
    # without capturable.
    if fused is None and foreach is None:
        _, foreach = _default_to_fused_or_foreach(params, differentiable, use_fused=False)
        if foreach and isinstance(lr, Tensor) and not capturable:
            foreach = False
    if fused is None:
        fused = False
    if foreach is None:
        foreach = False

    if not torch._utils.is_compiling() and not all(isinstance(t, torch.Tensor) for t in state_steps):
        raise RuntimeError("API has changed, `state_steps` argument must contain a list of singleton tensors")

    if foreach and torch.jit.is_scripting():
        raise RuntimeError('torch.jit.script not supported with foreach optimizers')
    if fused and torch.jit.is_scripting():
        raise RuntimeError('torch.jit.script not supported with fused optimizers')

    if fused and not torch.jit.is_scripting():
        func = _fused_adam
    elif foreach and not torch.jit.is_scripting():
        func = _multi_tensor_adam
    else:
        func = _single_tensor_adam

    func(params,
         grads,
         exp_avgs,
         exp_avg_sqs,
         max_exp_avg_sqs,
         state_steps,
         amsgrad=amsgrad,
         beta1=beta1,
         beta2=beta2,
         lr=lr,
         weight_decay=weight_decay,
         eps=eps,
         maximize=maximize,
         capturable=capturable,
         differentiable=differentiable,
         grad_scale=grad_scale,
         found_inf=found_inf)
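
# Dispatch targets for adam() above:
#   _single_tensor_adam - a plain per-parameter loop; the only path that supports
#                         differentiable=True.
#   _multi_tensor_adam  - batches the same update across parameters with torch._foreach_* ops.
#   _fused_adam         - hands whole device/dtype groups to the fused torch._fused_adam_ kernel.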
def _single_tensor_adam(params: List[Tensor], grads: List[Tensor], exp_avgs: List[Tensor],
                        exp_avg_sqs: List[Tensor], max_exp_avg_sqs: List[Tensor],
                        state_steps: List[Tensor], grad_scale: Optional[Tensor],
                        found_inf: Optional[Tensor], *, amsgrad: bool, beta1: float,
                        beta2: float, lr: Union[float, Tensor], weight_decay: float,
                        eps: float, maximize: bool, capturable: bool, differentiable: bool):
    assert grad_scale is None and found_inf is None

    if torch.jit.is_scripting():
        # TorchScript cannot dispatch on a Union[float, Tensor] lr, so restrict it to float here
        assert isinstance(lr, float)

    for i, param in enumerate(params):
        grad = grads[i] if not maximize else -grads[i]
        exp_avg = exp_avgs[i]
        exp_avg_sq = exp_avg_sqs[i]
        step_t = state_steps[i]

        if not torch._utils.is_compiling() and capturable:
            assert (
                (param.is_cuda and step_t.is_cuda) or (param.is_xla and step_t.is_xla)
            ), "If capturable=True, params and state_steps must be CUDA or XLA tensors."

        # update step
        step_t += 1

        if weight_decay != 0:
            grad = grad.add(param, alpha=weight_decay)

        if torch.is_complex(param):
            grad = torch.view_as_real(grad)
            exp_avg = torch.view_as_real(exp_avg)
            exp_avg_sq = torch.view_as_real(exp_avg_sq)
            if amsgrad:
                max_exp_avg_sqs[i] = torch.view_as_real(max_exp_avg_sqs[i])
            param = torch.view_as_real(param)

        # Decay the first and second moment running average coefficient
        exp_avg.lerp_(grad, 1 - beta1)
        exp_avg_sq.mul_(beta2).addcmul_(grad, grad.conj(), value=1 - beta2)

        if capturable or differentiable:
            step = step_t

            bias_correction1 = 1 - beta1 ** step
            bias_correction2 = 1 - beta2 ** step

            step_size = lr / bias_correction1
            step_size_neg = step_size.neg()
            bias_correction2_sqrt = bias_correction2.sqrt()

            if amsgrad:
                # Maintain the maximum of all 2nd moment running averages so far
                if differentiable:
                    max_exp_avg_sq = max_exp_avg_sqs[i].clone()
                else:
                    max_exp_avg_sq = max_exp_avg_sqs[i]
                max_exp_avg_sqs[i].copy_(torch.maximum(max_exp_avg_sq, exp_avg_sq))

                # Fold the scalar step-size math into the denominator to avoid an extra
                # param-sized intermediate
                denom = (max_exp_avg_sqs[i].sqrt() / (bias_correction2_sqrt * step_size_neg)).add_(eps / step_size_neg)
            else:
                denom = (exp_avg_sq.sqrt() / (bias_correction2_sqrt * step_size_neg)).add_(eps / step_size_neg)

            param.addcdiv_(exp_avg, denom)
        else:
            step = _get_value(step_t)

            bias_correction1 = 1 - beta1 ** step
            bias_correction2 = 1 - beta2 ** step

            step_size = lr / bias_correction1
            bias_correction2_sqrt = _dispatch_sqrt(bias_correction2)

            if amsgrad:
                # Maintain the maximum of all 2nd moment running averages so far
                torch.maximum(max_exp_avg_sqs[i], exp_avg_sq, out=max_exp_avg_sqs[i])
                denom = (max_exp_avg_sqs[i].sqrt() / bias_correction2_sqrt).add_(eps)
            else:
                denom = (exp_avg_sq.sqrt() / bias_correction2_sqrt).add_(eps)

            param.addcdiv_(exp_avg, denom, value=-step_size)

        # Switch back to the complex view of the AMSGrad buffer
        if amsgrad and torch.is_complex(params[i]):
            max_exp_avg_sqs[i] = torch.view_as_complex(max_exp_avg_sqs[i])
def _multi_tensor_adam(params: List[Tensor], grads: List[Tensor], exp_avgs: List[Tensor],
                       exp_avg_sqs: List[Tensor], max_exp_avg_sqs: List[Tensor],
                       state_steps: List[Tensor], grad_scale: Optional[Tensor],
                       found_inf: Optional[Tensor], *, amsgrad: bool, beta1: float,
                       beta2: float, lr: Union[float, Tensor], weight_decay: float,
                       eps: float, maximize: bool, capturable: bool, differentiable: bool):
    if len(params) == 0:
        return

    if isinstance(lr, Tensor) and not capturable:
        raise RuntimeError("lr as a Tensor is not supported for capturable=False and foreach=True")

    if not torch._utils.is_compiling() and capturable:
        assert all(p.is_cuda and step.is_cuda for p, step in zip(params, state_steps)), \
            "If capturable=True, params and state_steps must be CUDA tensors."

    assert grad_scale is None and found_inf is None
    assert not differentiable, "_foreach ops don't support autograd"

    grouped_tensors = Optimizer._group_tensors_by_device_and_dtype(
        [params, grads, exp_avgs, exp_avg_sqs, max_exp_avg_sqs, state_steps])
    for ((device_params, device_grads, device_exp_avgs, device_exp_avg_sqs,
          device_max_exp_avg_sqs, device_state_steps), _) in grouped_tensors.values():
        if maximize:
            device_grads = torch._foreach_neg(device_grads)

        # Handle complex parameters by working on their real views
        device_grads = [torch.view_as_real(x) if torch.is_complex(x) else x for x in device_grads]
        device_exp_avgs = [torch.view_as_real(x) if torch.is_complex(x) else x for x in device_exp_avgs]
        device_exp_avg_sqs = [torch.view_as_real(x) if torch.is_complex(x) else x for x in device_exp_avg_sqs]
        if amsgrad:
            device_max_exp_avg_sqs = [torch.view_as_real(x) if torch.is_complex(x) else x for x in device_max_exp_avg_sqs]
        device_params = [torch.view_as_real(x) if torch.is_complex(x) else x for x in device_params]

        # update steps
        torch._foreach_add_(device_state_steps, 1)

        if weight_decay != 0:
            if maximize:
                # Re-use the intermediate already allocated for maximize
                torch._foreach_add_(device_grads, device_params, alpha=weight_decay)
            else:
                device_grads = torch._foreach_add(device_grads, device_params, alpha=weight_decay)

        # Decay the first and second moment running average coefficient
        torch._foreach_lerp_(device_exp_avgs, device_grads, 1 - beta1)
        torch._foreach_mul_(device_exp_avg_sqs, beta2)
        torch._foreach_addcmul_(device_exp_avg_sqs, device_grads, device_grads, 1 - beta2)

        del device_grads

        if capturable:
            bias_correction1 = torch._foreach_pow(beta1, device_state_steps)
            bias_correction2 = torch._foreach_pow(beta2, device_state_steps)
            # foreach_sub doesn't allow a scalar as the first arg
            torch._foreach_sub_(bias_correction1, 1)
            torch._foreach_sub_(bias_correction2, 1)
            torch._foreach_neg_(bias_correction2)
            # foreach_div doesn't allow a scalar as the first arg
            torch._foreach_div_(bias_correction1, lr)
            torch._foreach_reciprocal_(bias_correction1)
            torch._foreach_sqrt_(bias_correction2)

            # After the in-place ops above: step_size = -lr / (1 - beta1 ** t)
            # and bias_correction2_sqrt = sqrt(1 - beta2 ** t)
            step_size = bias_correction1
            bias_correction2_sqrt = bias_correction2

            if amsgrad:
                torch._foreach_maximum_(device_max_exp_avg_sqs, device_exp_avg_sqs)
                exp_avg_sq_sqrt = torch._foreach_sqrt(device_max_exp_avg_sqs)
            else:
                exp_avg_sq_sqrt = torch._foreach_sqrt(device_exp_avg_sqs)

            torch._foreach_div_(exp_avg_sq_sqrt, bias_correction2_sqrt)
            torch._foreach_add_(exp_avg_sq_sqrt, eps)
            torch._foreach_div_(exp_avg_sq_sqrt, step_size)
            torch._foreach_addcdiv_(device_params, device_exp_avgs, exp_avg_sq_sqrt)
        else:
            bias_correction1 = [1 - beta1 ** _get_value(step) for step in device_state_steps]
            bias_correction2 = [1 - beta2 ** _get_value(step) for step in device_state_steps]

            step_size = _stack_if_compiling([(lr / bc) * -1 for bc in bias_correction1])
            bias_correction2_sqrt = [_dispatch_sqrt(bc) for bc in bias_correction2]

            if amsgrad:
                torch._foreach_maximum_(device_max_exp_avg_sqs, device_exp_avg_sqs)
                exp_avg_sq_sqrt = torch._foreach_sqrt(device_max_exp_avg_sqs)
            else:
                exp_avg_sq_sqrt = torch._foreach_sqrt(device_exp_avg_sqs)

            torch._foreach_div_(exp_avg_sq_sqrt, bias_correction2_sqrt)
            torch._foreach_add_(exp_avg_sq_sqrt, eps)
            torch._foreach_addcdiv_(device_params, device_exp_avgs, exp_avg_sq_sqrt, step_size)
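
# grad_scale / found_inf plumbing for AMP: Adam.__init__ sets `_step_supports_amp_scaling = True`
# for fused=True, so a GradScaler may hand its scale and inf/nan flag straight to this function.
# Both tensors are forwarded to torch._fused_adam_ (presumably so the kernel can unscale
# gradients on the fly), and when found_inf marks an overflow the step counters incremented
# below are rolled back again right after the kernel call.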
def _fused_adam(params: List[Tensor], grads: List[Tensor], exp_avgs: List[Tensor],
                exp_avg_sqs: List[Tensor], max_exp_avg_sqs: List[Tensor],
                state_steps: List[Tensor], grad_scale: Optional[Tensor],
                found_inf: Optional[Tensor], *, amsgrad: bool, beta1: float,
                beta2: float, lr: Union[float, Tensor], weight_decay: float,
                eps: float, maximize: bool, capturable: bool, differentiable: bool) -> None:
    if not params:
        return
    if differentiable:
        raise RuntimeError("Adam with fused=True does not support differentiable=True")

    grad_scale_dict = {grad_scale.device: grad_scale} if grad_scale is not None else None
    found_inf_dict = {found_inf.device: found_inf} if found_inf is not None else None

    # Only move lr around per device when it is a (non-CPU) Tensor; otherwise treat it as a scalar.
    lr_dict = {lr.device: lr} if isinstance(lr, Tensor) and str(lr.device) != "cpu" else None

    grouped_tensors = Optimizer._group_tensors_by_device_and_dtype(
        [params, grads, exp_avgs, exp_avg_sqs, max_exp_avg_sqs, state_steps])
    for (device, _), ((device_params, device_grads, device_exp_avgs, device_exp_avg_sqs,
                       device_max_exp_avg_sqs, device_state_steps), _) in grouped_tensors.items():
        device_grad_scale, device_found_inf = None, None
        if grad_scale is not None:
            if device not in grad_scale_dict:
                grad_scale_dict[device] = grad_scale.to(device, non_blocking=True)
            device_grad_scale = grad_scale_dict[device]
        if found_inf is not None:
            if device not in found_inf_dict:
                found_inf_dict[device] = found_inf.to(device, non_blocking=True)
            device_found_inf = found_inf_dict[device]
        if lr_dict is not None and device not in lr_dict:
            lr_dict[device] = lr.to(device=device, non_blocking=True)
            lr = lr_dict[device]

        torch._foreach_add_(device_state_steps, 1)
        torch._fused_adam_(
            device_params,
            device_grads,
            device_exp_avgs,
            device_exp_avg_sqs,
            device_max_exp_avg_sqs,
            device_state_steps,
            amsgrad=amsgrad,
            lr=lr,
            beta1=beta1,
            beta2=beta2,
            weight_decay=weight_decay,
            eps=eps,
            maximize=maximize,
            grad_scale=device_grad_scale,
            found_inf=device_found_inf,
        )
        if device_found_inf is not None:
            torch._foreach_sub_(device_state_steps, [device_found_inf] * len(device_state_steps))