from typing import cast

import torch
from torch import Tensor

from .optimizer import (
    _capturable_doc,
    _default_to_fused_or_foreach,
    _differentiable_doc,
    _disable_dynamo_if_unsupported,
    _foreach_doc,
    _get_capturable_supported_devices,
    _get_scalar_dtype,
    _get_value,
    _maximize_doc,
    _params_doc,
    _to_scalar,
    _use_grad_for_differentiable,
    _view_as_real,
    Optimizer,
    ParamsT,
)

__all__ = ["Adamax", "adamax"]


class Adamax(Optimizer):
    def __init__(
        self,
        params: ParamsT,
        lr: float | Tensor = 2e-3,
        betas: tuple[float, float] = (0.9, 0.999),
        eps: float = 1e-8,
        weight_decay: float = 0,
        foreach: bool | None = None,
        *,
        maximize: bool = False,
        differentiable: bool = False,
        capturable: bool = False,
    ) -> None:
        if isinstance(lr, Tensor) and lr.numel() != 1:
            raise ValueError("Tensor lr must be 1-element")
        if not 0.0 <= lr:
            raise ValueError(f"Invalid learning rate: {lr}")
        if not 0.0 <= eps:
            raise ValueError(f"Invalid epsilon value: {eps}")
        if not 0.0 <= betas[0] < 1.0:
            raise ValueError(f"Invalid beta parameter at index 0: {betas[0]}")
        if not 0.0 <= betas[1] < 1.0:
            raise ValueError(f"Invalid beta parameter at index 1: {betas[1]}")
        if not 0.0 <= weight_decay:
            raise ValueError(f"Invalid weight_decay value: {weight_decay}")

        defaults = dict(
            lr=lr,
            betas=betas,
            eps=eps,
            weight_decay=weight_decay,
            foreach=foreach,
            maximize=maximize,
            differentiable=differentiable,
            capturable=capturable,
        )
        super().__init__(params, defaults)

    def __setstate__(self, state):
        super().__setstate__(state)
        for group in self.param_groups:
            group.setdefault("foreach", None)
            group.setdefault("maximize", False)
            group.setdefault("differentiable", False)
            group.setdefault("capturable", False)
            for p in group["params"]:
                p_state = self.state.get(p, [])
                if len(p_state) != 0 and not torch.is_tensor(p_state["step"]):
                    step_val = float(p_state["step"])
                    p_state["step"] = (
                        torch.tensor(step_val, dtype=_get_scalar_dtype(), device=p.device)
                        if group["capturable"]
                        else torch.tensor(step_val, dtype=_get_scalar_dtype())
                    )

    def _init_group(self, group, params_with_grad, grads, exp_avgs, exp_infs, state_steps):
        has_complex = False
        for p in group["params"]:
            if p.grad is None:
                continue
            has_complex |= torch.is_complex(p)
            params_with_grad.append(p)
            if p.grad.is_sparse:
                raise RuntimeError("Adamax does not support sparse gradients")
            grads.append(p.grad)

            state = self.state[p]

            # Lazy state initialization: step counter, first moment, infinity norm.
            if len(state) == 0:
                state["step"] = (
                    torch.zeros((), dtype=_get_scalar_dtype(), device=p.device)
                    if group["capturable"]
                    else torch.tensor(0.0, dtype=_get_scalar_dtype())
                )
                state["exp_avg"] = torch.zeros_like(p, memory_format=torch.preserve_format)
                state["exp_inf"] = torch.zeros_like(p, memory_format=torch.preserve_format)

            exp_avgs.append(state["exp_avg"])
            exp_infs.append(state["exp_inf"])
            state_steps.append(state["step"])

        return has_complex
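
    # Editorial sketch, not part of torch: after the first call to step(), each
    # parameter's entry in `self.state` holds the three tensors created by
    # _init_group() above. Hypothetical one-parameter example:
    #
    #   p = torch.nn.Parameter(torch.ones(2))
    #   opt = torch.optim.Adamax([p])
    #   p.grad = torch.full((2,), 0.5)
    #   opt.step()
    #   sorted(opt.state[p])   # ['exp_avg', 'exp_inf', 'step']
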
 q$|S )zPerforms a single optimization step.

        Args:
            closure (Callable, optional): A closure that reevaluates the model
                and returns the loss.
        Nr   r   r   r    r!   r   r   r   )
r   beta1beta2r   r    r!   r   r   r   rS   ) _cuda_graph_capture_health_checkr;   enable_gradr6   rT   r   )r*   closurelossr?   rN   rO   rP   rQ   rR   rU   rV   r   r   r    r!   r   r   r   rS   r.   r.   r/   r0   z   sR   

zAdamax.step)r   r   r   r   NN)__name__
__module____qualname__r   r=   r   tupleboolr)   r5   rT   r   r0   __classcell__r.   r.   r,   r/   r      sF    	
	
&#a  Implements Adamax algorithm (a variant of Adam based on infinity norm).

    .. math::
       \begin{aligned}
            &\rule{110mm}{0.4pt}                                                                 \\
            &\textbf{input}      : \gamma \text{ (lr)}, \beta_1, \beta_2
                \text{ (betas)},\theta_0 \text{ (params)},f(\theta) \text{ (objective)},
                \: \lambda \text{ (weight decay)},                                                \\
            &\hspace{13mm}    \epsilon \text{ (epsilon)}                                          \\
            &\textbf{initialize} :  m_0 \leftarrow 0 \text{ ( first moment)},
                u_0 \leftarrow 0 \text{ ( infinity norm)}                                 \\[-1.ex]
            &\rule{110mm}{0.4pt}                                                                 \\
            &\textbf{for} \: t=1 \: \textbf{to} \: \ldots \: \textbf{do}                         \\
            &\hspace{5mm}g_t           \leftarrow   \nabla_{\theta} f_t (\theta_{t-1})           \\
            &\hspace{5mm}if \: \lambda \neq 0                                                    \\
            &\hspace{10mm} g_t \leftarrow g_t + \lambda  \theta_{t-1}                            \\
            &\hspace{5mm}m_t      \leftarrow   \beta_1 m_{t-1} + (1 - \beta_1) g_t               \\
            &\hspace{5mm}u_t      \leftarrow   \mathrm{max}(\beta_2 u_{t-1}, |g_{t}|+\epsilon)   \\
            &\hspace{5mm}\theta_t \leftarrow \theta_{t-1} - \frac{\gamma m_t}{(1-\beta^t_1) u_t} \\
            &\rule{110mm}{0.4pt}                                                          \\[-1.ex]
            &\bf{return} \:  \theta_t                                                     \\[-1.ex]
            &\rule{110mm}{0.4pt}                                                          \\[-1.ex]
       \end{aligned}

    For further details regarding the algorithm we refer to `Adam: A Method for Stochastic Optimization`_.
    """
    + rf"""
    Args:
        {_params_doc}
        lr (float, Tensor, optional): learning rate (default: 2e-3)
        betas (Tuple[float, float], optional): coefficients used for computing
            running averages of gradient and its square
        eps (float, optional): term added to the denominator to improve
            numerical stability (default: 1e-8)
        weight_decay (float, optional): weight decay (L2 penalty) (default: 0)
        {_foreach_doc}
        {_maximize_doc}
        {_differentiable_doc}
        {_capturable_doc}

    .. _Adam\: A Method for Stochastic Optimization:
        https://arxiv.org/abs/1412.6980

    """
)


def _single_tensor_adamax(
    params: list[Tensor], grads: list[Tensor], exp_avgs: list[Tensor],
    exp_infs: list[Tensor], state_steps: list[Tensor], *,
    eps: float, beta1: float, beta2: float, lr: float, weight_decay: float,
    maximize: bool, differentiable: bool, capturable: bool, has_complex: bool,
):
    if not torch.jit.is_scripting():
        lr = _to_scalar(lr)

    for i, param in enumerate(params):
        grad = grads[i]
        grad = grad if not maximize else -grad
        exp_avg = exp_avgs[i]
        exp_inf = exp_infs[i]
        step_t = state_steps[i]

        # When capturable, params and state steps must live on a supported device.
        if not torch.compiler.is_compiling() and capturable:
            capturable_supported_devices = _get_capturable_supported_devices()
            assert (
                param.device.type == step_t.device.type
                and param.device.type in capturable_supported_devices
            ), f"If capturable=True, params and state_steps must be on supported devices: {capturable_supported_devices}."

        # update step
        step_t += 1

        if weight_decay != 0:
            grad = grad.add(param, alpha=weight_decay)

        if torch.is_complex(param):
            param = torch.view_as_real(param)
            grad = torch.view_as_real(grad)
            exp_avg = torch.view_as_real(exp_avg)
            exp_inf = torch.view_as_real(exp_inf)

        # Update biased first moment estimate.
        exp_avg.lerp_(grad, 1 - beta1)
        # Update the exponentially weighted infinity norm.
        if not differentiable:
            torch.maximum(exp_inf.mul_(beta2), grad.abs().add_(eps), out=exp_inf)
        else:
            norm_buf = torch.cat(
                [exp_inf.mul_(beta2).unsqueeze(0), grad.abs().add_(eps).unsqueeze_(0)], 0
            )
            exp_inf.copy_(torch.amax(norm_buf, 0, keepdim=False))

        if capturable:
            # Negate the bias correction and fold lr into the denominator so the whole
            # update stays a tensor expression that is safe under CUDA graph capture.
            neg_bias_correction = beta1**step_t - 1
            neg_bias_correction.div_(lr)
            denom = exp_inf * neg_bias_correction
            param.addcdiv_(exp_avg, denom)
        else:
            bias_correction = 1 - beta1 ** _get_value(step_t)
            clr = lr / bias_correction
            param.addcdiv_(exp_avg, exp_inf, value=-clr)


def _multi_tensor_adamax(
    params: list[Tensor], grads: list[Tensor], exp_avgs: list[Tensor],
    exp_infs: list[Tensor], state_steps: list[Tensor], *,
    eps: float, beta1: float, beta2: float, lr: float, weight_decay: float,
    maximize: bool, differentiable: bool, capturable: bool, has_complex: bool,
):
    assert not differentiable, "_foreach ops don't support autograd"

    if len(params) == 0:
        return

    if not torch.compiler.is_compiling() and capturable:
        capturable_supported_devices = _get_capturable_supported_devices(supports_xla=False)
        assert all(
            p.device.type == step.device.type
            and p.device.type in capturable_supported_devices
            for p, step in zip(params, state_steps)
        ), f"If capturable=True, params and state_steps must be on supported devices: {capturable_supported_devices}."

    grouped_tensors = Optimizer._group_tensors_by_device_and_dtype(
        [params, grads, exp_avgs, exp_infs, state_steps]  # type: ignore[list-item]
    )
    for (
        grouped_params_, grouped_grads_, grouped_exp_avgs_,
        grouped_exp_infs_, grouped_state_steps_,
    ), _ in grouped_tensors.values():
        grouped_params = cast(list[Tensor], grouped_params_)
        grouped_grads = cast(list[Tensor], grouped_grads_)
        grouped_exp_avgs = cast(list[Tensor], grouped_exp_avgs_)
        grouped_exp_infs = cast(list[Tensor], grouped_exp_infs_)
        grouped_state_steps = cast(list[Tensor], grouped_state_steps_)

        if has_complex:
            _view_as_real(grouped_params, grouped_grads, grouped_exp_avgs, grouped_exp_infs)

        if maximize:
            grouped_grads = torch._foreach_neg(grouped_grads)  # type: ignore[assignment]

        # Update steps. If steps are on CPU, foreach falls back to a slow per-tensor loop,
        # so wrap the increment in a tensor once instead of once per step tensor.
        if not torch.compiler.is_compiling() and grouped_state_steps[0].is_cpu:
            torch._foreach_add_(
                grouped_state_steps, torch.tensor(1.0, device="cpu"), alpha=1.0
            )
        else:
            torch._foreach_add_(grouped_state_steps, 1)

        if weight_decay != 0:
            if maximize:
                # Re-use the intermediate memory (grouped_grads) already allocated for maximize
                torch._foreach_add_(grouped_grads, grouped_params, alpha=weight_decay)
            else:
                grouped_grads = torch._foreach_add(  # type: ignore[assignment]
                    grouped_grads, grouped_params, alpha=weight_decay
                )

        # Update biased first moment estimate.
        torch._foreach_lerp_(grouped_exp_avgs, grouped_grads, 1 - beta1)

        # Update the exponentially weighted infinity norm.
        torch._foreach_mul_(grouped_exp_infs, beta2)

        # Introduce a copy of the grads if one has not been made above.
        if not maximize and weight_decay == 0:
            grouped_grads = torch._foreach_abs(grouped_grads)  # type: ignore[assignment]
        else:
            torch._foreach_abs_(grouped_grads)

        torch._foreach_add_(grouped_grads, eps)
        torch._foreach_maximum_(grouped_exp_infs, grouped_grads)

        if capturable:
            bias_corrections = torch._foreach_pow(beta1, grouped_state_steps)
            # foreach_sub doesn't allow a scalar as the first arg
            torch._foreach_sub_(bias_corrections, 1)
            torch._foreach_div_(bias_corrections, lr)

            denom = torch._foreach_mul(grouped_exp_infs, bias_corrections)
            torch._foreach_addcdiv_(grouped_params, grouped_exp_avgs, denom)
        else:
            bias_corrections = [
                1 - beta1 ** _get_value(step) for step in grouped_state_steps
            ]
            step_size = [(_get_value(lr) / bc) * -1 for bc in bias_corrections]
            torch._foreach_addcdiv_(
                grouped_params, grouped_exp_avgs, grouped_exp_infs, step_size
            )
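
# Editorial sanity-check sketch, not part of torch: with weight_decay=0, one step of
# the update implemented above gives
#   m_1 = (1 - beta1) * g,   u_1 = |g| + eps,
#   theta_1 = theta_0 - lr * m_1 / ((1 - beta1) * u_1)  ~=  theta_0 - lr * sign(g),
# which is easy to confirm against the Adamax class (hypothetical numbers):
#
#   p = torch.nn.Parameter(torch.tensor([1.0]))
#   opt = torch.optim.Adamax([p], lr=0.1)
#   p.grad = torch.tensor([2.0])
#   opt.step()
#   p.data   # ~ tensor([0.9000]), i.e. 1.0 - 0.1 * 2 / (2 + 1e-8)
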
@_disable_dynamo_if_unsupported(single_tensor_fn=_single_tensor_adamax)
def adamax(
    params: list[Tensor],
    grads: list[Tensor],
    exp_avgs: list[Tensor],
    exp_infs: list[Tensor],
    state_steps: list[Tensor],
    # kwonly args with defaults are not supported by functions compiled with torchscript,
    # so these stay positional-with-default for the compiled distributed-optim path
    foreach: bool | None = None,
    maximize: bool = False,
    differentiable: bool = False,
    capturable: bool = False,
    has_complex: bool = False,
    *,
    eps: float,
    beta1: float,
    beta2: float,
    lr: float,
    weight_decay: float,
):
    r"""Functional API that performs adamax algorithm computation.

    See :class:`~torch.optim.Adamax` for details.
    """
    if not torch.compiler.is_compiling() and not all(
        isinstance(t, torch.Tensor) for t in state_steps
    ):
        raise RuntimeError(
            "API has changed, `state_steps` argument must contain a list of singleton tensors"
        )

    if foreach is None:
        _, foreach = _default_to_fused_or_foreach(params, differentiable, use_fused=False)

    if foreach and torch.jit.is_scripting():
        raise RuntimeError("torch.jit.script not supported with foreach optimizers")

    if foreach and not torch.jit.is_scripting():
        func = _multi_tensor_adamax
    else:
        func = _single_tensor_adamax

    func(
        params, grads, exp_avgs, exp_infs, state_steps,
        eps=eps, beta1=beta1, beta2=beta2, lr=lr, weight_decay=weight_decay,
        maximize=maximize, differentiable=differentiable, capturable=capturable,
        has_complex=has_complex,
    )
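
# Editorial usage sketch, not part of torch: a minimal, hypothetical call to the
# functional `adamax` above. `state_steps` must hold singleton tensors, matching the
# check at the top of the function; all values here are made up.
#
#   param, grad = torch.randn(3), torch.randn(3)
#   exp_avg, exp_inf = torch.zeros(3), torch.zeros(3)
#   step = torch.tensor(0.0)
#   adamax([param], [grad], [exp_avg], [exp_inf], [step],
#          eps=1e-8, beta1=0.9, beta2=0.999, lr=2e-3, weight_decay=0.0)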