from typing import cast

import torch
from torch import Tensor

from .optimizer import (
    _capturable_doc,
    _default_to_fused_or_foreach,
    _device_dtype_check_for_fused,
    _differentiable_doc,
    _disable_dynamo_if_unsupported,
    _foreach_doc,
    _fused_doc,
    _get_capturable_supported_devices,
    _get_scalar_dtype,
    _get_value,
    _maximize_doc,
    _params_doc,
    _stack_if_compiling,
    _to_scalar,
    _use_grad_for_differentiable,
    _view_as_real,
    DeviceDict,
    DeviceDtypeDict,
    Optimizer,
    ParamsT,
)

__all__ = ["Adam", "adam"]
class Adam(Optimizer):
    def __init__(
        self,
        params: ParamsT,
        lr: float | Tensor = 1e-3,
        betas: tuple[float | Tensor, float | Tensor] = (0.9, 0.999),
        eps: float = 1e-8,
        weight_decay: float = 0,
        amsgrad: bool = False,
        *,
        foreach: bool | None = None,
        maximize: bool = False,
        capturable: bool = False,
        differentiable: bool = False,
        fused: bool | None = None,
        decoupled_weight_decay: bool = False,
    ) -> None:
        if isinstance(lr, Tensor):
            if foreach and not capturable:
                raise ValueError(
                    "lr as a Tensor is not supported for capturable=False and foreach=True"
                )
            if lr.numel() != 1:
                raise ValueError("Tensor lr must be 1-element")
        if not 0.0 <= lr:
            raise ValueError(f"Invalid learning rate: {lr}")
        if not 0.0 <= eps:
            raise ValueError(f"Invalid epsilon value: {eps}")
        if not 0.0 <= betas[0] < 1.0:
            raise ValueError(f"Invalid beta parameter at index 0: {betas[0]}")
        if not 0.0 <= betas[1] < 1.0:
            raise ValueError(f"Invalid beta parameter at index 1: {betas[1]}")
        if not 0.0 <= weight_decay:
            raise ValueError(f"Invalid weight_decay value: {weight_decay}")
        if (isinstance(betas[0], float) and not isinstance(betas[1], float)) or (
            isinstance(betas[0], Tensor) and not isinstance(betas[1], Tensor)
        ):
            raise ValueError("betas must be either both floats or both Tensors")
        if isinstance(betas[0], Tensor):
            if not capturable and foreach:
                raise ValueError(
                    "betas[0] as a Tensor is not supported for capturable=False and foreach=True"
                )
            if betas[0].numel() != 1:
                raise ValueError("Tensor betas[0] must be 1-element")
        if isinstance(betas[1], Tensor):
            if not capturable and foreach:
                raise ValueError(
                    "betas[1] as a Tensor is not supported for capturable=False and foreach=True"
                )
            if betas[1].numel() != 1:
                raise ValueError("Tensor betas[1] must be 1-element")

        defaults = dict(
            lr=lr,
            betas=betas,
            eps=eps,
            weight_decay=weight_decay,
            amsgrad=amsgrad,
            foreach=foreach,
            maximize=maximize,
            capturable=capturable,
            differentiable=differentiable,
            fused=fused,
            decoupled_weight_decay=decoupled_weight_decay,
        )
        super().__init__(params, defaults)

        if fused:
            if differentiable:
                raise RuntimeError("`fused` does not support `differentiable`")
            self._step_supports_amp_scaling = True
            if foreach:
                raise RuntimeError("`fused` and `foreach` cannot be `True` together.")

    def __setstate__(self, state):
        super().__setstate__(state)
        for group in self.param_groups:
            group.setdefault("amsgrad", False)
            group.setdefault("maximize", False)
            group.setdefault("foreach", None)
            group.setdefault("capturable", False)
            group.setdefault("differentiable", False)
            group.setdefault("decoupled_weight_decay", False)
            fused = group.setdefault("fused", None)
            for p in group["params"]:
                p_state = self.state.get(p, [])
                if len(p_state) != 0 and not torch.is_tensor(p_state["step"]):
                    step_val = float(p_state["step"])
                    p_state["step"] = (
                        torch.tensor(
                            step_val,
                            dtype=_get_scalar_dtype(is_fused=fused),
                            device=p.device,
                        )
                        if group["capturable"] or group["fused"]
                        else torch.tensor(step_val, dtype=_get_scalar_dtype())
                    )

    def _init_group(
        self,
        group,
        params_with_grad,
        grads,
        exp_avgs,
        exp_avg_sqs,
        max_exp_avg_sqs,
        state_steps,
    ):
        has_complex = False
        for p in group["params"]:
            if p.grad is None:
                continue
            has_complex |= torch.is_complex(p)
            params_with_grad.append(p)
            if p.grad.is_sparse:
                raise RuntimeError(
                    "Adam does not support sparse gradients, please consider SparseAdam instead"
                )
            grads.append(p.grad)
            state = self.state[p]

            # Lazy state initialization
            if len(state) == 0:
                if group["fused"]:
                    _device_dtype_check_for_fused(p)
                # Deliberately host `step` on CPU if both capturable and fused
                # are off, since kernel launches are costly on CUDA and XLA.
                state["step"] = (
                    torch.zeros(
                        (),
                        dtype=_get_scalar_dtype(is_fused=group["fused"]),
                        device=p.device,
                    )
                    if group["capturable"] or group["fused"]
                    else torch.tensor(0.0, dtype=_get_scalar_dtype())
                )
                # Exponential moving average of gradient values
                state["exp_avg"] = torch.zeros_like(p, memory_format=torch.preserve_format)
                # Exponential moving average of squared gradient values
                state["exp_avg_sq"] = torch.zeros_like(p, memory_format=torch.preserve_format)
                if group["amsgrad"]:
                    # Maintains max of all exp. moving avg. of sq. grad. values
                    state["max_exp_avg_sq"] = torch.zeros_like(p, memory_format=torch.preserve_format)

            exp_avgs.append(state["exp_avg"])
            exp_avg_sqs.append(state["exp_avg_sq"])
            if group["amsgrad"]:
                max_exp_avg_sqs.append(state["max_exp_avg_sq"])
            if group["differentiable"] and state["step"].requires_grad:
                raise RuntimeError(
                    "`requires_grad` is not supported for `step` in differentiable mode"
                )
            # Foreach without capturable does not support a tensor lr
            if group["foreach"] and torch.is_tensor(group["lr"]) and not group["capturable"]:
                raise RuntimeError(
                    "lr as a Tensor is not supported for capturable=False and foreach=True"
                )
            state_steps.append(state["step"])
        return has_complex

    @_use_grad_for_differentiable
    def step(self, closure=None):
        """Perform a single optimization step.

        Args:
            closure (Callable, optional): A closure that reevaluates the model
                and returns the loss.
        """
        self._cuda_graph_capture_health_check()

        loss = None
        if closure is not None:
            with torch.enable_grad():
                loss = closure()

        for group in self.param_groups:
            params_with_grad: list[Tensor] = []
            grads: list[Tensor] = []
            exp_avgs: list[Tensor] = []
            exp_avg_sqs: list[Tensor] = []
            max_exp_avg_sqs: list[Tensor] = []
            state_steps: list[Tensor] = []
            beta1, beta2 = group["betas"]

            has_complex = self._init_group(
                group,
                params_with_grad,
                grads,
                exp_avgs,
                exp_avg_sqs,
                max_exp_avg_sqs,
                state_steps,
            )

            adam(
                params_with_grad,
                grads,
                exp_avgs,
                exp_avg_sqs,
                max_exp_avg_sqs,
                state_steps,
                amsgrad=group["amsgrad"],
                has_complex=has_complex,
                beta1=beta1,
                beta2=beta2,
                lr=group["lr"],
                weight_decay=group["weight_decay"],
                eps=group["eps"],
                maximize=group["maximize"],
                foreach=group["foreach"],
                capturable=group["capturable"],
                differentiable=group["differentiable"],
                fused=group["fused"],
                grad_scale=getattr(self, "grad_scale", None),
                found_inf=getattr(self, "found_inf", None),
                decoupled_weight_decay=group["decoupled_weight_decay"],
            )

        return loss
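# A minimal usage sketch (illustrative; the toy parameter shape, learning rate,
# and loss below are assumptions, not part of this module's API surface): the
# standard zero_grad/backward/step loop that drives this optimizer.
def _example_adam_loop() -> None:  # pragma: no cover - illustrative only
    w = torch.nn.Parameter(torch.randn(3, 3))
    opt = Adam([w], lr=1e-3, betas=(0.9, 0.999))
    for _ in range(2):
        opt.zero_grad()
        loss = (w * w).sum()
        loss.backward()
        opt.step()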
PKaf  Implements Adam algorithm.

    .. math::
       \begin{aligned}
            &\rule{110mm}{0.4pt}                                                                 \\
            &\textbf{input}      : \gamma \text{ (lr)}, \beta_1, \beta_2
                \text{ (betas)},\theta_0 \text{ (params)},f(\theta) \text{ (objective)}          \\
            &\hspace{13mm}      \lambda \text{ (weight decay)},  \: \textit{amsgrad},
                \:\textit{maximize},  \: \epsilon \text{ (epsilon)}                              \\
            &\textbf{initialize} :  m_0 \leftarrow 0 \text{ ( first moment)},
                v_0\leftarrow 0 \text{ (second moment)},\: v_0^{max}\leftarrow 0          \\[-1.ex]
            &\rule{110mm}{0.4pt}                                                                 \\
            &\textbf{for} \: t=1 \: \textbf{to} \: \ldots \: \textbf{do}                         \\

            &\hspace{5mm}\textbf{if} \: \textit{maximize}:                                       \\
            &\hspace{10mm}g_t           \leftarrow   -\nabla_{\theta} f_t (\theta_{t-1})         \\
            &\hspace{5mm}\textbf{else}                                                           \\
            &\hspace{10mm}g_t           \leftarrow   \nabla_{\theta} f_t (\theta_{t-1})          \\
            &\hspace{5mm}\textbf{if} \: \lambda \neq 0                                           \\
            &\hspace{10mm} g_t \leftarrow g_t + \lambda  \theta_{t-1}                            \\
            &\hspace{5mm}m_t           \leftarrow   \beta_1 m_{t-1} + (1 - \beta_1) g_t          \\
            &\hspace{5mm}v_t           \leftarrow   \beta_2 v_{t-1} + (1-\beta_2) g^2_t          \\
            &\hspace{5mm}\widehat{m_t} \leftarrow   m_t/\big(1-\beta_1^t \big)                   \\
            &\hspace{5mm}\textbf{if} \: amsgrad                                                  \\
            &\hspace{10mm} v_t^{max} \leftarrow \mathrm{max}(v_{t-1}^{max},v_t)                  \\
            &\hspace{10mm}\widehat{v_t} \leftarrow v_t^{max}/\big(1-\beta_2^t \big)              \\
            &\hspace{5mm}\textbf{else}                                                           \\
            &\hspace{10mm}\widehat{v_t} \leftarrow   v_t/\big(1-\beta_2^t \big)                  \\
            &\hspace{5mm}\theta_t \leftarrow \theta_{t-1} - \gamma \widehat{m_t}/
                \big(\sqrt{\widehat{v_t}} + \epsilon \big)                                       \\
            &\rule{110mm}{0.4pt}                                                          \\[-1.ex]
            &\bf{return} \:  \theta_t                                                     \\[-1.ex]
            &\rule{110mm}{0.4pt}                                                          \\[-1.ex]
       \end{aligned}

    For further details regarding the algorithm we refer to `Adam: A Method for Stochastic Optimization`_.
    """
    + rf"""
    Args:
        {_params_doc}
        lr (float, Tensor, optional): learning rate (default: 1e-3). A tensor LR
            is not yet supported for all our implementations. Please use a float
            LR if you are not also specifying fused=True or capturable=True.
        betas (tuple[Union[float, Tensor], Union[float, Tensor]], optional):
            coefficients used for computing running averages of gradient and
            its square. If a tensor is provided, must be 1-element. (default: (0.9, 0.999))
        eps (float, optional): term added to the denominator to improve
            numerical stability (default: 1e-8)
        weight_decay (float, optional): weight decay (L2 penalty) (default: 0)
        decoupled_weight_decay (bool, optional): if True, this optimizer is
            equivalent to AdamW and the algorithm will not accumulate weight
            decay in the momentum nor variance. (default: False)
        amsgrad (bool, optional): whether to use the AMSGrad variant of this
            algorithm from the paper `On the Convergence of Adam and Beyond`_
            (default: False)
        {_foreach_doc}
        {_maximize_doc}
        {_capturable_doc}
        {_differentiable_doc}
        {_fused_doc}
    """
    + r"""
    .. Note::
        A prototype implementation of Adam and AdamW for MPS supports `torch.float32` and `torch.float16`.
    .. _Adam\: A Method for Stochastic Optimization:
        https://arxiv.org/abs/1412.6980
    .. _On the Convergence of Adam and Beyond:
        https://openreview.net/forum?id=ryQu7f-RZ

    """
)


def _single_tensor_adam(
    params: list[Tensor],
    grads: list[Tensor],
    exp_avgs: list[Tensor],
    exp_avg_sqs: list[Tensor],
    max_exp_avg_sqs: list[Tensor],
    state_steps: list[Tensor],
    grad_scale: Tensor | None,
    found_inf: Tensor | None,
    *,
    amsgrad: bool,
    has_complex: bool,
    beta1: float | Tensor,
    beta2: float | Tensor,
    lr: float | Tensor,
    weight_decay: float,
    eps: float,
    maximize: bool,
    capturable: bool,
    differentiable: bool,
    decoupled_weight_decay: bool,
) -> None:
    assert grad_scale is None and found_inf is None, (
        "Expected grad_scale and found_inf to be None"
    )

    if torch.jit.is_scripting():
        # The ops below have overloads for both float and Tensor lr/betas, but
        # jit.script cannot see that, so require plain floats there.
        if not isinstance(lr, float):
            raise AssertionError(f"Expected lr to be a float, but got {type(lr)}")
        if not isinstance(beta1, float):
            raise AssertionError(f"Expected beta1 to be a float, but got {type(beta1)}")
        if not isinstance(beta2, float):
            raise AssertionError(f"Expected beta2 to be a float, but got {type(beta2)}")
    else:
        lr = _to_scalar(lr)
        beta1 = _to_scalar(beta1)
        beta2 = _to_scalar(beta2)

    # We only shuffle around the beta1 when it is a Tensor, otherwise we prefer
    # treating it as a scalar.
    beta1_dict: DeviceDtypeDict | None = (
        {(beta1.device, beta1.dtype): beta1} if isinstance(beta1, Tensor) else None
    )

    for i, param in enumerate(params):
        grad = grads[i] if not maximize else -grads[i]
        exp_avg = exp_avgs[i]
        exp_avg_sq = exp_avg_sqs[i]
        step_t = state_steps[i]

        # If compiling, the compiler handles cudagraph checks.
        if not torch.compiler.is_compiling() and capturable:
            capturable_supported_devices = _get_capturable_supported_devices()
            assert (
                param.device.type == step_t.device.type
                and param.device.type in capturable_supported_devices
            ), (
                "If capturable=True, params and state_steps must be on supported devices: "
                f"{capturable_supported_devices}."
            )

        # update step
        step_t += 1

        if weight_decay != 0:
            if decoupled_weight_decay:
                # Perform stepweight decay (AdamW-style)
                param.mul_(1 - lr * weight_decay)
            else:
                # Nested if is necessary to bypass jitscript rules
                if differentiable and isinstance(weight_decay, Tensor):
                    if weight_decay.requires_grad:
                        grad = grad.addcmul_(param.clone(), weight_decay)
                    else:
                        grad = grad.add(param, alpha=weight_decay)
                else:
                    grad = grad.add(param, alpha=weight_decay)

        if torch.is_complex(param):
            grad = torch.view_as_real(grad)
            exp_avg = torch.view_as_real(exp_avg)
            exp_avg_sq = torch.view_as_real(exp_avg_sq)
            if amsgrad:
                max_exp_avg_sqs[i] = torch.view_as_real(max_exp_avg_sqs[i])
            param = torch.view_as_real(param)

        device = param.device
        if beta1_dict is not None:
            dtype = param.dtype
            key = (device, dtype)
            if key not in beta1_dict:
                beta1_dict[key] = beta1.to(device=device, dtype=dtype, non_blocking=True)
            device_beta1: float | Tensor = beta1_dict[key]
        else:
            device_beta1 = beta1

        # Decay the first and second moment running average coefficient
        exp_avg.lerp_(grad, 1 - device_beta1)
        exp_avg_sq.mul_(beta2).addcmul_(grad, grad.conj(), value=1 - beta2)

        if capturable or differentiable:
            step = step_t

            bias_correction1 = 1 - beta1**step
            bias_correction2 = 1 - beta2**step

            step_size = lr / bias_correction1
            step_size_neg = step_size.neg()

            bias_correction2_sqrt = bias_correction2.sqrt()

            if amsgrad:
                # Maintains the maximum of all 2nd moment running avg. till now
                if differentiable:
                    max_exp_avg_sq = max_exp_avg_sqs[i].clone()
                else:
                    max_exp_avg_sq = max_exp_avg_sqs[i]

                max_exp_avg_sqs[i].copy_(torch.maximum(max_exp_avg_sq, exp_avg_sq))

                # Uses the max. for normalizing running avg. of gradient.
                # Folds the 1-elem step_size math into denom to avoid extra
                # param-size copies.
                denom = (
                    max_exp_avg_sqs[i].sqrt() / (bias_correction2_sqrt * step_size_neg)
                ).add_(eps / step_size_neg)
            else:
                denom = (
                    exp_avg_sq.sqrt() / (bias_correction2_sqrt * step_size_neg)
                ).add_(eps / step_size_neg)

            param.addcdiv_(exp_avg, denom)
        else:
            step = _get_value(step_t)

            bias_correction1 = 1 - beta1**step
            bias_correction2 = 1 - beta2**step

            step_size = lr / bias_correction1
            bias_correction2_sqrt = bias_correction2**0.5

            if amsgrad:
                # Maintains the maximum of all 2nd moment running avg. till now
                torch.maximum(max_exp_avg_sqs[i], exp_avg_sq, out=max_exp_avg_sqs[i])
                # Use the max. for normalizing running avg. of gradient
                denom = (max_exp_avg_sqs[i].sqrt() / bias_correction2_sqrt).add_(eps)
            else:
                denom = (exp_avg_sq.sqrt() / bias_correction2_sqrt).add_(eps)

            param.addcdiv_(exp_avg, denom, value=-step_size)

        # Lastly, switch back to complex view
        if amsgrad and torch.is_complex(params[i]):
            max_exp_avg_sqs[i] = torch.view_as_complex(max_exp_avg_sqs[i])
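# A small illustrative check (a sketch under assumed hyperparameters, not part
# of the public API): it verifies one optimizer step on a scalar parameter
# against the closed-form update from the docstring above,
#   theta_1 = theta_0 - lr * m_hat / (sqrt(v_hat) + eps),
# where at t = 1 the bias corrections give m_hat = g and v_hat = g**2.
def _check_single_step_math() -> None:  # pragma: no cover - illustrative only
    lr, beta1, beta2, eps = 1e-3, 0.9, 0.999, 1e-8
    theta = torch.nn.Parameter(torch.tensor([1.0]))
    opt = Adam([theta], lr=lr, betas=(beta1, beta2), eps=eps)
    g = 2.0 * theta.detach()  # gradient of theta**2 at theta_0
    (theta**2).sum().backward()
    opt.step()
    expected = 1.0 - lr * g / (g.abs() + eps)  # closed-form first step
    assert torch.allclose(theta.detach(), expected, atol=1e-6)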
def _multi_tensor_adam(
    params: list[Tensor],
    grads: list[Tensor],
    exp_avgs: list[Tensor],
    exp_avg_sqs: list[Tensor],
    max_exp_avg_sqs: list[Tensor],
    state_steps: list[Tensor],
    grad_scale: Tensor | None,
    found_inf: Tensor | None,
    *,
    amsgrad: bool,
    has_complex: bool,
    beta1: float | Tensor,
    beta2: float | Tensor,
    lr: float | Tensor,
    weight_decay: float,
    eps: float,
    maximize: bool,
    capturable: bool,
    differentiable: bool,
    decoupled_weight_decay: bool,
) -> None:
    if len(params) == 0:
        return

    if isinstance(lr, Tensor):
        if not capturable:
            raise ValueError(
                "lr as a Tensor is not supported for capturable=False and foreach=True"
            )
        if lr.numel() != 1:
            raise ValueError("Tensor lr must be 1-element")
    if isinstance(beta1, Tensor):
        if not capturable:
            raise ValueError(
                "beta1 as a Tensor is not supported for capturable=False and foreach=True"
            )
        if beta1.numel() != 1:
            raise ValueError("Tensor beta1 must be 1-element")
    if isinstance(beta2, Tensor):
        if not capturable:
            raise ValueError(
                "beta2 as a Tensor is not supported for capturable=False and foreach=True"
            )
        if beta2.numel() != 1:
            raise ValueError("Tensor beta2 must be 1-element")

    # If compiling, the compiler handles cudagraph checks.
    if not torch.compiler.is_compiling() and capturable:
        capturable_supported_devices = _get_capturable_supported_devices(
            supports_xla=False
        )
        assert all(
            p.device.type == step.device.type
            and p.device.type in capturable_supported_devices
            for p, step in zip(params, state_steps, strict=True)
        ), (
            "If capturable=True, params and state_steps must be on supported devices: "
            f"{capturable_supported_devices}."
        )

    assert grad_scale is None and found_inf is None, (
        "Expected grad_scale and found_inf to be None"
    )
    assert not differentiable, "_foreach ops don't support autograd"

    grouped_tensors = Optimizer._group_tensors_by_device_and_dtype(
        [params, grads, exp_avgs, exp_avg_sqs, max_exp_avg_sqs, state_steps]
    )

    # We only shuffle around the beta1 when it is a Tensor off the CPU,
    # otherwise we prefer treating it as a scalar.
    beta1_dict: DeviceDict | None = (
        {beta1.device: beta1}
        if isinstance(beta1, Tensor) and str(beta1.device) != "cpu"
        else None
    )

    for (
        device_params_,
        device_grads_,
        device_exp_avgs_,
        device_exp_avg_sqs_,
        device_max_exp_avg_sqs_,
        device_state_steps_,
    ), _ in grouped_tensors.values():
        device_params = cast(list[Tensor], device_params_)
        device_grads = cast(list[Tensor], device_grads_)
        device_exp_avgs = cast(list[Tensor], device_exp_avgs_)
        device_exp_avg_sqs = cast(list[Tensor], device_exp_avg_sqs_)
        device_state_steps = cast(list[Tensor], device_state_steps_)

        device = device_params[0].device
        if beta1_dict is not None and device not in beta1_dict:
            beta1_dict[device] = beta1.to(device=device, non_blocking=True)
        device_beta1 = beta1_dict[device] if beta1_dict else beta1

        if has_complex:
            if amsgrad:
                device_max_exp_avg_sqs = cast(list[Tensor], device_max_exp_avg_sqs_)
                _view_as_real(
                    device_params,
                    device_grads,
                    device_exp_avgs,
                    device_exp_avg_sqs,
                    device_max_exp_avg_sqs,
                )
            else:
                _view_as_real(
                    device_params, device_grads, device_exp_avgs, device_exp_avg_sqs
                )

        if maximize:
            device_grads = torch._foreach_neg(device_grads)

        # Update steps. If steps are on CPU, foreach falls back to a slow
        # per-tensor loop, so wrap the 1 into a Tensor once up front; alpha is
        # required to hit the fused overload.
        if not torch.compiler.is_compiling() and device_state_steps[0].is_cpu:
            torch._foreach_add_(
                device_state_steps, torch.tensor(1.0, device="cpu"), alpha=1.0
            )
        else:
            torch._foreach_add_(device_state_steps, 1)

        if weight_decay != 0:
            if decoupled_weight_decay:
                # Perform stepweight decay (AdamW-style)
                torch._foreach_mul_(device_params, 1 - lr * weight_decay)
            else:
                # Re-use the intermediate memory already allocated for maximize
                if maximize:
                    torch._foreach_add_(device_grads, device_params, alpha=weight_decay)
                else:
                    device_grads = torch._foreach_add(
                        device_grads, device_params, alpha=weight_decay
                    )

        # Decay the first and second moment running average coefficient
        torch._foreach_lerp_(device_exp_avgs, device_grads, 1 - device_beta1)
        torch._foreach_mul_(device_exp_avg_sqs, beta2)

        # The strict _foreach APIs can't take a Tensor scalar for the addcmul
        # value, so pre-scale the grads when beta2 is a Tensor.
        if isinstance(beta2, Tensor):
            scaled_device_grads = torch._foreach_mul(device_grads, 1 - beta2)
            value = 1.0
        else:
            scaled_device_grads = device_grads
            value = 1 - beta2
        torch._foreach_addcmul_(
            device_exp_avg_sqs, scaled_device_grads, device_grads, value
        )
        del scaled_device_grads

        if capturable:
            # torch.pow accepts both float and 1-element Tensor betas here.
            bias_correction1 = [torch.pow(beta1, step) for step in device_state_steps]
            bias_correction2 = [torch.pow(beta2, step) for step in device_state_steps]
            # foreach_sub doesn't allow a scalar as the first arg
            torch._foreach_sub_(bias_correction1, 1)
            torch._foreach_sub_(bias_correction2, 1)
            # bias_correction1 is deliberately left negative: the final addcdiv
            # then subtracts the update without an extra negation.
            torch._foreach_neg_(bias_correction2)
            torch._foreach_div_(bias_correction1, lr)
            torch._foreach_reciprocal_(bias_correction1)
            torch._foreach_sqrt_(bias_correction2)

            step_size = bias_correction1  # == -lr / (1 - beta1**t)
            bias_correction2_sqrt = bias_correction2

            if amsgrad:
                device_max_exp_avg_sqs = cast(list[Tensor], device_max_exp_avg_sqs_)
                # Maintains the maximum of all 2nd moment running avg. till now
                torch._foreach_maximum_(device_max_exp_avg_sqs, device_exp_avg_sqs)
                # Use the max. for normalizing running avg. of gradient
                exp_avg_sq_sqrt = torch._foreach_sqrt(device_max_exp_avg_sqs)
            else:
                exp_avg_sq_sqrt = torch._foreach_sqrt(device_exp_avg_sqs)

            torch._foreach_div_(exp_avg_sq_sqrt, bias_correction2_sqrt)
            torch._foreach_add_(exp_avg_sq_sqrt, eps)
            # Fold the (negative) step size into the denominator; a plain
            # addcdiv then applies param += exp_avg / denom.
            torch._foreach_div_(exp_avg_sq_sqrt, step_size)
            torch._foreach_addcdiv_(device_params, device_exp_avgs, exp_avg_sq_sqrt)
        else:
            bias_correction1 = [
                1 - beta1 ** _get_value(step) for step in device_state_steps
            ]
            bias_correction2 = [
                1 - beta2 ** _get_value(step) for step in device_state_steps
            ]

            step_size = _stack_if_compiling([(lr / bc) * -1 for bc in bias_correction1])
            bias_correction2_sqrt = [bc**0.5 for bc in bias_correction2]

            if amsgrad:
                device_max_exp_avg_sqs = cast(list[Tensor], device_max_exp_avg_sqs_)
                # Maintains the maximum of all 2nd moment running avg. till now
                torch._foreach_maximum_(device_max_exp_avg_sqs, device_exp_avg_sqs)
                # Use the max. for normalizing running avg. of gradient
                exp_avg_sq_sqrt = torch._foreach_sqrt(device_max_exp_avg_sqs)
            else:
                exp_avg_sq_sqrt = torch._foreach_sqrt(device_exp_avg_sqs)

            torch._foreach_div_(exp_avg_sq_sqrt, bias_correction2_sqrt)
            torch._foreach_add_(exp_avg_sq_sqrt, eps)
            torch._foreach_addcdiv_(
                device_params, device_exp_avgs, exp_avg_sq_sqrt, step_size
            )
def _fused_adam(
    params: list[Tensor],
    grads: list[Tensor],
    exp_avgs: list[Tensor],
    exp_avg_sqs: list[Tensor],
    max_exp_avg_sqs: list[Tensor],
    state_steps: list[Tensor],
    grad_scale: Tensor | None,
    found_inf: Tensor | None,
    *,
    amsgrad: bool,
    has_complex: bool,  # Needed for consistency.
    beta1: float,
    beta2: float,
    lr: float | Tensor,
    weight_decay: float,
    eps: float,
    maximize: bool,
    capturable: bool,  # Needed for consistency.
    differentiable: bool,
    decoupled_weight_decay: bool,
) -> None:
    if not params:
        return
    if differentiable:
        raise RuntimeError("Adam with fused=True does not support differentiable=True")

    grad_scale_dict: DeviceDict = (
        {grad_scale.device: grad_scale} if grad_scale is not None else {}
    )
    found_inf_dict: DeviceDict = (
        {found_inf.device: found_inf} if found_inf is not None else {}
    )

    # We only shuffle around the lr when it is a Tensor off the CPU, otherwise
    # we prefer treating it as a scalar.
    lr_dict: DeviceDict | None = (
        {lr.device: lr} if isinstance(lr, Tensor) and str(lr.device) != "cpu" else None
    )

    grouped_tensors = Optimizer._group_tensors_by_device_and_dtype(
        [params, grads, exp_avgs, exp_avg_sqs, max_exp_avg_sqs, state_steps]
    )
    for (device, _), (
        (
            device_params_,
            device_grads_,
            device_exp_avgs_,
            device_exp_avg_sqs_,
            device_max_exp_avg_sqs,
            device_state_steps_,
        ),
        _,
    ) in grouped_tensors.items():
        device_params = cast(list[Tensor], device_params_)
        device_grads = cast(list[Tensor], device_grads_)
        device_exp_avgs = cast(list[Tensor], device_exp_avgs_)
        device_exp_avg_sqs = cast(list[Tensor], device_exp_avg_sqs_)
        device_state_steps = cast(list[Tensor], device_state_steps_)

        device_grad_scale, device_found_inf = None, None
        if grad_scale is not None:
            device_grad_scale = grad_scale_dict.setdefault(
                device, grad_scale.to(device, non_blocking=True)
            )
        if found_inf is not None:
            device_found_inf = found_inf_dict.setdefault(
                device, found_inf.to(device, non_blocking=True)
            )
        if lr_dict is not None and device not in lr_dict:
            lr = lr_dict.setdefault(device, lr.to(device=device, non_blocking=True))

        torch._foreach_add_(device_state_steps, 1)
        func = torch._fused_adamw_ if decoupled_weight_decay else torch._fused_adam_
        func(
            device_params,
            device_grads,
            device_exp_avgs,
            device_exp_avg_sqs,
            cast(list[Tensor], device_max_exp_avg_sqs),
            device_state_steps,
            amsgrad=amsgrad,
            lr=lr,
            beta1=beta1,
            beta2=beta2,
            weight_decay=weight_decay,
            eps=eps,
            maximize=maximize,
            grad_scale=device_grad_scale,
            found_inf=device_found_inf,
        )
        if device_found_inf is not None:
            torch._foreach_sub_(
                device_state_steps, [device_found_inf] * len(device_state_steps)
            )


@_disable_dynamo_if_unsupported(single_tensor_fn=_single_tensor_adam)
def adam(
    params: list[Tensor],
    grads: list[Tensor],
    exp_avgs: list[Tensor],
    exp_avg_sqs: list[Tensor],
    max_exp_avg_sqs: list[Tensor],
    state_steps: list[Tensor],
    # kwonly args with defaults are not supported by functions compiled with
    # torchscript; kept as kwargs with defaults since the functional API is
    # compiled by torch/distributed/optim.
    foreach: bool | None = None,
    capturable: bool = False,
    differentiable: bool = False,
    fused: bool | None = None,
    grad_scale: Tensor | None = None,
    found_inf: Tensor | None = None,
    has_complex: bool = False,
    decoupled_weight_decay: bool = False,
    *,
    amsgrad: bool,
    beta1: float | Tensor,
    beta2: float | Tensor,
    lr: float | Tensor,
    weight_decay: float,
    eps: float,
    maximize: bool,
) -> None:
    r"""Functional API that performs Adam algorithm computation.

    See :class:`~torch.optim.Adam` for details.
    """
    # Respect user input for foreach or fused; only pick a default when neither
    # was specified. Defaults to foreach (use_fused=False) to give the fused
    # implementation bake-in time before making it the default.
    if fused is None and foreach is None:
        _, foreach = _default_to_fused_or_foreach(params, differentiable, use_fused=False)
        # Do not flip on foreach for the unsupported case where lr is a Tensor
        # and capturable=False.
        if foreach and isinstance(lr, Tensor) and not capturable:
            foreach = False
    if fused is None:
        fused = False
    if foreach is None:
        foreach = False

    # This check is slow during compilation, so it is skipped there.
    if not torch.compiler.is_compiling() and not all(
        isinstance(t, torch.Tensor) for t in state_steps
    ):
        raise RuntimeError(
            "API has changed, `state_steps` argument must contain a list of singleton tensors"
        )

    if foreach and torch.jit.is_scripting():
        raise RuntimeError("torch.jit.script not supported with foreach optimizers")
    if fused and torch.jit.is_scripting():
        raise RuntimeError("torch.jit.script not supported with fused optimizers")

    if fused and not torch.jit.is_scripting():
        func = _fused_adam
    elif foreach and not torch.jit.is_scripting():
        func = _multi_tensor_adam
    else:
        func = _single_tensor_adam

    func(
        params,
        grads,
        exp_avgs,
        exp_avg_sqs,
        max_exp_avg_sqs,
        state_steps,
        amsgrad=amsgrad,
        has_complex=has_complex,
        beta1=beta1,
        beta2=beta2,
        lr=lr,
        weight_decay=weight_decay,
        eps=eps,
        maximize=maximize,
        capturable=capturable,
        differentiable=differentiable,
        grad_scale=grad_scale,
        found_inf=found_inf,
        decoupled_weight_decay=decoupled_weight_decay,
    )
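# A minimal sketch of calling the functional API directly (illustrative; the
# toy tensors and hyperparameters are assumptions): Adam.step() assembles
# exactly these per-parameter state lists before dispatching to adam().
def _example_functional_adam() -> None:  # pragma: no cover - illustrative only
    p, g = torch.randn(4), torch.randn(4)
    adam(
        [p],  # params
        [g],  # grads
        [torch.zeros(4)],  # exp_avgs (first-moment state)
        [torch.zeros(4)],  # exp_avg_sqs (second-moment state)
        [],  # max_exp_avg_sqs (unused when amsgrad=False)
        [torch.tensor(0.0)],  # state_steps (singleton step counters)
        amsgrad=False,
        beta1=0.9,
        beta2=0.999,
        lr=1e-3,
        weight_decay=0.0,
        eps=1e-8,
        maximize=False,
    )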