from typing import Any, cast

import torch
from torch import Tensor

from .optimizer import (
    _capturable_doc,
    _default_to_fused_or_foreach,
    _differentiable_doc,
    _disable_dynamo_if_unsupported,
    _foreach_doc,
    _get_capturable_supported_devices,
    _get_scalar_dtype,
    _maximize_doc,
    _params_doc,
    _to_scalar,
    _use_grad_for_differentiable,
    _view_as_real,
    Optimizer,
    ParamsT,
)


__all__ = ["Adadelta", "adadelta"]


class Adadelta(Optimizer):
    def __init__(
        self,
        params: ParamsT,
        lr: float | Tensor = 1.0,
        rho: float = 0.9,
        eps: float = 1e-6,
        weight_decay: float = 0,
        foreach: bool | None = None,
        *,
        capturable: bool = False,
        maximize: bool = False,
        differentiable: bool = False,
    ) -> None:
        if isinstance(lr, Tensor) and lr.numel() != 1:
            raise ValueError("Tensor lr must be 1-element")
        if not 0.0 <= lr:
            raise ValueError(f"Invalid learning rate: {lr}")
        if not 0.0 <= rho <= 1.0:
            raise ValueError(f"Invalid rho value: {rho}")
        if not 0.0 <= eps:
            raise ValueError(f"Invalid epsilon value: {eps}")
        if not 0.0 <= weight_decay:
            raise ValueError(f"Invalid weight_decay value: {weight_decay}")

        defaults = {
            "lr": lr,
            "rho": rho,
            "eps": eps,
            "weight_decay": weight_decay,
            "maximize": maximize,
            "capturable": capturable,
            "foreach": foreach,
            "differentiable": differentiable,
        }
        super().__init__(params, defaults)

    def __setstate__(self, state):
        super().__setstate__(state)
        for group in self.param_groups:
            group.setdefault("foreach", None)
            group.setdefault("maximize", False)
            group.setdefault("differentiable", False)
            group.setdefault("capturable", False)
            for p in group["params"]:
                p_state = self.state.get(p, [])
                # Older checkpoints stored `step` as a plain float; convert it
                # to a tensor of the canonical scalar dtype.
                if len(p_state) != 0 and not torch.is_tensor(p_state["step"]):
                    step_val = float(p_state["step"])
                    p_state["step"] = (
                        torch.tensor(
                            step_val, dtype=_get_scalar_dtype(), device=p.device
                        )
                        if group["capturable"]
                        else torch.tensor(step_val, dtype=_get_scalar_dtype())
                    )

    def _init_group(
        self,
        group: dict[str, Any],
        params_with_grad: list[Tensor],
        grads: list[Tensor],
        square_avgs: list[Tensor],
        acc_deltas: list[Tensor],
        state_steps: list[Tensor],
    ):
        has_complex = False
        for p in group["params"]:
            if p.grad is None:
                continue
            has_complex |= torch.is_complex(p)
            params_with_grad.append(p)
            if p.grad.is_sparse:
                raise RuntimeError("Adadelta does not support sparse gradients")
            grads.append(p.grad)

            state = self.state[p]

            # Lazy state initialization
            if len(state) == 0:
                state["step"] = (
                    torch.zeros((), dtype=_get_scalar_dtype(), device=p.device)
                    if group["capturable"]
                    else torch.zeros((), dtype=_get_scalar_dtype())
                )
                state["square_avg"] = torch.zeros_like(
                    p, memory_format=torch.preserve_format
                )
                state["acc_delta"] = torch.zeros_like(
                    p, memory_format=torch.preserve_format
                )

            square_avgs.append(state["square_avg"])
            acc_deltas.append(state["acc_delta"])
            state_steps.append(state["step"])

        return has_complex

    @_use_grad_for_differentiable
    def step(self, closure=None):
        """Perform a single optimization step.

        Args:
            closure (Callable, optional): A closure that reevaluates the model
                and returns the loss.
        """
        self._cuda_graph_capture_health_check()

        loss = None
        if closure is not None:
            with torch.enable_grad():
                loss = closure()

        for group in self.param_groups:
            params_with_grad: list[Tensor] = []
            grads: list[Tensor] = []
            square_avgs: list[Tensor] = []
            acc_deltas: list[Tensor] = []
            state_steps: list[Tensor] = []
            (
                lr,
                rho,
                eps,
                weight_decay,
                foreach,
                maximize,
                differentiable,
                capturable,
            ) = (
                group["lr"],
                group["rho"],
                group["eps"],
                group["weight_decay"],
                group["foreach"],
                group["maximize"],
                group["differentiable"],
                group["capturable"],
            )

            has_complex = self._init_group(
                group, params_with_grad, grads, square_avgs, acc_deltas, state_steps
            )

            adadelta(
                params_with_grad,
                grads,
                square_avgs,
                acc_deltas,
                state_steps,
                lr=lr,
                rho=rho,
                eps=eps,
                weight_decay=weight_decay,
                foreach=foreach,
                maximize=maximize,
                differentiable=differentiable,
                capturable=capturable,
                has_complex=has_complex,
            )

        return loss


Adadelta.__doc__ = (
    r"""Implements Adadelta algorithm.

    .. math::
       \begin{aligned}
            &\rule{110mm}{0.4pt}                                                                 \\
            &\textbf{input}      : \gamma \text{ (lr)}, \: \theta_0 \text{ (params)},
                \: f(\theta) \text{ (objective)}, \: \rho \text{ (decay)},
                \: \lambda \text{ (weight decay)}                                                \\
            &\textbf{initialize} :  v_0  \leftarrow 0 \: \text{ (square avg)},
                \: u_0 \leftarrow 0 \: \text{ (accumulate variables)}                     \\[-1.ex]
            &\rule{110mm}{0.4pt}                                                                 \\
            &\textbf{for} \: t=1 \: \textbf{to} \: \ldots \: \textbf{do}                         \\
            &\hspace{5mm}g_t           \leftarrow   \nabla_{\theta} f_t (\theta_{t-1})           \\
            &\hspace{5mm}if \: \lambda \neq 0                                                    \\
            &\hspace{10mm} g_t \leftarrow g_t + \lambda  \theta_{t-1}                            \\
            &\hspace{5mm} v_t      \leftarrow v_{t-1} \rho + g^2_t (1 - \rho)                    \\
            &\hspace{5mm}\Delta x_t    \leftarrow   \frac{\sqrt{u_{t-1} +
                \epsilon }}{ \sqrt{v_t + \epsilon}  }g_t \hspace{21mm}                           \\
            &\hspace{5mm} u_t  \leftarrow   u_{t-1}  \rho +
                 \Delta x^2_t  (1 - \rho)                                                        \\
            &\hspace{5mm}\theta_t      \leftarrow   \theta_{t-1} - \gamma  \Delta x_t            \\
            &\rule{110mm}{0.4pt}                                                          \\[-1.ex]
            &\bf{return} \:  \theta_t                                                     \\[-1.ex]
            &\rule{110mm}{0.4pt}                                                          \\[-1.ex]
       \end{aligned}

    For further details regarding the algorithm we refer to `ADADELTA: An Adaptive Learning Rate Method`_.
    """
    + rf"""
    Args:
        {_params_doc}
        lr (float, Tensor, optional): coefficient that scales delta before it is applied
            to the parameters (default: 1.0)
        rho (float, optional): coefficient used for computing a running average
            of squared gradients (default: 0.9). A higher value of `rho` will
            result in a slower average, which can be helpful for preventing
            oscillations in the learning process.
        eps (float, optional): term added to the denominator to improve
            numerical stability (default: 1e-6).
        weight_decay (float, optional): weight decay (L2 penalty) (default: 0)
        {_foreach_doc}
        {_maximize_doc}
        {_capturable_doc}
        {_differentiable_doc}
    """
    + r"""
    .. _ADADELTA\: An Adaptive Learning Rate Method:
        https://arxiv.org/abs/1212.5701

    """
)
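# A minimal usage sketch (illustrative, not part of the original module): the
# class is driven like any other torch.optim optimizer. The `model`, `data`,
# and `loss_fn` names below are hypothetical placeholders.
#
#     model = torch.nn.Linear(10, 1)
#     optimizer = torch.optim.Adadelta(model.parameters(), lr=1.0, rho=0.9)
#     for inputs, targets in data:
#         optimizer.zero_grad()
#         loss = loss_fn(model(inputs), targets)
#         loss.backward()
#         optimizer.step()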
def _single_tensor_adadelta(
    params: list[Tensor],
    grads: list[Tensor],
    square_avgs: list[Tensor],
    acc_deltas: list[Tensor],
    state_steps: list[Tensor],
    *,
    lr: float,
    rho: float,
    eps: float,
    weight_decay: float,
    maximize: bool,
    differentiable: bool,
    capturable: bool,
    has_complex: bool,
) -> None:
    # When capturable, params and state steps must live on a supported device
    # type; the compiler performs this check itself when compiling.
    if not torch.compiler.is_compiling() and capturable:
        capturable_supported_devices = _get_capturable_supported_devices(
            supports_xla=False
        )
        assert all(
            p.device.type == step.device.type
            and p.device.type in capturable_supported_devices
            for p, step in zip(params, state_steps, strict=True)
        ), f"If capturable=True, params and state_steps must be on supported devices: {capturable_supported_devices}."

    if not torch.jit.is_scripting():
        lr = _to_scalar(lr)

    for param, grad, square_avg, acc_delta, step in zip(
        params, grads, square_avgs, acc_deltas, state_steps, strict=True
    ):
        step += 1
        grad = grad if not maximize else -grad

        if weight_decay != 0:
            grad = grad.add(param, alpha=weight_decay)

        if torch.is_complex(param):
            square_avg = torch.view_as_real(square_avg)
            acc_delta = torch.view_as_real(acc_delta)
            grad = torch.view_as_real(grad)

        # v_t = rho * v_{t-1} + (1 - rho) * g_t^2
        square_avg.mul_(rho).addcmul_(grad, grad, value=1 - rho)
        std = square_avg.add(eps).sqrt_()
        delta = acc_delta.add(eps).sqrt_()
        if differentiable:
            delta = delta.clone()
        # delta_t = sqrt(u_{t-1} + eps) / sqrt(v_t + eps) * g_t
        delta.div_(std).mul_(grad)
        # u_t = rho * u_{t-1} + (1 - rho) * delta_t^2
        acc_delta.mul_(rho).addcmul_(delta, delta, value=1 - rho)

        if torch.is_complex(param):
            delta = torch.view_as_complex(delta)
        param.add_(delta, alpha=-lr)


def _multi_tensor_adadelta(
    params: list[Tensor],
    grads: list[Tensor],
    square_avgs: list[Tensor],
    acc_deltas: list[Tensor],
    state_steps: list[Tensor],
    *,
    lr: float,
    rho: float,
    eps: float,
    weight_decay: float,
    maximize: bool,
    differentiable: bool,
    capturable: bool,
    has_complex: bool,
) -> None:
    assert not differentiable, "_foreach ops don't support autograd"

    if not torch.compiler.is_compiling() and capturable:
        capturable_supported_devices = _get_capturable_supported_devices(
            supports_xla=False
        )
        assert all(
            p.device.type == step.device.type
            and p.device.type in capturable_supported_devices
            for p, step in zip(params, state_steps, strict=True)
        ), f"If capturable=True, params and state_steps must be on supported devices: {capturable_supported_devices}."

    if len(params) == 0:
        return

    lr = _to_scalar(lr)

    grouped_tensors = Optimizer._group_tensors_by_device_and_dtype(
        [params, grads, square_avgs, acc_deltas, state_steps]  # type: ignore[list-item]
    )
    for (
        device_params_,
        device_grads_,
        device_square_avgs_,
        device_acc_deltas_,
        device_state_steps_,
    ), _ in grouped_tensors.values():
        device_params = cast(list[Tensor], device_params_)
        device_grads = cast(list[Tensor], device_grads_)
        device_square_avgs = cast(list[Tensor], device_square_avgs_)
        device_acc_deltas = cast(list[Tensor], device_acc_deltas_)
        device_state_steps = cast(list[Tensor], device_state_steps_)

        if has_complex:
            _view_as_real(
                device_params, device_grads, device_square_avgs, device_acc_deltas
            )

        # Update steps. If steps are on CPU, foreach falls back to a slow
        # per-tensor loop that wraps 1 into a Tensor over and over, so wrap it
        # once up front; the alpha kwarg keeps us on the fused path.
        if not torch.compiler.is_compiling() and device_state_steps[0].is_cpu:
            torch._foreach_add_(
                device_state_steps, torch.tensor(1.0, device="cpu"), alpha=1.0
            )
        else:
            torch._foreach_add_(device_state_steps, 1)

        if maximize:
            device_grads = torch._foreach_neg(device_grads)  # type: ignore[assignment]

        if weight_decay != 0:
            # Re-use the intermediate memory (device_grads) already allocated for maximize
            if maximize:
                torch._foreach_add_(device_grads, device_params, alpha=weight_decay)
            else:
                device_grads = torch._foreach_add(  # type: ignore[assignment]
                    device_grads, device_params, alpha=weight_decay
                )

        torch._foreach_mul_(device_square_avgs, rho)
        torch._foreach_addcmul_(
            device_square_avgs, device_grads, device_grads, value=1 - rho
        )

        std = torch._foreach_add(device_square_avgs, eps)
        torch._foreach_sqrt_(std)

        deltas = torch._foreach_add(device_acc_deltas, eps)
        torch._foreach_sqrt_(deltas)
        torch._foreach_div_(deltas, std)
        torch._foreach_mul_(deltas, device_grads)

        torch._foreach_mul_(device_acc_deltas, rho)
        torch._foreach_addcmul_(device_acc_deltas, deltas, deltas, value=1 - rho)

        # If lr is a tensor and we are capturing, going through the alpha=
        # path would call item() and silently break the capture, so multiply
        # it in as a tensor instead.
        if capturable and isinstance(lr, torch.Tensor):
            torch._foreach_mul_(deltas, -lr)
            torch._foreach_add_(device_params, deltas)
        else:
            torch._foreach_add_(device_params, deltas, alpha=-lr)


@_disable_dynamo_if_unsupported(single_tensor_fn=_single_tensor_adadelta)
def adadelta(
    params: list[Tensor],
    grads: list[Tensor],
    square_avgs: list[Tensor],
    acc_deltas: list[Tensor],
    state_steps: list[Tensor],
    # kwonly args with defaults are not supported by functions compiled with
    # torchscript (issue #70627), so these stay positional for now since the
    # functional API is compiled by torch/distributed/optim
    capturable: bool = False,
    foreach: bool | None = None,
    differentiable: bool = False,
    has_complex: bool = False,
    *,
    lr: float,
    rho: float,
    eps: float,
    weight_decay: float,
    maximize: bool,
) -> None:
    r"""Functional API that performs Adadelta algorithm computation.

    See :class:`~torch.optim.Adadelta` for details.
    """
    # this check is slow during compilation, so we skip it
    if not torch.compiler.is_compiling() and not all(
        isinstance(t, torch.Tensor) for t in state_steps
    ):
        raise RuntimeError(
            "API has changed, `state_steps` argument must contain a list of singleton tensors"
        )

    # We still respect when the user inputs False for foreach.
    if foreach is None:
        _, foreach = _default_to_fused_or_foreach(
            params, differentiable, use_fused=False
        )

    if foreach and torch.jit.is_scripting():
        raise RuntimeError("torch.jit.script not supported with foreach optimizers")

    if foreach and not torch.jit.is_scripting():
        func = _multi_tensor_adadelta
    else:
        func = _single_tensor_adadelta

    func(
        params,
        grads,
        square_avgs,
        acc_deltas,
        state_steps,
        lr=lr,
        rho=rho,
        eps=eps,
        weight_decay=weight_decay,
        maximize=maximize,
        differentiable=differentiable,
        capturable=capturable,
        has_complex=has_complex,
    )
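
if __name__ == "__main__":
    # Smoke-test sketch (illustrative, not part of the original module).
    # Because of the relative import above, run it with
    # `python -m torch.optim.adadelta` rather than as a standalone script.
    torch.manual_seed(0)
    w = torch.randn(3, requires_grad=True)
    opt = Adadelta([w], lr=1.0, rho=0.9, eps=1e-6)
    (w**2).sum().backward()
    before = w.detach().clone()
    opt.step()
    state = opt.state[w]
    # One step should bump the step counter, populate both running averages,
    # and move the parameter.
    assert state["step"].item() == 1
    assert set(state) == {"step", "square_avg", "acc_delta"}
    assert not torch.equal(before, w.detach())
    print("Adadelta smoke test passed")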