"""Implementation for the RMSprop algorithm."""

from typing import cast

import torch
from torch import Tensor

from .optimizer import (
    _capturable_doc,
    _default_to_fused_or_foreach,
    _differentiable_doc,
    _disable_dynamo_if_unsupported,
    _foreach_doc,
    _get_capturable_supported_devices,
    _get_scalar_dtype,
    _maximize_doc,
    _params_doc,
    _to_scalar,
    _use_grad_for_differentiable,
    _view_as_real,
    Optimizer,
    ParamsT,
)

__all__ = ["RMSprop", "rmsprop"]


class RMSprop(Optimizer):
    def __init__(
        self,
        params: ParamsT,
        lr: float | Tensor = 1e-2,
        alpha: float = 0.99,
        eps: float = 1e-8,
        weight_decay: float = 0,
        momentum: float = 0,
        centered: bool = False,
        capturable: bool = False,
        foreach: bool | None = None,
        maximize: bool = False,
        differentiable: bool = False,
    ) -> None:
        if isinstance(lr, Tensor) and lr.numel() != 1:
            raise ValueError("Tensor lr must be 1-element")
        if not 0.0 <= lr:
            raise ValueError(f"Invalid learning rate: {lr}")
        if not 0.0 <= eps:
            raise ValueError(f"Invalid epsilon value: {eps}")
        if not 0.0 <= momentum:
            raise ValueError(f"Invalid momentum value: {momentum}")
        if not 0.0 <= weight_decay:
            raise ValueError(f"Invalid weight_decay value: {weight_decay}")
        if not 0.0 <= alpha:
            raise ValueError(f"Invalid alpha value: {alpha}")

        defaults = dict(
            lr=lr,
            momentum=momentum,
            alpha=alpha,
            eps=eps,
            centered=centered,
            weight_decay=weight_decay,
            capturable=capturable,
            foreach=foreach,
            maximize=maximize,
            differentiable=differentiable,
        )
        super().__init__(params, defaults)

    def __setstate__(self, state):
        super().__setstate__(state)
        for group in self.param_groups:
            group.setdefault("momentum", 0)
            group.setdefault("centered", False)
            group.setdefault("foreach", None)
            group.setdefault("maximize", False)
            group.setdefault("differentiable", False)
            group.setdefault("capturable", False)
            for p in group["params"]:
                p_state = self.state.get(p, [])
                # Older checkpoints stored `step` as a plain float; convert it to a tensor.
                if len(p_state) != 0 and not torch.is_tensor(p_state["step"]):
                    step_val = float(p_state["step"])
                    p_state["step"] = (
                        torch.tensor(
                            step_val, dtype=_get_scalar_dtype(), device=p.device
                        )
                        if group["capturable"]
                        else torch.tensor(step_val, dtype=_get_scalar_dtype())
                    )

    def _init_group(
        self,
        group,
        params_with_grad,
        grads,
        square_avgs,
        momentum_buffer_list,
        grad_avgs,
        state_steps,
    ):
        has_complex = False
        for p in group["params"]:
            if p.grad is None:
                continue
            has_complex |= torch.is_complex(p)
            params_with_grad.append(p)

            if p.grad.is_sparse:
                raise RuntimeError("RMSprop does not support sparse gradients")
            grads.append(p.grad)

            state = self.state[p]

            # Lazy state initialization
            if len(state) == 0:
                state["step"] = (
                    torch.zeros((), dtype=_get_scalar_dtype(), device=p.device)
                    if group["capturable"]
                    else torch.zeros((), dtype=_get_scalar_dtype())
                )
                state["square_avg"] = torch.zeros_like(
                    p, memory_format=torch.preserve_format
                )
                if group["momentum"] > 0:
                    state["momentum_buffer"] = torch.zeros_like(
                        p, memory_format=torch.preserve_format
                    )
                if group["centered"]:
                    state["grad_avg"] = torch.zeros_like(
                        p, memory_format=torch.preserve_format
                    )

            square_avgs.append(state["square_avg"])
            state_steps.append(state["step"])

            if group["momentum"] > 0:
                momentum_buffer_list.append(state["momentum_buffer"])
            if group["centered"]:
                grad_avgs.append(state["grad_avg"])

        return has_complex

    @_use_grad_for_differentiable
    def step(self, closure=None):
        """Perform a single optimization step.

        Args:
            closure (Callable, optional): A closure that reevaluates the model
                and returns the loss.
        """
        self._cuda_graph_capture_health_check()

        loss = None
        if closure is not None:
            with torch.enable_grad():
                loss = closure()

        for group in self.param_groups:
            params_with_grad: list[Tensor] = []
            grads: list[Tensor] = []
            square_avgs: list[Tensor] = []
            grad_avgs: list[Tensor] = []
            momentum_buffer_list: list[Tensor] = []
            state_steps: list[Tensor] = []

            has_complex = self._init_group(
                group,
                params_with_grad,
                grads,
                square_avgs,
                momentum_buffer_list,
                grad_avgs,
                state_steps,
            )

            rmsprop(
                params_with_grad,
                grads,
                square_avgs,
                grad_avgs,
                momentum_buffer_list,
                state_steps,
                lr=group["lr"],
                alpha=group["alpha"],
                eps=group["eps"],
                weight_decay=group["weight_decay"],
                momentum=group["momentum"],
                centered=group["centered"],
                foreach=group["foreach"],
                maximize=group["maximize"],
                differentiable=group["differentiable"],
                capturable=group["capturable"],
                has_complex=has_complex,
            )

        return loss


RMSprop.__doc__ = (
    r"""Implements RMSprop algorithm.

    .. math::
       \begin{aligned}
            &\rule{110mm}{0.4pt}                                                                 \\
            &\textbf{input}      : \alpha \text{ (alpha)}, \: \gamma \text{ (lr)},
                \: \theta_0 \text{ (params)}, \: f(\theta) \text{ (objective)}                   \\
            &\hspace{13mm}   \lambda \text{ (weight decay)},\: \mu \text{ (momentum)},
                \: centered, \: \epsilon \text{ (epsilon)}                                       \\
            &\textbf{initialize} : v_0 \leftarrow 0 \text{ (square average)}, \:
                \textbf{b}_0 \leftarrow 0 \text{ (buffer)}, \: g^{ave}_0 \leftarrow 0     \\[-1.ex]
            &\rule{110mm}{0.4pt}                                                                 \\
            &\textbf{for} \: t=1 \: \textbf{to} \: \ldots \: \textbf{do}                         \\
            &\hspace{5mm}g_t           \leftarrow   \nabla_{\theta} f_t (\theta_{t-1})           \\
            &\hspace{5mm}if \: \lambda \neq 0                                                    \\
            &\hspace{10mm} g_t \leftarrow g_t + \lambda  \theta_{t-1}                            \\
            &\hspace{5mm}v_t           \leftarrow   \alpha v_{t-1} + (1 - \alpha) g^2_t
                \hspace{8mm}                                                                     \\
            &\hspace{5mm} \tilde{v_t} \leftarrow v_t                                             \\
            &\hspace{5mm}if \: centered                                                          \\
            &\hspace{10mm} g^{ave}_t \leftarrow g^{ave}_{t-1} \alpha + (1-\alpha) g_t            \\
            &\hspace{10mm} \tilde{v_t} \leftarrow \tilde{v_t} -  \big(g^{ave}_{t} \big)^2        \\
            &\hspace{5mm}if \: \mu > 0                                                           \\
            &\hspace{10mm} \textbf{b}_t\leftarrow \mu \textbf{b}_{t-1} +
                g_t/ \big(\sqrt{\tilde{v_t}} +  \epsilon \big)                                   \\
            &\hspace{10mm} \theta_t \leftarrow \theta_{t-1} - \gamma \textbf{b}_t                \\
            &\hspace{5mm} else                                                                   \\
            &\hspace{10mm}\theta_t      \leftarrow   \theta_{t-1} -
                \gamma  g_t/ \big(\sqrt{\tilde{v_t}} + \epsilon \big)  \hspace{3mm}              \\
            &\rule{110mm}{0.4pt}                                                          \\[-1.ex]
            &\bf{return} \:  \theta_t                                                     \\[-1.ex]
            &\rule{110mm}{0.4pt}                                                          \\[-1.ex]
       \end{aligned}

    For further details regarding the algorithm we refer to
    `lecture notes <https://www.cs.toronto.edu/~tijmen/csc321/slides/lecture_slides_lec6.pdf>`_ by G. Hinton,
    and for the centered version to `Generating Sequences
    With Recurrent Neural Networks <https://arxiv.org/pdf/1308.0850v5.pdf>`_.
    The implementation here takes the square root of the gradient average before
    adding epsilon (note that TensorFlow interchanges these two operations). The effective
    learning rate is thus :math:`\gamma/(\sqrt{v} + \epsilon)` where :math:`\gamma`
    is the scheduled learning rate and :math:`v` is the weighted moving average
    of the squared gradient.
    """
    + rf"""
    Args:
        {_params_doc}
        lr (float, Tensor, optional): learning rate (default: 1e-2)
        alpha (float, optional): smoothing constant (default: 0.99)
        eps (float, optional): term added to the denominator to improve
            numerical stability (default: 1e-8)
        weight_decay (float, optional): weight decay (L2 penalty) (default: 0)
        momentum (float, optional): momentum factor (default: 0)
        centered (bool, optional) : if ``True``, compute the centered RMSProp,
            the gradient is normalized by an estimation of its variance
        {_foreach_doc}
        {_maximize_doc}
        {_capturable_doc}
        {_differentiable_doc}

    """
)


def _single_tensor_rmsprop(
    params: list[Tensor],
    grads: list[Tensor],
    square_avgs: list[Tensor],
    grad_avgs: list[Tensor],
    momentum_buffer_list: list[Tensor],
    state_steps: list[Tensor],
    *,
    lr: float,
    alpha: float,
    eps: float,
    weight_decay: float,
    momentum: float,
    centered: bool,
    maximize: bool,
    differentiable: bool,
    capturable: bool,
    has_complex: bool,
) -> None:
    if not torch.jit.is_scripting():
        lr = _to_scalar(lr)

    for i, param in enumerate(params):
        step = state_steps[i]

        # If compiling, the compiler handles the cudagraph-capture device check itself.
        if not torch.compiler.is_compiling() and capturable:
            capturable_supported_devices = _get_capturable_supported_devices()
            assert (
                param.device.type == step.device.type
                and param.device.type in capturable_supported_devices
            ), f"If capturable=True, params and state_steps must be on supported devices: {capturable_supported_devices}."

        grad = grads[i]
        grad = grad if not maximize else -grad
        square_avg = square_avgs[i]

        step += 1

        if weight_decay != 0:
            grad = grad.add(param, alpha=weight_decay)

        is_complex_param = torch.is_complex(param)
        if is_complex_param:
            param = torch.view_as_real(param)
            grad = torch.view_as_real(grad)
            square_avg = torch.view_as_real(square_avg)

        square_avg.mul_(alpha).addcmul_(grad, grad, value=1 - alpha)

        if centered:
            grad_avg = grad_avgs[i]
            if is_complex_param:
                grad_avg = torch.view_as_real(grad_avg)
            grad_avg.lerp_(grad, 1 - alpha)
            avg = square_avg.addcmul(grad_avg, grad_avg, value=-1).sqrt_()
        else:
            avg = square_avg.sqrt()

        if differentiable:
            avg = avg.add(eps)
        else:
            avg = avg.add_(eps)

        if momentum > 0:
            buf = momentum_buffer_list[i]
            if is_complex_param:
                buf = torch.view_as_real(buf)
            buf.mul_(momentum).addcdiv_(grad, avg)
            param.add_(buf, alpha=-lr)
        else:
            param.addcdiv_(grad, avg, value=-lr)


def _multi_tensor_rmsprop(
    params: list[Tensor],
    grads: list[Tensor],
    square_avgs: list[Tensor],
    grad_avgs: list[Tensor],
    momentum_buffer_list: list[Tensor],
    state_steps: list[Tensor],
    *,
    lr: float,
    alpha: float,
    eps: float,
    weight_decay: float,
    momentum: float,
    centered: bool,
    maximize: bool,
    differentiable: bool,
    capturable: bool,
    has_complex: bool,
) -> None:
    if len(params) == 0:
        return

    assert not differentiable, "_foreach ops don't support autograd"

    # If compiling, the compiler handles the cudagraph-capture device check itself.
    if not torch.compiler.is_compiling() and capturable:
        capturable_supported_devices = _get_capturable_supported_devices()
        assert all(
            p.device.type == step.device.type
            and p.device.type in capturable_supported_devices
            for p, step in zip(params, state_steps, strict=True)
        ), f"If capturable=True, params and state_steps must be on supported devices: {capturable_supported_devices}."

    lr = _to_scalar(lr)

    grouped_tensors = Optimizer._group_tensors_by_device_and_dtype(
        [params, grads, square_avgs, grad_avgs, momentum_buffer_list, state_steps]
    )
    for (
        grouped_params_,
        grouped_grads_,
        grouped_square_avgs_,
        grouped_grad_avgs_,
        grouped_momentum_buffer_list_,
        grouped_state_steps_,
    ), _ in grouped_tensors.values():
        grouped_params = cast(list[Tensor], grouped_params_)
        grouped_grads = cast(list[Tensor], grouped_grads_)
        grouped_square_avgs = cast(list[Tensor], grouped_square_avgs_)
        grouped_state_steps = cast(list[Tensor], grouped_state_steps_)

        if has_complex:
            state_and_grads = [grouped_grads, grouped_square_avgs]
            if momentum > 0:
                grouped_momentum_buffer_list = cast(
                    list[Tensor], grouped_momentum_buffer_list_
                )
                state_and_grads.append(grouped_momentum_buffer_list)
            if centered:
                grouped_grad_avgs = cast(list[Tensor], grouped_grad_avgs_)
                state_and_grads.append(grouped_grad_avgs)
            _view_as_real(grouped_params, *state_and_grads)

        if maximize:
            grouped_grads = torch._foreach_neg(grouped_grads)

        # Update steps. If the steps are on CPU, foreach falls back to a slow per-tensor
        # loop, so wrap the constant in a tensor once instead of once per step tensor.
        if not torch.compiler.is_compiling() and grouped_state_steps[0].is_cpu:
            torch._foreach_add_(
                grouped_state_steps, torch.tensor(1.0, device="cpu"), alpha=1.0
            )
        else:
            torch._foreach_add_(grouped_state_steps, 1)

        if weight_decay != 0:
            # Re-use the intermediate memory (grouped_grads) already allocated for maximize
            if maximize:
                torch._foreach_add_(grouped_grads, grouped_params, alpha=weight_decay)
            else:
                grouped_grads = torch._foreach_add(
                    grouped_grads, grouped_params, alpha=weight_decay
                )

        torch._foreach_mul_(grouped_square_avgs, alpha)
        torch._foreach_addcmul_(
            grouped_square_avgs, grouped_grads, grouped_grads, value=1 - alpha
        )

        if centered:
            grouped_grad_avgs = cast(list[Tensor], grouped_grad_avgs_)
            torch._foreach_lerp_(grouped_grad_avgs, grouped_grads, 1 - alpha)
            avg = torch._foreach_addcmul(
                grouped_square_avgs, grouped_grad_avgs, grouped_grad_avgs, value=-1
            )
            torch._foreach_sqrt_(avg)
            torch._foreach_add_(avg, eps)
        else:
            avg = torch._foreach_sqrt(grouped_square_avgs)
            torch._foreach_add_(avg, eps)

        if momentum > 0:
            grouped_momentum_buffer_list = cast(
                list[Tensor], grouped_momentum_buffer_list_
            )
            torch._foreach_mul_(grouped_momentum_buffer_list, momentum)
            torch._foreach_addcdiv_(grouped_momentum_buffer_list, grouped_grads, avg)
            # If lr is a tensor, the else branch would internally call item(),
            # which causes silent incorrectness while capturing a cuda graph.
            if capturable and isinstance(lr, torch.Tensor):
                momentum_lr = torch._foreach_mul(grouped_momentum_buffer_list, -lr)
                torch._foreach_add_(grouped_params, momentum_lr)
            else:
                torch._foreach_add_(
                    grouped_params, grouped_momentum_buffer_list, alpha=-lr
                )
        else:
            # Same tensor-lr consideration as above for the addcdiv value argument.
            if capturable and isinstance(lr, torch.Tensor):
                torch._foreach_div_(avg, -lr)
                torch._foreach_addcdiv_(grouped_params, grouped_grads, avg)
            else:
                torch._foreach_addcdiv_(grouped_params, grouped_grads, avg, value=-lr)


@_disable_dynamo_if_unsupported(single_tensor_fn=_single_tensor_rmsprop)
def rmsprop(
    params: list[Tensor],
    grads: list[Tensor],
    square_avgs: list[Tensor],
    grad_avgs: list[Tensor],
    momentum_buffer_list: list[Tensor],
    state_steps: list[Tensor],
    # Keyword-only args with defaults are not supported by torchscript-compiled
    # functions, so these flags are exposed as positional-or-keyword arguments here.
    foreach: bool | None = None,
    maximize: bool = False,
    differentiable: bool = False,
    capturable: bool = False,
    has_complex: bool = False,
    *,
    lr: float,
    alpha: float,
    eps: float,
    weight_decay: float,
    momentum: float,
    centered: bool,
) -> None:
    r"""Functional API that performs rmsprop algorithm computation.

    See :class:`~torch.optim.RMSprop` for details.
    """
    # This check is slow during compilation, so it is skipped while compiling.
    if not torch.compiler.is_compiling() and not all(
        isinstance(t, torch.Tensor) for t in state_steps
    ):
        raise RuntimeError(
            "API has changed, `state_steps` argument must contain a list of singleton tensors"
        )

    if foreach is None:
        _, foreach = _default_to_fused_or_foreach(
            params, differentiable, use_fused=False
        )

    if foreach and torch.jit.is_scripting():
        raise RuntimeError("torch.jit.script not supported with foreach optimizers")

    if foreach and not torch.jit.is_scripting():
        func = _multi_tensor_rmsprop
    else:
        func = _single_tensor_rmsprop

    func(
        params,
        grads,
        square_avgs,
        grad_avgs,
        momentum_buffer_list,
        state_steps,
        lr=lr,
        alpha=alpha,
        eps=eps,
        weight_decay=weight_decay,
        momentum=momentum,
        centered=centered,
        maximize=maximize,
        differentiable=differentiable,
        capturable=capturable,
        has_complex=has_complex,
    )
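

# ---------------------------------------------------------------------------
# Minimal usage sketch (illustrative only, not part of the public API surface).
# It shows the class interface documented above on a toy quadratic objective;
# `w` and `opt` are placeholder names for this demo, and Optimizer.step()
# ultimately dispatches to the functional rmsprop() defined in this module.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    torch.manual_seed(0)
    w = torch.randn(4, requires_grad=True)

    # Centered RMSprop with momentum; defaults match the docstring above.
    opt = RMSprop([w], lr=1e-2, alpha=0.99, momentum=0.9, centered=True)
    for _ in range(10):
        opt.zero_grad()
        loss = (w**2).sum()  # toy objective whose minimum is w == 0
        loss.backward()
        opt.step()
    print("loss after 10 steps:", (w**2).sum().item())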