"""
Partially inspired by torchtune's flex attention implementation

Citation:
@software{torchtune,
  title = {torchtune: PyTorch's finetuning library},
  author = {torchtune maintainers and contributors},
  url = {https://github.com/pytorch/torchtune},
  license = {BSD-3-Clause},
  month = apr,
  year = {2024}
}
"""

from typing import Union

import torch
from packaging import version

from ..utils import is_torch_flex_attn_available, logging
from ..utils.import_utils import get_torch_version, is_torch_less_or_equal, is_torchdynamo_compiling


if is_torch_flex_attn_available():
    from torch.nn.attention.flex_attention import _DEFAULT_SPARSE_BLOCK_SIZE as flex_default_block_size
    from torch.nn.attention.flex_attention import BlockMask, create_block_mask, flex_attention


logger = logging.get_logger(__name__)


class WrappedFlexAttention:
    """
    We are doing a singleton class so that flex attention is compiled once when it's first called.
    """

    _instance = None
    _is_flex_compiled = False
    _compiled_flex_attention = None

    def __new__(cls, *args, **kwargs):
        if cls._instance is None:
            # Create a new instance if one doesn't already exist
            cls._instance = super().__new__(cls)
        return cls._instance

    @torch.compiler.disable(recursive=False)
    def __init__(self, training):
        """
        Initialize or update the singleton instance.
        """
        if not self._is_flex_compiled or training != self.training:
            self.training = training
            if is_torch_less_or_equal("2.5.1"):
                self._compiled_flex_attention = torch.compile(flex_attention, dynamic=False)
            # In PyTorch 2.6.0 there is a known issue when compiling flex attention for training,
            # so the "max-autotune-no-cudagraphs" mode is used as a workaround there
            elif version.parse(get_torch_version()).base_version == "2.6.0" and training:
                self._compiled_flex_attention = torch.compile(
                    flex_attention, dynamic=False, mode="max-autotune-no-cudagraphs"
                )
            else:
                self._compiled_flex_attention = torch.compile(flex_attention)
            self._is_flex_compiled = True

    def __call__(self):
        return self._compiled_flex_attention


def compile_friendly_flex_attention(
    query: torch.Tensor,
    key: torch.Tensor,
    value: torch.Tensor,
    training=False,
    **kwargs,
) -> torch.Tensor | tuple[torch.Tensor, torch.Tensor]:
    # Do not use the compiled singleton when the whole model is already being compiled with
    # torchdynamo, otherwise the nested compilations conflict
    flex_attention_compiled = WrappedFlexAttention(training)() if not is_torchdynamo_compiling() else flex_attention
    return flex_attention_compiled(query, key, value, **kwargs)


Offset = torch.Tensor | int


def make_flex_block_causal_mask(
    attention_mask_2d: torch.Tensor,
    attention_chunk_size: int | None = None,
    query_length=None,
    key_length=None,
    offsets: tuple[Offset, Offset] | None = None,
    is_causal: bool | None = True,
) -> "BlockMask":
    """
    IMPORTANT NOTICE: This function is deprecated in favor of using the mask primitives in `masking_utils.py`,
    and will be removed in a future version without warnings. New code should not use it. It is only kept here
    for BC for now, while models using it are being patched accordingly.

    Create a block (causal) document mask for a batch of sequences, both packed and unpacked.
    The block (causal) mask logic is created here and passed into :func:`torch.nn.attention.flex_attention.create_block_mask`.
    The resultant BlockMask is a compressed representation of the full (causal) block
    mask. BlockMask is essential for performant computation of flex attention.
    See: https://pytorch.org/blog/flexattention/

    Args:
        attention_mask_2d (torch.Tensor): Attention mask for packed and padded sequences
            of shape (batch_size, total_seq_len). e.g.

        For unpacked sequence:
        [[1, 1, 1, 1, 0, 0, 0],
         [1, 1, 1, 1, 1, 0, 0]]

        For packed sequence:
        [[1, 1, 1, 2, 2, 2, 0],
         [1, 1, 2, 2, 2, 3, 3]]

    Returns:
        BlockMask
    """
    batch_size, total_seq_len = attention_mask_2d.shape
    if not key_length:
        key_length = total_seq_len
    if not query_length:
        query_length = total_seq_len
    # Pad the mask so that the key length becomes a multiple of the default sparse block size
    pad_len = ((key_length // flex_default_block_size) + 1) * flex_default_block_size
    attention_mask_2d = torch.nn.functional.pad(attention_mask_2d, value=0, pad=(0, pad_len - key_length))
    device = attention_mask_2d.device
    document_ids = attention_mask_2d.clone()

    if attention_chunk_size is not None:
        # We create an arange, then we just divide by the chunk size to get [0, 0, 0, 1, 1, 1, 2, 2, 2...]
        chunk_idxs = (document_ids.clone().fill_(1).cumsum(-1) - 1) // attention_chunk_size

    def causal_mask_mod(batch_idx, head_idx, q_idx, kv_idx):
        """
        Defines the logic of a block causal mask by combining both a standard causal mask
        and a block diagonal document mask.
        See :func:`~torchtune.modules.attention_utils.create_block_causal_mask`
        for an illustration.
        """
        causal_mask = q_idx >= kv_idx
        document_mask = document_ids[batch_idx, q_idx] == document_ids[batch_idx, kv_idx]
        padding_mask = attention_mask_2d[batch_idx, q_idx] > 0
        final_mask = causal_mask & padding_mask & document_mask
        return final_mask

    def chunk_causal_mask_mod(batch_idx, head_idx, q_idx, kv_idx):
        """
        Combines the chunk mask with the causal mask for chunked attention.
        """
        chunk_mask = chunk_idxs[batch_idx, q_idx] == chunk_idxs[batch_idx, kv_idx]
        causal_doc_mask = causal_mask_mod(batch_idx, head_idx, q_idx, kv_idx)
        return chunk_mask & causal_doc_mask

    def default_mask_mod(batch_idx, head_idx, q_idx, kv_idx):
        """
        Utilizes default attention mask to enable encoder and encoder-decoder
        attention masks.
        """
        document_mask = document_ids[batch_idx, q_idx] == document_ids[batch_idx, kv_idx]
        # The kv indexing is crucial here for padding to be handled correctly
        padding_mask = attention_mask_2d[batch_idx, kv_idx] > 0
        final_mask = padding_mask & document_mask
        return final_mask

    if not is_causal:
        mask_mod_maybe_combined = default_mask_mod
    else:
        mask_mod_maybe_combined = causal_mask_mod if attention_chunk_size is None else chunk_causal_mask_mod

    if offsets is not None:
        q_offset = offsets[0].to(device)
        kv_offset = offsets[1].to(device)

        def mask_mod(batch_idx, head_idx, q_idx, kv_idx):
            offset_q = q_idx + q_offset
            offset_kv = kv_idx + kv_offset
            return mask_mod_maybe_combined(batch_idx, head_idx, offset_q, offset_kv)
    else:
        mask_mod = mask_mod_maybe_combined

    return create_block_mask(
        mask_mod=mask_mod,
        B=batch_size,
        H=None,  # attention head
        Q_LEN=query_length,
        KV_LEN=key_length,
        device=device,
        _compile=True,
    )


def repeat_kv(hidden_states: torch.Tensor, n_rep: int) -> torch.Tensor:
    """
    This is the equivalent of torch.repeat_interleave(x, dim=1, repeats=n_rep). The hidden states go from (batch,
    num_key_value_heads, seqlen, head_dim) to (batch, num_attention_heads, seqlen, head_dim)
    """
    batch, num_key_value_heads, slen, head_dim = hidden_states.shape
    if n_rep == 1:
        return hidden_states
    hidden_states = hidden_states[:, :, None, :, :].expand(batch, num_key_value_heads, n_rep, slen, head_dim)
    return hidden_states.reshape(batch, num_key_value_heads * n_rep, slen, head_dim)


def flex_attention_forward(
    module: torch.nn.Module,
    query: torch.Tensor,
    key: torch.Tensor,
    value: torch.Tensor,
    attention_mask: Union[torch.Tensor, "BlockMask"],
    scaling: float | None = None,
    softcap: float | None = None,
    head_mask: torch.Tensor | None = None,
    **kwargs,
) -> tuple[torch.Tensor, torch.Tensor | None]:
    if kwargs.get("dropout", 0.0) > 0:
        raise ValueError(
            "`flex_attention` does not support `dropout`. Please use it with inference only (`model.eval()`) or turn"
            " off the attention dropout in the respective config."
        )

    block_mask = None
    score_mask = None
    if isinstance(attention_mask, BlockMask):
        block_mask = attention_mask
    else:
        score_mask = attention_mask

    if score_mask is not None:
        score_mask = score_mask[:, :, :, : key.shape[-2]]

    def score_mod(score, batch_idx, head_idx, q_idx, kv_idx):
        if softcap is not None:
            score = softcap * torch.tanh(score / softcap)
        if score_mask is not None:
            score = score + score_mask[batch_idx][0][q_idx][kv_idx]
        if head_mask is not None:
            score = score + head_mask[batch_idx][head_idx][0][0]
        return score

    enable_gqa = True
    num_local_query_heads = query.shape[1]
    # When running tensor parallel, the local number of query heads may not be a power of two,
    # in which case flex attention's native GQA path fails, so we repeat the KV heads manually
    if not ((num_local_query_heads & (num_local_query_heads - 1)) == 0):
        key = repeat_kv(key, query.shape[1] // key.shape[1])
        value = repeat_kv(value, query.shape[1] // value.shape[1])
        enable_gqa = False

    kernel_options = kwargs.get("kernel_options", None)
    # Attention sink logits need the logsumexp of the scores to renormalize the output,
    # but returning it is not supported by the CPU kernel
    s_aux = kwargs.get("s_aux", None)
    return_lse = query.device.type != "cpu"
    if not return_lse and s_aux is not None:
        raise ValueError(
            "Attention sinks cannot be run on CPU with flex attention. Please switch to a different device, e.g. CUDA"
        )

    flex_attention_output = compile_friendly_flex_attention(
        query,
        key,
        value,
        score_mod=score_mod,
        block_mask=block_mask,
        enable_gqa=enable_gqa,
        scale=scaling,
        kernel_options=kernel_options,
        return_lse=return_lse,
        training=module.training,
    )
    if return_lse:
        attention_output, lse = flex_attention_output
        lse = lse.to(value.dtype)
        if s_aux is not None:
            # Fold the per-head sink logits into the softmax denominator and rescale the output
            batch_size, num_heads, seq_len_q, _ = attention_output.shape
            sinks = s_aux.view(1, -1, 1, 1).expand(batch_size, num_heads, seq_len_q, 1)
            lse_expanded = lse.unsqueeze(-1)
            combined_lse = torch.logsumexp(torch.cat([lse_expanded, sinks], dim=-1), dim=-1, keepdim=True)
            renorm_factor = torch.exp(lse_expanded - combined_lse)
            attention_output = attention_output * renorm_factor
    else:
        attention_output = flex_attention_output
        lse = None

    attention_output = attention_output.transpose(1, 2).contiguous()
    return attention_output, lse
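

# A note on the sink renormalization in `flex_attention_forward` (illustrative reasoning,
# not additional API): flex attention returns `softmax(scores) @ value` together with
# `lse = logsumexp(scores)` for each query position. Adding a per-head sink logit `s` to
# the softmax denominator turns every attention weight `exp(x - lse)` into
# `exp(x - logsumexp([lse, s]))`, so the already-computed output only needs to be rescaled
# by `exp(lse - logsumexp([lse, s]))`, which is exactly the `renorm_factor` above.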
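

# Minimal end-to-end usage sketch (illustrative, with hypothetical shapes; assumes a CUDA
# device, a PyTorch build where flex attention is available, and `attn_module` standing
# in for any `nn.Module` exposing a `.training` attribute):
#
#     query = torch.randn(1, 8, 128, 64, device="cuda")   # (batch, heads, seq, head_dim)
#     key = torch.randn(1, 2, 128, 64, device="cuda")
#     value = torch.randn(1, 2, 128, 64, device="cuda")
#     mask_2d = torch.ones(1, 128, dtype=torch.long, device="cuda")
#     block_mask = make_flex_block_causal_mask(mask_2d)
#     out, lse = flex_attention_forward(
#         attn_module, query, key, value, attention_mask=block_mask, scaling=64**-0.5
#     )
#     # `out` comes back as (batch, seq_len, heads, head_dim) after the final transpose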