import types
import warnings
from typing import Any, overload

import numpy as np

from ..models.bert.tokenization_bert_legacy import BasicTokenizer
from ..utils import ExplicitEnum, add_end_docstrings, is_torch_available
from .base import ArgumentHandler, ChunkPipeline, Dataset, build_pipeline_init_args


if is_torch_available():
    import torch

    from ..models.auto.modeling_auto import MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES


class TokenClassificationArgumentHandler(ArgumentHandler):
    """
    Handles arguments for token classification.
    """

    def __call__(self, inputs: str | list[str], **kwargs):
        is_split_into_words = kwargs.get("is_split_into_words", False)
        delimiter = kwargs.get("delimiter")

        if inputs is not None and isinstance(inputs, (list, tuple)) and len(inputs) > 0:
            inputs = list(inputs)
            batch_size = len(inputs)
        elif isinstance(inputs, str):
            inputs = [inputs]
            batch_size = 1
        elif Dataset is not None and isinstance(inputs, Dataset) or isinstance(inputs, types.GeneratorType):
            return inputs, None, is_split_into_words, delimiter
        else:
            raise ValueError("At least one input is required.")

        offset_mapping = kwargs.get("offset_mapping")
        if offset_mapping:
            if isinstance(offset_mapping, list) and isinstance(offset_mapping[0], tuple):
                offset_mapping = [offset_mapping]
            if len(offset_mapping) != batch_size:
                raise ValueError("offset_mapping should have the same batch size as the input")

        return inputs, offset_mapping, is_split_into_words, delimiter


class AggregationStrategy(ExplicitEnum):
    """All the valid aggregation strategies for TokenClassificationPipeline"""

    NONE = "none"
    SIMPLE = "simple"
    FIRST = "first"
    AVERAGE = "average"
    MAX = "max"


@add_end_docstrings(
    build_pipeline_init_args(has_tokenizer=True),
    r"""
        ignore_labels (`list[str]`, defaults to `["O"]`):
            A list of labels to ignore.
        grouped_entities (`bool`, *optional*, defaults to `False`):
            DEPRECATED, use `aggregation_strategy` instead. Whether or not to group the tokens corresponding to the
            same entity together in the predictions or not.
        stride (`int`, *optional*):
            If stride is provided, the pipeline is applied on all the text. The text is split into chunks of size
            model_max_length. Works only with fast tokenizers and `aggregation_strategy` different from `NONE`. The
            value of this argument defines the number of overlapping tokens between chunks. In other words, the model
            will shift forward by `tokenizer.model_max_length - stride` tokens each step.
        aggregation_strategy (`str`, *optional*, defaults to `"none"`):
            The strategy to fuse (or not) tokens based on the model prediction.

                - "none" : Will not do any aggregation and simply return the raw results from the model.
                - "simple" : Will attempt to group entities following the default schema. (A, B-TAG), (B, I-TAG), (C,
                  I-TAG), (D, B-TAG2), (E, B-TAG2) will end up being [{"word": "ABC", "entity": "TAG"}, {"word": "D",
                  "entity": "TAG2"}, {"word": "E", "entity": "TAG2"}]. Notice that two consecutive B tags will end up
                  as different entities. On word-based languages, we might end up splitting words undesirably: imagine
                  Microsoft being tagged as [{"word": "Micro", "entity": "ENTERPRISE"}, {"word": "soft", "entity":
                  "NAME"}]. Look at the FIRST, MAX, and AVERAGE options for ways to mitigate this and disambiguate
                  words (on languages that support that meaning, which is basically tokens separated by a space).
                  These mitigations will only work on real words; "New york" might still be tagged with two different
                  entities.
                - "first" : (works only on word-based models) Will use the `SIMPLE` strategy except that words cannot
                  end up with different tags. Words will simply use the tag of the first token of the word when there
                  is ambiguity.
                - "average" : (works only on word-based models) Will use the `SIMPLE` strategy except that words
                  cannot end up with different tags. Scores will be averaged first across tokens, and then the maximum
                  label is applied.
                - "max" : (works only on word-based models) Will use the `SIMPLE` strategy except that words cannot
                  end up with different tags. The word entity will simply be the token with the maximum score.""",
)
class TokenClassificationPipeline(ChunkPipeline):
    """
    Named Entity Recognition pipeline using any `ModelForTokenClassification`. See the [named entity recognition
    examples](../task_summary#named-entity-recognition) for more information.

    Example:

    ```python
    >>> from transformers import pipeline

    >>> token_classifier = pipeline(model="Jean-Baptiste/camembert-ner", aggregation_strategy="simple")
    >>> sentence = "Je m'appelle jean-baptiste et je vis à montréal"
    >>> tokens = token_classifier(sentence)
    >>> tokens
    [{'entity_group': 'PER', 'score': 0.9931, 'word': 'jean-baptiste', 'start': 12, 'end': 26}, {'entity_group': 'LOC', 'score': 0.998, 'word': 'montréal', 'start': 38, 'end': 47}]

    >>> token = tokens[0]
    >>> # Start and end provide an easy way to highlight words in the original text.
    >>> sentence[token["start"] : token["end"]]
    ' jean-baptiste'

    >>> # Some models use the same idea to do part of speech.
    >>> syntaxer = pipeline(model="vblagoje/bert-english-uncased-finetuned-pos", aggregation_strategy="simple")
    >>> syntaxer("My name is Sarah and I live in London")
    [{'entity_group': 'PRON', 'score': 0.999, 'word': 'my', 'start': 0, 'end': 2}, {'entity_group': 'NOUN', 'score': 0.997, 'word': 'name', 'start': 3, 'end': 7}, {'entity_group': 'AUX', 'score': 0.994, 'word': 'is', 'start': 8, 'end': 10}, {'entity_group': 'PROPN', 'score': 0.999, 'word': 'sarah', 'start': 11, 'end': 16}, {'entity_group': 'CCONJ', 'score': 0.999, 'word': 'and', 'start': 17, 'end': 20}, {'entity_group': 'PRON', 'score': 0.999, 'word': 'i', 'start': 21, 'end': 22}, {'entity_group': 'VERB', 'score': 0.998, 'word': 'live', 'start': 23, 'end': 27}, {'entity_group': 'ADP', 'score': 0.999, 'word': 'in', 'start': 28, 'end': 30}, {'entity_group': 'PROPN', 'score': 0.999, 'word': 'london', 'start': 31, 'end': 37}]
    ```

    Learn more about the basics of using a pipeline in the [pipeline tutorial](../pipeline_tutorial)

    This token recognition pipeline can currently be loaded from [`pipeline`] using the following task identifier:
    `"ner"` (for predicting the classes of tokens in a sequence: person, organisation, location or miscellaneous).

    The models that this pipeline can use are models that have been fine-tuned on a token classification task. See the
    up-to-date list of available models on
    [huggingface.co/models](https://huggingface.co/models?filter=token-classification).
    """

    default_input_names = "sequences"

    _load_processor = False
    _load_image_processor = False
    _load_feature_extractor = False
    _load_tokenizer = True

    def __init__(self, args_parser=TokenClassificationArgumentHandler(), **kwargs):
        super().__init__(**kwargs)
        self.check_model_type(MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES)

        self._basic_tokenizer = BasicTokenizer(do_lower_case=False)
        self._args_parser = args_parser

    def _sanitize_parameters(
        self,
        ignore_labels=None,
        aggregation_strategy: AggregationStrategy | None = None,
        offset_mapping: list[tuple[int, int]] | None = None,
        is_split_into_words: bool = False,
        stride: int | None = None,
        delimiter: str | None = None,
    ):
        preprocess_params = {}
        preprocess_params["is_split_into_words"] = is_split_into_words
        if is_split_into_words:
            preprocess_params["delimiter"] = " " if delimiter is None else delimiter
        if offset_mapping is not None:
            preprocess_params["offset_mapping"] = offset_mapping

        postprocess_params = {}
        if aggregation_strategy is not None:
            if isinstance(aggregation_strategy, str):
                aggregation_strategy = AggregationStrategy[aggregation_strategy.upper()]
            if (
                aggregation_strategy
                in {AggregationStrategy.FIRST, AggregationStrategy.MAX, AggregationStrategy.AVERAGE}
                and not self.tokenizer.is_fast
            ):
                raise ValueError(
                    "Slow tokenizers cannot handle subwords. Please set the `aggregation_strategy` option"
                    ' to `"simple"` or use a fast tokenizer.'
                )
            postprocess_params["aggregation_strategy"] = aggregation_strategy
        if ignore_labels is not None:
            postprocess_params["ignore_labels"] = ignore_labels
        if stride is not None:
            if stride >= self.tokenizer.model_max_length:
                raise ValueError(
                    "`stride` must be less than `tokenizer.model_max_length` (or even lower if the tokenizer adds special tokens)"
                )
            if aggregation_strategy == AggregationStrategy.NONE:
                raise ValueError(
                    f'`stride` was provided to process all the text but `aggregation_strategy="{aggregation_strategy}"`,'
                    " please select another one instead."
                )
            if self.tokenizer.is_fast:
                tokenizer_params = {
                    "return_overflowing_tokens": True,
                    "padding": True,
                    "stride": stride,
                }
                preprocess_params["tokenizer_params"] = tokenizer_params
            else:
                raise ValueError(
                    "`stride` was provided to process all the text but you're using a slow tokenizer."
                    " Please use a fast tokenizer."
                )
        return preprocess_params, {}, postprocess_params

    @overload
    def __call__(self, inputs: str, **kwargs: Any) -> list[dict[str, Any]]: ...

    @overload
    def __call__(self, inputs: list[str], **kwargs: Any) -> list[list[dict[str, Any]]]: ...

    def __call__(self, inputs: str | list[str], **kwargs: Any) -> list[dict[str, Any]] | list[list[dict[str, Any]]]:
        """
        Classify each token of the text(s) given as inputs.

        Args:
            inputs (`str` or `list[str]`):
                One or several texts (or one list of texts) for token classification. Can be pre-tokenized when
                `is_split_into_words=True`.

        Return:
            A list or a list of list of `dict`: Each result comes as a list of dictionaries (one for each token in the
            corresponding input, or each entity if this pipeline was instantiated with an aggregation_strategy) with
            the following keys:

            - **word** (`str`) -- The token/word classified. This is obtained by decoding the selected tokens. If you
              want to have the exact string in the original sentence, use `start` and `end`.
            - **score** (`float`) -- The corresponding probability for `entity`.
            - **entity** (`str`) -- The entity predicted for that token/word (it is named *entity_group* when
              *aggregation_strategy* is not `"none"`).
            - **index** (`int`, only present when `aggregation_strategy="none"`) -- The index of the corresponding
              token in the sentence.
            - **start** (`int`, *optional*) -- The index of the start of the corresponding entity in the sentence.
              Only exists if the offsets are available within the tokenizer.
            - **end** (`int`, *optional*) -- The index of the end of the corresponding entity in the sentence. Only
              exists if the offsets are available within the tokenizer.
        """
        _inputs, offset_mapping, is_split_into_words, delimiter = self._args_parser(inputs, **kwargs)
        kwargs["is_split_into_words"] = is_split_into_words
        kwargs["delimiter"] = delimiter

        if is_split_into_words and all(isinstance(input, str) for input in inputs):
            # A single pre-tokenized sentence was passed; wrap it so it is treated as a batch of one.
            return super().__call__([inputs], **kwargs)

        if offset_mapping:
            kwargs["offset_mapping"] = offset_mapping

        return super().__call__(inputs, **kwargs)

    def preprocess(self, sentence, offset_mapping=None, **preprocess_params):
        tokenizer_params = preprocess_params.pop("tokenizer_params", {})
        truncation = True if self.tokenizer.model_max_length and self.tokenizer.model_max_length > 0 else False
        word_to_chars_map = None
        is_split_into_words = preprocess_params["is_split_into_words"]
        if is_split_into_words:
            delimiter = preprocess_params["delimiter"]
            if not isinstance(sentence, list):
                raise ValueError("When `is_split_into_words=True`, `sentence` must be a list of tokens.")
            words = sentence
            sentence = delimiter.join(words)
            # Track the character span each word occupies in the joined text.
            word_to_chars_map = []
            char_offset = 0
            delimiter_len = len(delimiter)
            for word in words:
                word_to_chars_map.append((char_offset, char_offset + len(word)))
                char_offset += len(word) + delimiter_len
            text_to_tokenize = words
            tokenizer_params["is_split_into_words"] = True
        else:
            if not isinstance(sentence, str):
                raise ValueError("When `is_split_into_words=False`, `sentence` must be an untokenized string.")
            text_to_tokenize = sentence
        inputs = self.tokenizer(
            text_to_tokenize,
            return_tensors="pt",
            truncation=truncation,
            return_special_tokens_mask=True,
            return_offsets_mapping=self.tokenizer.is_fast,
            **tokenizer_params,
        )
        if is_split_into_words and not self.tokenizer.is_fast:
            raise ValueError("is_split_into_words=True is only supported with fast tokenizers.")
        inputs.pop("overflow_to_sample_mapping", None)
        num_chunks = len(inputs["input_ids"])

        for i in range(num_chunks):
            model_inputs = {k: v[i].unsqueeze(0) for k, v in inputs.items()}
            if offset_mapping is not None:
                model_inputs["offset_mapping"] = offset_mapping
            model_inputs["sentence"] = sentence if i == 0 else None
            model_inputs["is_last"] = i == num_chunks - 1
            if word_to_chars_map is not None:
                model_inputs["word_ids"] = inputs.word_ids(i)
                model_inputs["word_to_chars_map"] = word_to_chars_map

            yield model_inputs
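
    # Example of the word-offset bookkeeping above (illustrative values): with the
    # default " " delimiter, the pre-split input ["New", "York"] is joined to
    # "New York" and word_to_chars_map becomes [(0, 3), (4, 8)], i.e. each word's
    # character span in the joined sentence. gather_pre_entities() later shifts
    # the tokenizer's word-relative offsets by each word's start character to
    # recover positions in the joined text.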

    def _forward(self, model_inputs):
        # Pop everything the model's forward() does not accept before the call.
        special_tokens_mask = model_inputs.pop("special_tokens_mask")
        offset_mapping = model_inputs.pop("offset_mapping", None)
        sentence = model_inputs.pop("sentence")
        is_last = model_inputs.pop("is_last")
        word_ids = model_inputs.pop("word_ids", None)
        word_to_chars_map = model_inputs.pop("word_to_chars_map", None)

        output = self.model(**model_inputs)
        logits = output["logits"] if isinstance(output, dict) else output[0]

        return {
            "logits": logits,
            "special_tokens_mask": special_tokens_mask,
            "offset_mapping": offset_mapping,
            "sentence": sentence,
            "is_last": is_last,
            "word_ids": word_ids,
            "word_to_chars_map": word_to_chars_map,
            **model_inputs,
        }
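
    # The metadata popped in _forward() is re-attached to the returned dict so
    # postprocess() can consume it. ChunkPipeline collects one such dict per
    # chunk yielded by preprocess() and passes the whole list to postprocess()
    # as `all_outputs`.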

    def postprocess(self, all_outputs, aggregation_strategy=AggregationStrategy.NONE, ignore_labels=None):
        if ignore_labels is None:
            ignore_labels = ["O"]
        all_entities = []
        word_to_chars_map = all_outputs[0].get("word_to_chars_map")
        for model_outputs in all_outputs:
            if model_outputs["logits"][0].dtype in (torch.bfloat16, torch.float16):
                logits = model_outputs["logits"][0].to(torch.float32).numpy()
            else:
                logits = model_outputs["logits"][0].numpy()
            sentence = all_outputs[0]["sentence"]
            input_ids = model_outputs["input_ids"][0]
            offset_mapping = (
                model_outputs["offset_mapping"][0] if model_outputs["offset_mapping"] is not None else None
            )
            special_tokens_mask = model_outputs["special_tokens_mask"][0].numpy()
            word_ids = model_outputs.get("word_ids")

            # Numerically stable softmax over the label dimension.
            maxes = np.max(logits, axis=-1, keepdims=True)
            shifted_exp = np.exp(logits - maxes)
            scores = shifted_exp / shifted_exp.sum(axis=-1, keepdims=True)

            pre_entities = self.gather_pre_entities(
                sentence,
                input_ids,
                scores,
                offset_mapping,
                special_tokens_mask,
                aggregation_strategy,
                word_ids=word_ids,
                word_to_chars_map=word_to_chars_map,
            )
            grouped_entities = self.aggregate(pre_entities, aggregation_strategy)
            # Filter anything that is in ignore_labels.
            entities = [
                entity
                for entity in grouped_entities
                if entity.get("entity", None) not in ignore_labels
                and entity.get("entity_group", None) not in ignore_labels
            ]
            all_entities.extend(entities)
        num_chunks = len(all_outputs)
        if num_chunks > 1:
            all_entities = self.aggregate_overlapping_entities(all_entities)
        return all_entities
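
    # Sketch of the numerically stable softmax used above, with illustrative
    # values (not model output). Subtracting the row max before exponentiating
    # avoids overflow and leaves the result unchanged, since
    # exp(x - m) / sum(exp(x - m)) == exp(x) / sum(exp(x)):
    #
    #   logits = np.array([[2.0, 1.0, 0.1]])
    #   maxes = np.max(logits, axis=-1, keepdims=True)   # [[2.0]]
    #   shifted_exp = np.exp(logits - maxes)             # [[1.0, 0.368, 0.150]]
    #   shifted_exp / shifted_exp.sum(axis=-1, keepdims=True)
    #   # -> [[0.659, 0.242, 0.099]]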

    def aggregate_overlapping_entities(self, entities):
        if len(entities) == 0:
            return entities
        entities = sorted(entities, key=lambda x: x["start"])
        aggregated_entities = []
        previous_entity = entities[0]
        for entity in entities:
            if previous_entity["start"] <= entity["start"] < previous_entity["end"]:
                current_length = entity["end"] - entity["start"]
                previous_length = previous_entity["end"] - previous_entity["start"]
                if current_length > previous_length:
                    previous_entity = entity
                elif current_length == previous_length and entity["score"] > previous_entity["score"]:
                    previous_entity = entity
            else:
                aggregated_entities.append(previous_entity)
                previous_entity = entity
        aggregated_entities.append(previous_entity)
        return aggregated_entities
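
    # Illustrative resolution (hypothetical spans): entities covering
    # (start=0, end=10, score=0.9) and (start=5, end=9, score=0.99) overlap;
    # the first one wins because it is longer. The score only breaks ties
    # between overlapping spans of equal length.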

    def gather_pre_entities(
        self,
        sentence: str,
        input_ids: np.ndarray,
        scores: np.ndarray,
        offset_mapping: list[tuple[int, int]] | None,
        special_tokens_mask: np.ndarray,
        aggregation_strategy: AggregationStrategy,
        word_ids: list[int | None] | None = None,
        word_to_chars_map: list[tuple[int, int]] | None = None,
    ) -> list[dict]:
        """Fuse various numpy arrays into dicts with all the information needed for aggregation"""
        pre_entities = []
        for idx, token_scores in enumerate(scores):
            # Filter special_tokens
            if special_tokens_mask[idx]:
                continue

            word = self.tokenizer.convert_ids_to_tokens(int(input_ids[idx]))
            if offset_mapping is not None:
                start_ind, end_ind = offset_mapping[idx]
                # For pre-split words, shift the word-relative offsets to
                # character positions in the joined sentence.
                if word_ids is not None and word_to_chars_map is not None:
                    word_index = word_ids[idx]
                    if word_index is not None:
                        start_char, _ = word_to_chars_map[word_index]
                        start_ind += start_char
                        end_ind += start_char
                if not isinstance(start_ind, int):
                    start_ind = start_ind.item()
                    end_ind = end_ind.item()
                word_ref = sentence[start_ind:end_ind]
                if getattr(self.tokenizer, "_tokenizer", None) and getattr(
                    self.tokenizer._tokenizer.model, "continuing_subword_prefix", None
                ):
                    # This is a BPE, word-aware tokenizer, there is a correct way to fuse tokens.
                    is_subword = len(word) != len(word_ref)
                else:
                    # This is a fallback heuristic. It will most likely fail on any kind of text + punctuation
                    # mixtures that will be considered "words". Non word-aware models cannot do better than this,
                    # unfortunately.
                    if aggregation_strategy in {
                        AggregationStrategy.FIRST,
                        AggregationStrategy.AVERAGE,
                        AggregationStrategy.MAX,
                    }:
                        warnings.warn(
                            "Tokenizer does not support real words, using fallback heuristic",
                            UserWarning,
                        )
                    is_subword = start_ind > 0 and " " not in sentence[start_ind - 1 : start_ind + 1]

                if int(input_ids[idx]) == self.tokenizer.unk_token_id:
                    word = word_ref
                    is_subword = False
            else:
                start_ind = None
                end_ind = None
                is_subword = False

            pre_entity = {
                "word": word,
                "scores": token_scores,
                "start": start_ind,
                "end": end_ind,
                "index": idx,
                "is_subword": is_subword,
            }
            pre_entities.append(pre_entity)
        return pre_entities
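
    # Illustrative run of the fallback heuristic above (hypothetical offsets):
    # for sentence = "Microsoft", a token covering [5, 9) ("soft") yields
    # sentence[4:6] == "os", which contains no space, so it is marked as a
    # subword; a token that starts right after a space is not.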

    def aggregate(self, pre_entities: list[dict], aggregation_strategy: AggregationStrategy) -> list[dict]:
        if aggregation_strategy in {AggregationStrategy.NONE, AggregationStrategy.SIMPLE}:
            entities = []
            for pre_entity in pre_entities:
                entity_idx = pre_entity["scores"].argmax()
                score = pre_entity["scores"][entity_idx]
                entity = {
                    "entity": self.model.config.id2label[entity_idx],
                    "score": score,
                    "index": pre_entity["index"],
                    "word": pre_entity["word"],
                    "start": pre_entity["start"],
                    "end": pre_entity["end"],
                }
                entities.append(entity)
        else:
            entities = self.aggregate_words(pre_entities, aggregation_strategy)

        if aggregation_strategy == AggregationStrategy.NONE:
            return entities
        return self.group_entities(entities)
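
    # Output shape per strategy (illustrative): NONE yields one dict per token
    # with an "entity" key; SIMPLE merges adjacent tokens of the same tag into
    # "entity_group" dicts but may split words across groups; FIRST/MAX/AVERAGE
    # first force word-level agreement via aggregate_words() and then group.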

    def aggregate_word(self, entities: list[dict], aggregation_strategy: AggregationStrategy) -> dict:
        word = self.tokenizer.convert_tokens_to_string([entity["word"] for entity in entities])
        if aggregation_strategy == AggregationStrategy.FIRST:
            scores = entities[0]["scores"]
            idx = scores.argmax()
            score = scores[idx]
            entity = self.model.config.id2label[idx]
        elif aggregation_strategy == AggregationStrategy.MAX:
            max_entity = max(entities, key=lambda entity: entity["scores"].max())
            scores = max_entity["scores"]
            idx = scores.argmax()
            score = scores[idx]
            entity = self.model.config.id2label[idx]
        elif aggregation_strategy == AggregationStrategy.AVERAGE:
            scores = np.stack([entity["scores"] for entity in entities])
            average_scores = np.nanmean(scores, axis=0)
            entity_idx = average_scores.argmax()
            entity = self.model.config.id2label[entity_idx]
            score = average_scores[entity_idx]
        else:
            raise ValueError("Invalid aggregation_strategy")
        new_entity = {
            "entity": entity,
            "score": score,
            "word": word,
            "start": entities[0]["start"],
            "end": entities[-1]["end"],
        }
        return new_entity
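
    # Worked toy example with a hypothetical id2label = {0: "O", 1: "B-PER",
    # 2: "B-LOC"} and hypothetical scores for one word split into two tokens:
    #
    #   token 0 scores: [0.1, 0.6, 0.3]
    #   token 1 scores: [0.0, 0.1, 0.9]
    #
    # FIRST keeps token 0's argmax -> "B-PER" (0.6); MAX keeps the single
    # highest-scoring token -> "B-LOC" (0.9); AVERAGE uses the per-label mean
    # [0.05, 0.35, 0.6] -> "B-LOC" (0.6).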

    def aggregate_words(self, entities: list[dict], aggregation_strategy: AggregationStrategy) -> list[dict]:
        """
        Override tokens from a given word that disagree to force agreement on word boundaries.

        Example: micro|soft| com|pany| B-ENT I-NAME I-ENT I-ENT will be rewritten with first strategy as microsoft|
        company| B-ENT I-ENT
        """
        if aggregation_strategy in {AggregationStrategy.NONE, AggregationStrategy.SIMPLE}:
            raise ValueError("NONE and SIMPLE strategies are invalid for word aggregation")

        word_entities = []
        word_group = None
        for entity in entities:
            if word_group is None:
                word_group = [entity]
            elif entity["is_subword"]:
                word_group.append(entity)
            else:
                word_entities.append(self.aggregate_word(word_group, aggregation_strategy))
                word_group = [entity]
        # Last item
        if word_group is not None:
            word_entities.append(self.aggregate_word(word_group, aggregation_strategy))
        return word_entities
 d}|S )z
        Group together the adjacent tokens with the same entity predicted.

        Args:
            entities (`dict`): The entities predicted by the pipeline.
        r   r~   -r	   r{   c                 S   r   )r   r    r   r    r    r!   r   1  r   zBTokenClassificationPipeline.group_sub_entities.<locals>.<listcomp>c                 S   r   r   r    r   r    r    r!   r   2  r   r   r   )r   r   ro   r   r   )splitr   r   meanrF   r   )r   r   r~   r   tokensr   r    r    r!   group_sub_entities(  s   



    def get_tag(self, entity_name: str) -> tuple[str, str]:
        if entity_name.startswith("B-"):
            bi = "B"
            tag = entity_name[2:]
        elif entity_name.startswith("I-"):
            bi = "I"
            tag = entity_name[2:]
        else:
            # It's not in B-, I- format; default to I- for continuation.
            bi = "I"
            tag = entity_name
        return bi, tag
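
    # Examples: get_tag("B-PER") -> ("B", "PER"); get_tag("I-PER") ->
    # ("I", "PER"); a bare "PER" defaults to ("I", "PER"), so label schemes
    # without B-/I- prefixes still merge adjacent identical tags.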

    def group_entities(self, entities: list[dict]) -> list[dict]:
        """
        Find and group together the adjacent tokens with the same entity predicted.

        Args:
            entities (`dict`): The entities predicted by the pipeline.
        """

        entity_groups = []
        entity_group_disagg = []

        for entity in entities:
            if not entity_group_disagg:
                entity_group_disagg.append(entity)
                continue

            # If the current entity is similar and adjacent to the previous entity,
            # append it to the disaggregated entity group.
            # The split is meant to account for the "B" and "I" prefixes.
            # Shouldn't merge if both entities are B-type.
            bi, tag = self.get_tag(entity["entity"])
            last_bi, last_tag = self.get_tag(entity_group_disagg[-1]["entity"])

            if tag == last_tag and bi != "B":
                # Modify subword type to be previous_type
                entity_group_disagg.append(entity)
            else:
                # If the current entity is different from the previous entity,
                # aggregate the disaggregated entity group.
                entity_groups.append(self.group_sub_entities(entity_group_disagg))
                entity_group_disagg = [entity]
        if entity_group_disagg:
            # It's the last entity, add it to the entity groups.
            entity_groups.append(self.group_sub_entities(entity_group_disagg))

        return entity_groups
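
    # Illustrative grouping (hypothetical tags): the per-token sequence
    # B-PER, I-PER, B-PER yields two entity groups, because a "B" prefix
    # always starts a new entity even when its tag matches the previous one.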

NerPipeline = TokenClassificationPipeline