
import enum
import itertools
import types
from typing import Dict

from ..generation import GenerationConfig
from ..utils import ModelOutput, add_end_docstrings, is_tf_available, is_torch_available
from .base import Pipeline, build_pipeline_init_args


if is_torch_available():
    import torch

    from ..models.auto.modeling_auto import MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
    from .pt_utils import KeyDataset

if is_tf_available():
    import tensorflow as tf

    from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_CAUSAL_LM_MAPPING_NAMES


class ReturnType(enum.Enum):
    TENSORS = 0
    NEW_TEXT = 1
    FULL_TEXT = 2


class Chat:
    """This class is intended to just be used internally in this pipeline and not exposed to users. We convert chats
    to this format because the rest of the pipeline code tends to assume that lists of messages are
    actually a batch of samples rather than messages in the same conversation."""

    def __init__(self, messages: Dict):
        for message in messages:
            if not ("role" in message and "content" in message):
                raise ValueError("When passing chat dicts as input, each dict must have a 'role' and 'content' key.")
        self.messages = messages


@add_end_docstrings(build_pipeline_init_args(has_tokenizer=True))
class TextGenerationPipeline(Pipeline):
    """
Language generation pipeline using any `ModelWithLMHead` or `ModelForCausalLM`. This pipeline predicts the words
that will follow a specified text prompt. When the underlying model is a conversational model, it can also accept
one or more chats, in which case the pipeline will operate in chat mode and will continue the chat(s) by adding
its response(s). Each chat takes the form of a list of dicts, where each dict contains "role" and "content" keys.

Unless the model you're using explicitly sets these generation parameters in its configuration files
(`generation_config.json`), the following default values will be used:
- max_new_tokens: 256
- do_sample: True
- temperature: 0.7

Examples:

```python
>>> from transformers import pipeline

>>> generator = pipeline(model="openai-community/gpt2")
>>> generator("I can't believe you did such a ", do_sample=False)
[{'generated_text': "I can't believe you did such a icky thing to me. I'm so sorry. I'm so sorry. I'm so sorry. I'm so sorry. I'm so sorry. I'm so sorry. I'm so sorry. I"}]

>>> # These parameters return 4 suggestions containing only the newly created text, which makes them easier to use as prompt completions.
>>> outputs = generator("My tart needs some", num_return_sequences=4, return_full_text=False)
```
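
Generation keyword arguments passed to the pipeline call are forwarded to `generate`, so the defaults listed
above can be overridden per call (a minimal sketch, reusing the `generator` from the example above; exact output
depends on the checkpoint and sampling):

```python
>>> generator("I can't believe you did such a ", do_sample=True, temperature=0.2, max_new_tokens=20)
```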

```python
>>> from transformers import pipeline

>>> generator = pipeline(model="HuggingFaceH4/zephyr-7b-beta")
>>> # Zephyr-beta is a conversational model, so let's pass it a chat instead of a single string
>>> generator([{"role": "user", "content": "What is the capital of France? Answer in one word."}], do_sample=False, max_new_tokens=2)
[{'generated_text': [{'role': 'user', 'content': 'What is the capital of France? Answer in one word.'}, {'role': 'assistant', 'content': 'Paris'}]}]
```
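
A chat that ends with an `assistant` message is treated as a prefill: the model continues that final message
instead of starting a new one (a sketch of `continue_final_message`, described under `__call__` below):

```python
>>> chat = [
...     {"role": "user", "content": "What is the capital of France? Answer in one word."},
...     {"role": "assistant", "content": "The capital is"},
... ]
>>> outputs = generator(chat, continue_final_message=True, max_new_tokens=3)
```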

Learn more about the basics of using a pipeline in the [pipeline tutorial](../pipeline_tutorial). You can pass text
generation parameters to this pipeline to control stopping criteria, decoding strategy, and more. Learn more about
text generation parameters in [Text generation strategies](../generation_strategies) and [Text
generation](text_generation).

This language generation pipeline can currently be loaded from [`pipeline`] using the following task identifier:
`"text-generation"`.

The models that this pipeline can use are models that have been trained with an autoregressive language modeling
objective. See the list of available [text completion models](https://huggingface.co/models?filter=text-generation)
and the list of [conversational models](https://huggingface.co/models?other=conversational)
on [huggingface.co/models].
    """

    XL_PREFIX = """
    In 1991, the remains of Russian Tsar Nicholas II and his family (except for Alexei and Maria) are discovered. The
    voice of Nicholas's young son, Tsarevich Alexei Nikolaevich, narrates the remainder of the story. 1883 Western
    Siberia, a young Grigori Rasputin is asked by his father and a group of men to perform magic. Rasputin has a vision
    and denounces one of the men as a horse thief. Although his father initially slaps him for making such an
    accusation, Rasputin watches as the man is chased outside and beaten. Twenty years later, Rasputin sees a vision of
    the Virgin Mary, prompting him to become a priest. Rasputin quickly becomes famous, with people, even a bishop,
    begging for his blessing. <eod> </s> <eos>
    """

    _pipeline_calls_generate = True
    # Make sure the docstring above stays in sync with these defaults
    _default_generation_config = GenerationConfig(
        max_new_tokens=256,
        do_sample=True,
        temperature=0.7,
    )

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.check_model_type(
            TF_MODEL_FOR_CAUSAL_LM_MAPPING_NAMES if self.framework == "tf" else MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
        )
        if "prefix" not in self._preprocess_params:
            # The default prefix is model-dependent and defines both preprocess and generate kwargs,
            # which is why it has to be resolved here rather than in the respective methods.
            prefix = None
            if self.prefix is not None:
                prefix = self.prefix
            if prefix is None and self.model.__class__.__name__ in [
                "XLNetLMHeadModel",
                "TransfoXLLMHeadModel",
                "TFXLNetLMHeadModel",
                "TFTransfoXLLMHeadModel",
            ]:
                # For XLNet and TransformerXL we add an article to the prompt to give more state to the model.
                prefix = self.XL_PREFIX
            if prefix is not None:
                # Recalculate some generate_kwargs linked to prefix.
                preprocess_params, forward_params, _ = self._sanitize_parameters(prefix=prefix, **self._forward_params)
                self._preprocess_params = {**self._preprocess_params, **preprocess_params}
                self._forward_params = {**self._forward_params, **forward_params}
    def _sanitize_parameters(
        self,
        return_full_text=None,
        return_tensors=None,
        return_text=None,
        return_type=None,
        clean_up_tokenization_spaces=None,
        prefix=None,
        handle_long_generation=None,
        stop_sequence=None,
        truncation=None,
        max_length=None,
        continue_final_message=None,
        **generate_kwargs,
    ):
        preprocess_params = {}

        add_special_tokens = False
        if "add_special_tokens" in generate_kwargs:
            add_special_tokens = preprocess_params["add_special_tokens"] = generate_kwargs.pop("add_special_tokens")

        if "padding" in generate_kwargs:
            preprocess_params["padding"] = generate_kwargs.pop("padding")

        if truncation is not None:
            preprocess_params["truncation"] = truncation

        if max_length is not None:
            preprocess_params["max_length"] = max_length
            generate_kwargs["max_length"] = max_length

        if prefix is not None:
            preprocess_params["prefix"] = prefix
        if prefix:
            prefix_inputs = self.tokenizer(
                prefix, padding=False, add_special_tokens=add_special_tokens, return_tensors=self.framework
            )
            generate_kwargs["prefix_length"] = prefix_inputs["input_ids"].shape[-1]

        if handle_long_generation is not None:
            if handle_long_generation not in {"hole"}:
                raise ValueError(
                    f"{handle_long_generation} is not a valid value for `handle_long_generation` parameter expected"
                    " [None, 'hole']"
                )
            preprocess_params["handle_long_generation"] = handle_long_generation

        if continue_final_message is not None:
            preprocess_params["continue_final_message"] = continue_final_message

        preprocess_params.update(generate_kwargs)
        forward_params = generate_kwargs

        postprocess_params = {}
        if return_full_text is not None and return_type is None:
            if return_text is not None:
                raise ValueError("`return_text` is mutually exclusive with `return_full_text`")
            if return_tensors is not None:
                raise ValueError("`return_full_text` is mutually exclusive with `return_tensors`")
            return_type = ReturnType.FULL_TEXT if return_full_text else ReturnType.NEW_TEXT
        if return_tensors is not None and return_type is None:
            if return_text is not None:
                raise ValueError("`return_text` is mutually exclusive with `return_tensors`")
            return_type = ReturnType.TENSORS
        if return_type is not None:
            postprocess_params["return_type"] = return_type
        if clean_up_tokenization_spaces is not None:
            postprocess_params["clean_up_tokenization_spaces"] = clean_up_tokenization_spaces
        if continue_final_message is not None:
            postprocess_params["continue_final_message"] = continue_final_message

        if stop_sequence is not None:
            stop_sequence_ids = self.tokenizer.encode(stop_sequence, add_special_tokens=False)
            generate_kwargs["eos_token_id"] = stop_sequence_ids

        if self.assistant_model is not None:
            forward_params["assistant_model"] = self.assistant_model
        if self.assistant_tokenizer is not None:
            forward_params["tokenizer"] = self.tokenizer
            forward_params["assistant_tokenizer"] = self.assistant_tokenizer

        return preprocess_params, forward_params, postprocess_params

    def _parse_and_tokenize(self, *args, **kwargs):
        """
        Parse arguments and tokenize
        """
        # Parse arguments
        if self.model.__class__.__name__ in ["TransfoXLLMHeadModel"]:
            kwargs.update({"add_space_before_punct_symbol": True})

        return super()._parse_and_tokenize(*args, **kwargs)

    def __call__(self, text_inputs, **kwargs):
        """
Complete the prompt(s) given as inputs.

Args:
    text_inputs (`str`, `List[str]`, List[Dict[str, str]], or `List[List[Dict[str, str]]]`):
        One or several prompts (or one list of prompts) to complete. If strings or a list of string are
        passed, this pipeline will continue each prompt. Alternatively, a "chat", in the form of a list
        of dicts with "role" and "content" keys, can be passed, or a list of such chats. When chats are passed,
        the model's chat template will be used to format them before passing them to the model.
    return_tensors (`bool`, *optional*, defaults to `False`):
        Returns the tensors of predictions (as token indices) in the outputs. If set to
        `True`, the decoded text is not returned.
    return_text (`bool`, *optional*):
        Returns the decoded texts in the outputs.
    return_full_text (`bool`, *optional*, defaults to `True`):
        If set to `False`, only added text is returned; otherwise the full text is returned. Cannot be
        specified at the same time as `return_text`.
    clean_up_tokenization_spaces (`bool`, *optional*, defaults to `True`):
        Whether or not to clean up the potential extra spaces in the text output.
    continue_final_message (`bool`, *optional*): This indicates that you want the model to continue the
        last message in the input chat rather than starting a new one, allowing you to "prefill" its response.
        By default this is `True` when the final message in the input chat has the `assistant` role and
        `False` otherwise, but you can manually override that behaviour by setting this flag.
    prefix (`str`, *optional*):
        Prefix added to prompt.
    handle_long_generation (`str`, *optional*):
        By default, this pipeline does not handle long generation (generation that exceeds, in one form or
        another, the model's maximum length). There is no perfect way to address this (for more info, see
        https://github.com/huggingface/transformers/issues/14033#issuecomment-948385227). This provides common
        strategies to work around that problem depending on your use case.

        - `None` : default strategy where nothing in particular happens
        - `"hole"`: Truncates left of input, and leaves a gap wide enough to let generation happen (might
          truncate a lot of the prompt and not suitable when generation exceed the model capacity)
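          As a worked example: with `tokenizer.model_max_length = 1024`, a 1000-token prompt and
          `max_new_tokens=100`, `"hole"` keeps only the last 1024 - 100 = 924 prompt tokens before generating.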
    generate_kwargs (`dict`, *optional*):
        Additional keyword arguments to pass along to the generate method of the model (see the generate method
        corresponding to your framework [here](./text_generation)).

Return:
    A list or a list of lists of `dict`: Returns one of the following dictionaries (cannot return a combination
    of both `generated_text` and `generated_token_ids`):

    - **generated_text** (`str`, present when `return_text=True`) -- The generated text.
    - **generated_token_ids** (`torch.Tensor` or `tf.Tensor`, present when `return_tensors=True`) -- The token
      ids of the generated text.
c              3   $   #    U  H  ov   M     g 7fNr   ).0xs     r   	<genexpr>2TextGenerationPipeline.__call__.<locals>.<genexpr>!  s     *Bk1ks   r   c              3   8   #    U  H  n[        U5      v   M     g 7frm   )r   )rn   chats     r   rp   rq   )  s     @KDT$ZZKs   )
isinstancer	   listtupletypesGeneratorTyper   	itertoolsteenextdictr:   __call__r   )r&   text_inputsrD   rG   
first_itemchatsr?   s         r   r}   TextGenerationPipeline.__call__   s   ^ !## 5%--z:u223	
 
 +u':':;;!*{!;*Bk*BDGZ(^
*tUD&9::j$// 7+D,=HHH@K@E!+u/B/BCC$w/@@@$w/UFvFFw6v66r   c	                 N   UUUUS.n
U
R                  5        VVs0 s H  u  pUc  M
  X_M     n
nn[        U[        5      (       af  U
R                  SS 5        Uc  UR                  S   S   S:H  nU R
                  R                  " UR                  4U(       + USU R                  S.U
D6nO!U R
                  " X!-   4SU R                  0U
D6nXS	'   US
:X  a  US   R                  S   nSU	;   a  U	S   nO:U	R                  SU R                  R                  5      U-
  nUS:  a  [        S5      eX-   U R
                  R                  :  aT  U R
                  R                  U-
  nUS::  a  [        S5      eUS   S S 2U* S 24   US'   SU;   a  US   S S 2U* S 24   US'   U$ s  snnf )N)rJ   rL   rK   rM   rJ   rP   r#   	assistantT)add_generation_promptrT   return_dictrN   rN   prompt_textrR   rO   r0   rM   r   z0We cannot infer how many new tokens are expectedziWe cannot use `hole` to handle this generation the number of desired tokens exceeds the models max lengthattention_mask)itemsrt   r   r[   r!   rY   apply_chat_templater<   r\   getgeneration_configrM   r%   model_max_length)r&   r   r5   rS   rJ   rL   rK   rM   rT   rb   tokenizer_kwargskeyvalueinputscur_len
new_tokenskeep_lengths                    r   
preprocess!TextGenerationPipeline.preprocess0  s    #5$$	
 :J9O9O9Qg9Q:3UZJCJ9Qgk4((  !5t< &-)4)=)=b)A&)I[)X&^^77$$*@&@'= #~~ #F ^^F$8ll[klF +}!V+[)//3G?2,-=>
,00t?U?U?`?`adkk
>$%WXX#dnn&E&EE"nn==
J!#$- 
 '-[&9!k\]:J&K{##v-/56F/GK<=HX/YF+,S hs
   	F!F!c                    US   nUR                  SS 5      nUR                  S   S:X  a  S nS nSnOUR                  S   nUR                  S5      nUR                  SS5      nUS:  a  SU;   =(       d    SU;   =(       a    US   R                  S LnU(       d>  UR                  S	5      =(       d    U R                  R
                  US	'   US	==   U-  ss'   S
U;   =(       d    SU;   =(       a    US   R                  S Ln	U	(       d  SU;   a  US==   U-  ss'   SU;  a  U R                  US'   U R                  R                  " SX4S.UD6n
[        U
[        5      (       Ga  U
R                  nU
R                  5        VVs0 s H  u  pUS;  d  M  X_M     nnnUR                  S   nU R                  S:X  a  UR                  5        H  u  nn[        U[        R                  5      (       a9  UR                  S   U:X  a&  UR                   " X_U-  /UR                  SS  Q76 UU'   [        U["        5      (       d  Mu  [%        US   5      U:X  d  M  [        R&                  " U5      R)                  SS5      nUUU'   M     OU R                  S:X  a  UR                  5        H  u  nn[        U[*        R                  5      (       aA  UR                  S   U:X  a.  [*        R                   " UX_U-  /UR                  SS  Q75      UU'   [        U["        5      (       d  M}  [%        US   5      U:X  d  M  [*        R&                  " U5      R)                  SS5      nUUU'   M     OU
n0 nUR                  S   nU R                  S:X  a$  UR                   " X_U-  /UR                  SS  Q76 nO:U R                  S:X  a*  [*        R                   " XX-  /UR                  SS  Q75      nUUUS.nU(       a  UR-                  SU05        U$ s  snnf )NrO   r   r
   r   r   rQ   r0   r   rM   min_new_tokens
min_length)rO   r   >   	sequencespast_key_valuesptr4   )generated_sequencerO   r   additional_outputsr   )r   r\   r[   r0   r   rM   r   r>   generatert   r   r   r   r<   torchTensorreshaperv   lenstackswapaxesr4   r]   )r&   model_inputsrb   rO   r   in_br   rQ   has_max_new_tokenshas_min_new_tokensoutputr   kvother_outputsout_br   r   model_outputss                      r   _forwardTextGenerationPipeline._forwardn  s    -	%))*:DA??1"I!ND??1%D"&&}5 (++OQ?1!1_!D "#6 T#$78GGtS  &0?0C0CL0Q0vUYUkUkUvUv-->-!1_!D "#6 T#$78GGtS  &,/*I->- o5373I3IO/0$$kyk[jkfk**!'!1!1.4llnjndaIi@iTQTnMj&,,Q/E~~%"/"5"5"7JC!%665;;q>U;R-2]]4$-aQVQ\Q\]^]_Q`-ac*!%//CaMU4J %E 2 ; ;Aq A-2c* #8 4'"/"5"5"7JC!%33A%8O-/ZZtm?fV[VaVabcbdVe?f-gc*!%//CaMU4J " 8 8A >-2c* #8 "(M"((+>>T!!3!;!;D4-!oRdRjRjklkmRn!o^^t#!#,>u}@tWiWoWopqprWs@t!u #5"&

   "6!FGE ks   PPc           	         US   S   nUS   nUS   nUR                  5       R                  5       n/ nUR                  S0 5      n	0 n
U	(       Ga  U R                  S:X  ay  U	R	                  5        Hd  u  p[        U[        R                  5      (       d  M&  UR                  S   [        U5      :X  d  MD  UR                  5       R                  5       X'   Mf     OU R                  S:X  ax  U	R	                  5        Hd  u  p[        U[        R                  5      (       d  M&  UR                  S   [        U5      :X  d  MD  UR                  5       R                  5       X'   Mf     [        U5       GH  u  pU[        R                  :X  a  SU0nGOZU[        R                  [        R                  1;   Ga5  U R                   R#                  US	US
9nUc  SnO'[        U R                   R#                  US   S	US
95      nUUS  nU[        R                  :X  a  [        U[$        5      (       a  UU-   nO[        U[&        5      (       a~  Uc  UR(                  S   S   S:H  nU(       aC  [+        UR(                  5      S S UR(                  S   S   UR(                  S   S   U-   S./-   nO[+        UR(                  5      SUS./-   nSU0nU
R	                  5        H  u  nnUU   UU'   M     UR-                  W5        GM     U$ )Nr   r   rO   r   r   r   r4   generated_token_idsT)skip_special_tokensrV   rP   r#   r   r$   )r#   r$   generated_text)numpytolistr   r<   r   rt   r   r   r\   r   r4   	enumerater   r   r   r   rY   decodestrr   r!   ru   append)r&   r   rU   rV   rT   r   rO   r   recordsr   splitted_keysr   r   idxsequencerecordtextprompt_lengthall_textr   valuess                        r   postprocess"TextGenerationPipeline.postprocess  s    ++?@C!+.	#M2/557>>@%))*>C~~%)//1DA!!U\\22qwwqzSI[E\7\+,779+;+;+=( 2 4')//1DA!!RYY//AGGAJ#FXBY4Y+,779+;+;+=( 2 ''9:MCj000/:!4!4j6J6J KK~~,,(,1M -  $$%M$'--%aL049U . %M  /*"6"66!+s33#.#9#K6619 6A5I5I"5Mf5UYd5d21'+K,@,@'A#2'F,7,@,@,DV,L/:/C/CB/G	/RU]/]!"J (H (,K,@,@'AkfnEoDp'pH*H5#0#6#6#8KC"(+F3K $9NN6"] ;` r   )rB   r=   )NNNNNNNNNNN) NNNNNN)r   r   r   r   r*   r@   _pipeline_calls_generater   _default_generation_configr(   rA   ri   r}   r   r   r   r   r   r   __classcell__)r?   s   @r   r-   r-   )   s    .hI  $!1"R: %)##QEh<D7R ##<|HZ ((%)#H Hr   r-   )enumry   rw   typingr   
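

if __name__ == "__main__":
    # Minimal usage sketch, not part of the pipeline itself (an assumption for illustration:
    # the `openai-community/gpt2` checkpoint is downloadable; any causal LM checkpoint works).
    from transformers import pipeline

    generator = pipeline("text-generation", model="openai-community/gpt2")
    print(generator("Hello, I'm a language model,", max_new_tokens=10, do_sample=False))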