
"""
Doc utilities: Utilities related to documentation
    N)OrderedDictc                     [         R                  " U 5      (       a  g[         R                  " U 5      nUR                  5       S   n[	        U5      [	        UR                  5       5      -
  nSU-   $ )z^Return the indentation level of the start of the docstring of a class or function (or method).   r   )inspectisclass	getsource
splitlineslenlstrip)funcsource
first_linefunction_def_levels       N/var/www/auris/envauris/lib/python3.13/site-packages/transformers/utils/doc.pyget_docstring_indentation_levelr      sb     tt$F""$Q'JZ3z/@/@/B+CC!!!    c                     ^  U 4S jnU$ )Nc                 l   > SR                  T5      U R                  b  U R                  OS-   U l        U $ N )join__doc__fndocstrs    r   docstring_decorator1add_start_docstrings.<locals>.docstring_decorator&   s,    WWV_bjj6L

RTU
	r    r   r   s   ` r   add_start_docstringsr    %        r   c                     ^  U 4S jnU$ )Nc                   > SU R                   R                  S5      S    S3nSU S3n[        U 5      nU R                  b  U R                  OSn [	        S UR                  5        5       5      n[        U5      [        UR                  5       5      -
  nT
nUS	U-   :X  al  T
 Vs/ s H1  n[        R                  " [        R                  " U5      S
U-  5      PM3     nn[        R                  " [        R                  " U5      S
U-  5      nSR                  U5      U-   n	X)-   U l        U $ ! [         a    Un Nf = fs  snf )Nz[`.r   z`]z    The aa   forward method, overrides the `__call__` special method.

    <Tip>

    Although the recipe for forward pass needs to be defined within this function, one should call the [`Module`]
    instance afterwards instead of this since the former takes care of running the pre and post processing steps while
    the latter silently ignores them.

    </Tip>
r   c              3   P   #    U  H  oR                  5       S :w  d  M  Uv   M     g7f)r   N)strip).0lines     r   	<genexpr>Uadd_start_docstrings_to_model_forward.<locals>.docstring_decorator.<locals>.<genexpr>>   s#     "c4LDPZPZP\`bPb444Ls   &	&r    )__qualname__splitr   r   nextr	   r
   r   StopIterationtextwrapindentdedentr   )r   
class_nameintrocorrect_indentationcurrent_docfirst_non_emptydoc_indentationdocsdoc	docstringr   s             r   r   Badd_start_docstrings_to_model_forward.<locals>.docstring_decorator.   s8   "////4Q78;
j\ 	* 	 >bA$&JJ$:bjj	2""cK4J4J4L"ccO!/2S9O9O9Q5RRO  a"555`fg`fY\HOOHOOC$8#@S:ST`fDgOOHOOE$:CBU<UVEGGDMK/	&
	  	21O	2 hs   AD, %8D>,D;:D;r   r   s   ` r   %add_start_docstrings_to_model_forwardr=   -   s    @ r   c                     ^  U 4S jnU$ )Nc                 l   > U R                   b  U R                   OSSR                  T5      -   U l         U $ r   )r   r   r   s    r   r   /add_end_docstrings.<locals>.docstring_decoratorR   s+    $&JJ$:bjjbggfoU
	r   r   r   s   ` r   add_end_docstringsrA   Q   r!   r   a:  
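

# A minimal usage sketch of these decorators (hypothetical method and strings, for
# illustration only):
#
#     @add_start_docstrings("Intro shared by several models.", " More shared details.")
#     def forward(self, input_ids=None):
#         """Docstring specific to this method."""
#
# After decoration, `forward.__doc__` starts with the shared strings followed by the
# method's own docstring; `add_end_docstrings` appends them instead.


PT_RETURN_INTRODUCTION = r"""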
    Returns:
        [`{full_output_type}`] or `tuple(torch.FloatTensor)`: A [`{full_output_type}`] or a tuple of
        `torch.FloatTensor` (if `return_dict=False` is passed or when `config.return_dict=False`) comprising various
        elements depending on the configuration ([`{config_class}`]) and inputs.

"""

TF_RETURN_INTRODUCTION = r"""
    Returns:
        [`{full_output_type}`] or `tuple(tf.Tensor)`: A [`{full_output_type}`] or a tuple of `tf.Tensor` (if
        `return_dict=False` is passed or when `config.return_dict=False`) comprising various elements depending on the
        configuration ([`{config_class}`]) and inputs.

"""


def _get_indent(t):
    """Returns the indentation in the first line of t"""
    search = re.search(r"^(\s*)\S", t)
    return "" if search is None else search.groups()[0]


def _convert_output_args_doc(output_args_doc):
    """Convert output_args_doc to display properly."""
    # Split output_args_doc in blocks argument/description
    indent = _get_indent(output_args_doc)
    blocks = []
    current_block = ""
    for line in output_args_doc.split("\n"):
        # If the indent is the same as the beginning, the line is the name of a new arg.
        if _get_indent(line) == indent:
            if len(current_block) > 0:
                blocks.append(current_block[:-1])
            current_block = f"{line}\n"
        else:
            # Otherwise it's part of the description of the current arg.
            # We need to remove 2 spaces from the indentation.
            current_block += f"{line[2:]}\n"
    blocks.append(current_block[:-1])

    # Format each block for proper rendering
    for i in range(len(blocks)):
        blocks[i] = re.sub(r"^(\s+)(\S+)(\s+)", r"\1- **\2**\3", blocks[i])
        blocks[i] = re.sub(r":\s*\n\s*(\S)", r" -- \1", blocks[i])

    return "\n".join(blocks)


def _prepare_output_docstrings(output_type, config_class, min_indent=None, add_intro=True):
    """
    Prepares the return part of the docstring using `output_type`.
    """
    output_docstring = output_type.__doc__
    params_docstring = None
    if output_docstring is not None:
        # Remove the head of the docstring to keep the list of args only.
        lines = output_docstring.split("\n")
        i = 0
        while i < len(lines) and re.search(r"^\s*(Args|Parameters):\s*$", lines[i]) is None:
            i += 1
        if i < len(lines):
            params_docstring = "\n".join(lines[(i + 1) :])
            params_docstring = _convert_output_args_doc(params_docstring)
    if params_docstring is None and add_intro:
        raise ValueError(
            f"No `Args` or `Parameters` section is found in the docstring of `{output_type.__name__}`. Make sure it "
            "has a docstring and contains either `Args` or `Parameters`."
        )

    # Add the return introduction
    if add_intro:
        full_output_type = f"{output_type.__module__}.{output_type.__name__}"
        intro = TF_RETURN_INTRODUCTION if output_type.__name__.startswith("TF") else PT_RETURN_INTRODUCTION
        intro = intro.format(full_output_type=full_output_type, config_class=config_class)
    else:
        full_output_type = str(output_type)
        intro = f"\nReturns:\n    `{full_output_type}`:\n"

    result = intro
    if params_docstring is not None:
        result += params_docstring

    # Apply the minimum indent if necessary.
    if min_indent is not None:
        lines = result.split("\n")
        # Find the indent of the first nonempty line.
        i = 0
        while len(lines[i]) == 0:
            i += 1
        indent = len(_get_indent(lines[i]))
        # If it is too small, add indentation to all nonempty lines.
        if indent < min_indent:
            to_add = " " * (min_indent - indent)
            lines = [(f"{to_add}{line}" if len(line) > 0 else line) for line in lines]
            result = "\n".join(lines)

    return result


FAKE_MODEL_DISCLAIMER = r"""
    <Tip warning={true}>

    This example uses a random model as the real ones are all very big. To get proper results, you should use
    {real_checkpoint} instead of {fake_checkpoint}. If you get out-of-memory when loading that checkpoint, you can try
    adding `device_map="auto"` in the `from_pretrained` call.

    </Tip>
"""
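
# Each sample below is a plain `str.format` template: placeholders like `{model_class}`,
# `{checkpoint}`, `{expected_output}` and `{expected_loss}` are filled in by
# `add_code_sample_docstrings` at the end of this module. A hedged sketch of that
# substitution, with made-up values:
#
#     PT_TOKEN_CLASSIFICATION_SAMPLE.format(
#         model_class="BertForTokenClassification",
#         checkpoint="some-org/some-checkpoint",  # hypothetical checkpoint name
#         expected_output="['O', 'I-ORG']",
#         expected_loss="0.01",
#     )

PT_TOKEN_CLASSIFICATION_SAMPLE = r"""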
    Example:

    ```python
    >>> from transformers import AutoTokenizer, {model_class}
    >>> import torch

    >>> tokenizer = AutoTokenizer.from_pretrained("{checkpoint}")
    >>> model = {model_class}.from_pretrained("{checkpoint}")

    >>> inputs = tokenizer(
    ...     "HuggingFace is a company based in Paris and New York", add_special_tokens=False, return_tensors="pt"
    ... )

    >>> with torch.no_grad():
    ...     logits = model(**inputs).logits

    >>> predicted_token_class_ids = logits.argmax(-1)

    >>> # Note that tokens are classified rather than input words which means that
    >>> # there might be more predicted token classes than words.
    >>> # Multiple token classes might account for the same word
    >>> predicted_tokens_classes = [model.config.id2label[t.item()] for t in predicted_token_class_ids[0]]
    >>> predicted_tokens_classes
    {expected_output}

    >>> labels = predicted_token_class_ids
    >>> loss = model(**inputs, labels=labels).loss
    >>> round(loss.item(), 2)
    {expected_loss}
    ```
"""

PT_QUESTION_ANSWERING_SAMPLE = r"""
    Example:

    ```python
    >>> from transformers import AutoTokenizer, {model_class}
    >>> import torch

    >>> tokenizer = AutoTokenizer.from_pretrained("{checkpoint}")
    >>> model = {model_class}.from_pretrained("{checkpoint}")

    >>> question, text = "Who was Jim Henson?", "Jim Henson was a nice puppet"

    >>> inputs = tokenizer(question, text, return_tensors="pt")
    >>> with torch.no_grad():
    ...     outputs = model(**inputs)

    >>> answer_start_index = outputs.start_logits.argmax()
    >>> answer_end_index = outputs.end_logits.argmax()

    >>> predict_answer_tokens = inputs.input_ids[0, answer_start_index : answer_end_index + 1]
    >>> tokenizer.decode(predict_answer_tokens, skip_special_tokens=True)
    {expected_output}

    >>> # target is "nice puppet"
    >>> target_start_index = torch.tensor([{qa_target_start_index}])
    >>> target_end_index = torch.tensor([{qa_target_end_index}])

    >>> outputs = model(**inputs, start_positions=target_start_index, end_positions=target_end_index)
    >>> loss = outputs.loss
    >>> round(loss.item(), 2)
    {expected_loss}
    ```
"""

PT_SEQUENCE_CLASSIFICATION_SAMPLE = r"""
    Example of single-label classification:

    ```python
    >>> import torch
    >>> from transformers import AutoTokenizer, {model_class}

    >>> tokenizer = AutoTokenizer.from_pretrained("{checkpoint}")
    >>> model = {model_class}.from_pretrained("{checkpoint}")

    >>> inputs = tokenizer("Hello, my dog is cute", return_tensors="pt")

    >>> with torch.no_grad():
    ...     logits = model(**inputs).logits

    >>> predicted_class_id = logits.argmax().item()
    >>> model.config.id2label[predicted_class_id]
    {expected_output}

    >>> # To train a model on `num_labels` classes, you can pass `num_labels=num_labels` to `.from_pretrained(...)`
    >>> num_labels = len(model.config.id2label)
    >>> model = {model_class}.from_pretrained("{checkpoint}", num_labels=num_labels)

    >>> labels = torch.tensor([1])
    >>> loss = model(**inputs, labels=labels).loss
    >>> round(loss.item(), 2)
    {expected_loss}
    ```

    Example of multi-label classification:

    ```python
    >>> import torch
    >>> from transformers import AutoTokenizer, {model_class}

    >>> tokenizer = AutoTokenizer.from_pretrained("{checkpoint}")
    >>> model = {model_class}.from_pretrained("{checkpoint}", problem_type="multi_label_classification")

    >>> inputs = tokenizer("Hello, my dog is cute", return_tensors="pt")

    >>> with torch.no_grad():
    ...     logits = model(**inputs).logits

    >>> predicted_class_ids = torch.arange(0, logits.shape[-1])[torch.sigmoid(logits).squeeze(dim=0) > 0.5]

    >>> # To train a model on `num_labels` classes, you can pass `num_labels=num_labels` to `.from_pretrained(...)`
    >>> num_labels = len(model.config.id2label)
    >>> model = {model_class}.from_pretrained(
    ...     "{checkpoint}", num_labels=num_labels, problem_type="multi_label_classification"
    ... )

    >>> labels = torch.sum(
    ...     torch.nn.functional.one_hot(predicted_class_ids[None, :].clone(), num_classes=num_labels), dim=1
    ... ).to(torch.float)
    >>> loss = model(**inputs, labels=labels).loss
    ```
"""

PT_MASKED_LM_SAMPLE = r"""
    Example:

    ```python
    >>> from transformers import AutoTokenizer, {model_class}
    >>> import torch

    >>> tokenizer = AutoTokenizer.from_pretrained("{checkpoint}")
    >>> model = {model_class}.from_pretrained("{checkpoint}")

    >>> inputs = tokenizer("The capital of France is {mask}.", return_tensors="pt")

    >>> with torch.no_grad():
    ...     logits = model(**inputs).logits

    >>> # retrieve index of {mask}
    >>> mask_token_index = (inputs.input_ids == tokenizer.mask_token_id)[0].nonzero(as_tuple=True)[0]

    >>> predicted_token_id = logits[0, mask_token_index].argmax(axis=-1)
    >>> tokenizer.decode(predicted_token_id)
    {expected_output}

    >>> labels = tokenizer("The capital of France is Paris.", return_tensors="pt")["input_ids"]
    >>> # mask labels of non-{mask} tokens
    >>> labels = torch.where(inputs.input_ids == tokenizer.mask_token_id, labels, -100)

    >>> outputs = model(**inputs, labels=labels)
    >>> round(outputs.loss.item(), 2)
    {expected_loss}
    ```
"""

PT_BASE_MODEL_SAMPLE = r"""
    Example:

    ```python
    >>> from transformers import AutoTokenizer, {model_class}
    >>> import torch

    >>> tokenizer = AutoTokenizer.from_pretrained("{checkpoint}")
    >>> model = {model_class}.from_pretrained("{checkpoint}")

    >>> inputs = tokenizer("Hello, my dog is cute", return_tensors="pt")
    >>> outputs = model(**inputs)

    >>> last_hidden_states = outputs.last_hidden_state
    ```
"""

PT_MULTIPLE_CHOICE_SAMPLE = r"""
    Example:

    ```python
    >>> from transformers import AutoTokenizer, {model_class}
    >>> import torch

    >>> tokenizer = AutoTokenizer.from_pretrained("{checkpoint}")
    >>> model = {model_class}.from_pretrained("{checkpoint}")

    >>> prompt = "In Italy, pizza served in formal settings, such as at a restaurant, is presented unsliced."
    >>> choice0 = "It is eaten with a fork and a knife."
    >>> choice1 = "It is eaten while held in the hand."
    >>> labels = torch.tensor(0).unsqueeze(0)  # choice0 is correct (according to Wikipedia ;)), batch size 1

    >>> encoding = tokenizer([prompt, prompt], [choice0, choice1], return_tensors="pt", padding=True)
    >>> outputs = model(**{{k: v.unsqueeze(0) for k, v in encoding.items()}}, labels=labels)  # batch size is 1

    >>> # the linear classifier still needs to be trained
    >>> loss = outputs.loss
    >>> logits = outputs.logits
    ```
"""

PT_CAUSAL_LM_SAMPLE = r"""
    Example:

    ```python
    >>> import torch
    >>> from transformers import AutoTokenizer, {model_class}

    >>> tokenizer = AutoTokenizer.from_pretrained("{checkpoint}")
    >>> model = {model_class}.from_pretrained("{checkpoint}")

    >>> inputs = tokenizer("Hello, my dog is cute", return_tensors="pt")
    >>> outputs = model(**inputs, labels=inputs["input_ids"])
    >>> loss = outputs.loss
    >>> logits = outputs.logits
    ```
"""

PT_SPEECH_BASE_MODEL_SAMPLE = r"""
    Example:

    ```python
    >>> from transformers import AutoProcessor, {model_class}
    >>> import torch
    >>> from datasets import load_dataset

    >>> dataset = load_dataset("hf-internal-testing/librispeech_asr_demo", "clean", split="validation", trust_remote_code=True)
    >>> dataset = dataset.sort("id")
    >>> sampling_rate = dataset.features["audio"].sampling_rate

    >>> processor = AutoProcessor.from_pretrained("{checkpoint}")
    >>> model = {model_class}.from_pretrained("{checkpoint}")

    >>> # audio file is decoded on the fly
    >>> inputs = processor(dataset[0]["audio"]["array"], sampling_rate=sampling_rate, return_tensors="pt")
    >>> with torch.no_grad():
    ...     outputs = model(**inputs)

    >>> last_hidden_states = outputs.last_hidden_state
    >>> list(last_hidden_states.shape)
    {expected_output}
    ```
"""

PT_SPEECH_CTC_SAMPLE = r"""
    Example:

    ```python
    >>> from transformers import AutoProcessor, {model_class}
    >>> from datasets import load_dataset
    >>> import torch

    >>> dataset = load_dataset("hf-internal-testing/librispeech_asr_demo", "clean", split="validation", trust_remote_code=True)
    >>> dataset = dataset.sort("id")
    >>> sampling_rate = dataset.features["audio"].sampling_rate

    >>> processor = AutoProcessor.from_pretrained("{checkpoint}")
    >>> model = {model_class}.from_pretrained("{checkpoint}")

    >>> # audio file is decoded on the fly
    >>> inputs = processor(dataset[0]["audio"]["array"], sampling_rate=sampling_rate, return_tensors="pt")
    >>> with torch.no_grad():
    ...     logits = model(**inputs).logits
    >>> predicted_ids = torch.argmax(logits, dim=-1)

    >>> # transcribe speech
    >>> transcription = processor.batch_decode(predicted_ids)
    >>> transcription[0]
    {expected_output}

    >>> inputs["labels"] = processor(text=dataset[0]["text"], return_tensors="pt").input_ids

    >>> # compute loss
    >>> loss = model(**inputs).loss
    >>> round(loss.item(), 2)
    {expected_loss}
    ```
"""

PT_SPEECH_SEQ_CLASS_SAMPLE = r"""
    Example:

    ```python
    >>> from transformers import AutoFeatureExtractor, {model_class}
    >>> from datasets import load_dataset
    >>> import torch

    >>> dataset = load_dataset("hf-internal-testing/librispeech_asr_demo", "clean", split="validation", trust_remote_code=True)
    >>> dataset = dataset.sort("id")
    >>> sampling_rate = dataset.features["audio"].sampling_rate

    >>> feature_extractor = AutoFeatureExtractor.from_pretrained("{checkpoint}")
    >>> model = {model_class}.from_pretrained("{checkpoint}")

    >>> # audio file is decoded on the fly
    >>> inputs = feature_extractor(dataset[0]["audio"]["array"], sampling_rate=sampling_rate, return_tensors="pt")

    >>> with torch.no_grad():
    ...     logits = model(**inputs).logits

    >>> predicted_class_ids = torch.argmax(logits, dim=-1).item()
    >>> predicted_label = model.config.id2label[predicted_class_ids]
    >>> predicted_label
    {expected_output}

    >>> # compute loss - target_label is e.g. "down"
    >>> target_label = model.config.id2label[0]
    >>> inputs["labels"] = torch.tensor([model.config.label2id[target_label]])
    >>> loss = model(**inputs).loss
    >>> round(loss.item(), 2)
    {expected_loss}
    ```
"""

PT_SPEECH_FRAME_CLASS_SAMPLE = r"""
    Example:

    ```python
    >>> from transformers import AutoFeatureExtractor, {model_class}
    >>> from datasets import load_dataset
    >>> import torch

    >>> dataset = load_dataset("hf-internal-testing/librispeech_asr_demo", "clean", split="validation", trust_remote_code=True)
    >>> dataset = dataset.sort("id")
    >>> sampling_rate = dataset.features["audio"].sampling_rate

    >>> feature_extractor = AutoFeatureExtractor.from_pretrained("{checkpoint}")
    >>> model = {model_class}.from_pretrained("{checkpoint}")

    >>> # audio file is decoded on the fly
    >>> inputs = feature_extractor(dataset[0]["audio"]["array"], return_tensors="pt", sampling_rate=sampling_rate)
    >>> with torch.no_grad():
    ...     logits = model(**inputs).logits

    >>> probabilities = torch.sigmoid(logits[0])
    >>> # labels is a one-hot array of shape (num_frames, num_speakers)
    >>> labels = (probabilities > 0.5).long()
    >>> labels[0].tolist()
    {expected_output}
    ```
"""

PT_SPEECH_XVECTOR_SAMPLE = r"""
    Example:

    ```python
    >>> from transformers import AutoFeatureExtractor, {model_class}
    >>> from datasets import load_dataset
    >>> import torch

    >>> dataset = load_dataset("hf-internal-testing/librispeech_asr_demo", "clean", split="validation", trust_remote_code=True)
    >>> dataset = dataset.sort("id")
    >>> sampling_rate = dataset.features["audio"].sampling_rate

    >>> feature_extractor = AutoFeatureExtractor.from_pretrained("{checkpoint}")
    >>> model = {model_class}.from_pretrained("{checkpoint}")

    >>> # audio file is decoded on the fly
    >>> inputs = feature_extractor(
    ...     [d["array"] for d in dataset[:2]["audio"]], sampling_rate=sampling_rate, return_tensors="pt", padding=True
    ... )
    >>> with torch.no_grad():
    ...     embeddings = model(**inputs).embeddings

    >>> embeddings = torch.nn.functional.normalize(embeddings, dim=-1).cpu()

    >>> # the resulting embeddings can be used for cosine similarity-based retrieval
    >>> cosine_sim = torch.nn.CosineSimilarity(dim=-1)
    >>> similarity = cosine_sim(embeddings[0], embeddings[1])
    >>> threshold = 0.7  # the optimal threshold is dataset-dependent
    >>> if similarity < threshold:
    ...     print("Speakers are not the same!")
    >>> round(similarity.item(), 2)
    {expected_output}
    ```
"""

PT_VISION_BASE_MODEL_SAMPLE = r"""
    Example:

    ```python
    >>> from transformers import AutoImageProcessor, {model_class}
    >>> import torch
    >>> from datasets import load_dataset

    >>> dataset = load_dataset("huggingface/cats-image", trust_remote_code=True)
    >>> image = dataset["test"]["image"][0]

    >>> image_processor = AutoImageProcessor.from_pretrained("{checkpoint}")
    >>> model = {model_class}.from_pretrained("{checkpoint}")

    >>> inputs = image_processor(image, return_tensors="pt")

    >>> with torch.no_grad():
    ...     outputs = model(**inputs)

    >>> last_hidden_states = outputs.last_hidden_state
    >>> list(last_hidden_states.shape)
    {expected_output}
    ```
"""

PT_VISION_SEQ_CLASS_SAMPLE = r"""
    Example:

    ```python
    >>> from transformers import AutoImageProcessor, {model_class}
    >>> import torch
    >>> from datasets import load_dataset

    >>> dataset = load_dataset("huggingface/cats-image", trust_remote_code=True)
    >>> image = dataset["test"]["image"][0]

    >>> image_processor = AutoImageProcessor.from_pretrained("{checkpoint}")
    >>> model = {model_class}.from_pretrained("{checkpoint}")

    >>> inputs = image_processor(image, return_tensors="pt")

    >>> with torch.no_grad():
    ...     logits = model(**inputs).logits

    >>> # model predicts one of the 1000 ImageNet classes
    >>> predicted_label = logits.argmax(-1).item()
    >>> print(model.config.id2label[predicted_label])
    {expected_output}
    ```
"""

PT_SAMPLE_DOCSTRINGS = {
    "SequenceClassification": PT_SEQUENCE_CLASSIFICATION_SAMPLE,
    "QuestionAnswering": PT_QUESTION_ANSWERING_SAMPLE,
    "TokenClassification": PT_TOKEN_CLASSIFICATION_SAMPLE,
    "MultipleChoice": PT_MULTIPLE_CHOICE_SAMPLE,
    "MaskedLM": PT_MASKED_LM_SAMPLE,
    "LMHead": PT_CAUSAL_LM_SAMPLE,
    "BaseModel": PT_BASE_MODEL_SAMPLE,
    "SpeechBaseModel": PT_SPEECH_BASE_MODEL_SAMPLE,
    "CTC": PT_SPEECH_CTC_SAMPLE,
    "AudioClassification": PT_SPEECH_SEQ_CLASS_SAMPLE,
    "AudioFrameClassification": PT_SPEECH_FRAME_CLASS_SAMPLE,
    "AudioXVector": PT_SPEECH_XVECTOR_SAMPLE,
    "VisionBaseModel": PT_VISION_BASE_MODEL_SAMPLE,
    "ImageClassification": PT_VISION_SEQ_CLASS_SAMPLE,
}
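
# `add_code_sample_docstrings` (defined at the end of this module) picks one of these
# templates by substring-matching the decorated class name, for example (illustrative
# class names; the mechanism is what matters):
#
#     "BertForQuestionAnswering" -> PT_SAMPLE_DOCSTRINGS["QuestionAnswering"]
#     "Wav2Vec2ForCTC"           -> PT_SAMPLE_DOCSTRINGS["CTC"]

TEXT_TO_AUDIO_SPECTROGRAM_SAMPLE = r"""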
    Example:

    ```python
    >>> from transformers import AutoProcessor, {model_class}, SpeechT5HifiGan

    >>> model = {model_class}.from_pretrained("{checkpoint}")

    >>> processor = AutoProcessor.from_pretrained("{checkpoint}")
    >>> vocoder = SpeechT5HifiGan.from_pretrained("microsoft/speecht5_hifigan")
    >>> inputs = processor(text="Hello, my dog is cute", return_tensors="pt")

    >>> # generate speech
    >>> speech = model.generate(inputs["input_ids"], speaker_embeddings=speaker_embeddings, vocoder=vocoder)
    ```
"""

TEXT_TO_AUDIO_WAVEFORM_SAMPLE = r"""
    Example:

    ```python
    >>> from transformers import AutoProcessor, {model_class}

    >>> model = {model_class}.from_pretrained("{checkpoint}")

    >>> processor = AutoProcessor.from_pretrained("{checkpoint}")
    >>> inputs = processor(text="Hello, my dog is cute", return_tensors="pt")

    >>> # generate speech
    >>> speech = model(inputs["input_ids"])
    ```
"""

AUDIO_FRAME_CLASSIFICATION_SAMPLE = PT_SPEECH_FRAME_CLASS_SAMPLE
AUDIO_XVECTOR_SAMPLE = PT_SPEECH_XVECTOR_SAMPLE

IMAGE_TO_TEXT_SAMPLE = r"""
    Example:

    ```python
    >>> from PIL import Image
    >>> import requests
    >>> from transformers import AutoProcessor, {model_class}

    >>> processor = AutoProcessor.from_pretrained("{checkpoint}")
    >>> model = {model_class}.from_pretrained("{checkpoint}")

    >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    >>> image = Image.open(requests.get(url, stream=True).raw)

    >>> inputs = processor(images=image, return_tensors="pt")

    >>> outputs = model(**inputs)
    ```
"""

DEPTH_ESTIMATION_SAMPLE = r"""
    Example:

    ```python
    >>> from transformers import AutoImageProcessor, {model_class}
    >>> import torch
    >>> from PIL import Image
    >>> import requests

    >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    >>> image = Image.open(requests.get(url, stream=True).raw)

    >>> processor = AutoImageProcessor.from_pretrained("{checkpoint}")
    >>> model = {model_class}.from_pretrained("{checkpoint}")

    >>> device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    >>> model.to(device)

    >>> # prepare image for the model
    >>> inputs = processor(images=image, return_tensors="pt").to(device)

    >>> with torch.no_grad():
    ...     outputs = model(**inputs)

    >>> # interpolate to original size
    >>> post_processed_output = processor.post_process_depth_estimation(
    ...     outputs, [(image.height, image.width)],
    ... )
    >>> predicted_depth = post_processed_output[0]["predicted_depth"]
    ```
"""

# Empty placeholder used by pipeline tasks that do not have a curated code sample yet.
VIDEO_CLASSIFICATION_SAMPLE = r"""
    Example:

    ```python
    ```
"""

# The following tasks have no curated sample yet and reuse the empty placeholder above,
# while a few others reuse the PyTorch samples defined earlier in this module.
ZERO_SHOT_OBJECT_DETECTION_SAMPLE = VIDEO_CLASSIFICATION_SAMPLE
IMAGE_TO_IMAGE_SAMPLE = VIDEO_CLASSIFICATION_SAMPLE
IMAGE_FEATURE_EXTRACTION_SAMPLE = VIDEO_CLASSIFICATION_SAMPLE
DOCUMENT_QUESTION_ANSWERING_SAMPLE = VIDEO_CLASSIFICATION_SAMPLE
NEXT_SENTENCE_PREDICTION_SAMPLE = VIDEO_CLASSIFICATION_SAMPLE
MULTIPLE_CHOICE_SAMPLE = PT_MULTIPLE_CHOICE_SAMPLE
PRETRAINING_SAMPLE = VIDEO_CLASSIFICATION_SAMPLE
MASK_GENERATION_SAMPLE = VIDEO_CLASSIFICATION_SAMPLE
VISUAL_QUESTION_ANSWERING_SAMPLE = VIDEO_CLASSIFICATION_SAMPLE
TEXT_GENERATION_SAMPLE = VIDEO_CLASSIFICATION_SAMPLE
IMAGE_CLASSIFICATION_SAMPLE = PT_VISION_SEQ_CLASS_SAMPLE
IMAGE_SEGMENTATION_SAMPLE = VIDEO_CLASSIFICATION_SAMPLE
FILL_MASK_SAMPLE = VIDEO_CLASSIFICATION_SAMPLE
OBJECT_DETECTION_SAMPLE = VIDEO_CLASSIFICATION_SAMPLE
QUESTION_ANSWERING_SAMPLE = PT_QUESTION_ANSWERING_SAMPLE
TEXT2TEXT_GENERATION_SAMPLE = VIDEO_CLASSIFICATION_SAMPLE
TEXT_CLASSIFICATION_SAMPLE = PT_SEQUENCE_CLASSIFICATION_SAMPLE
TABLE_QUESTION_ANSWERING_SAMPLE = VIDEO_CLASSIFICATION_SAMPLE
TOKEN_CLASSIFICATION_SAMPLE = PT_TOKEN_CLASSIFICATION_SAMPLE
AUDIO_CLASSIFICATION_SAMPLE = PT_SPEECH_SEQ_CLASS_SAMPLE
AUTOMATIC_SPEECH_RECOGNITION_SAMPLE = PT_SPEECH_CTC_SAMPLE
ZERO_SHOT_IMAGE_CLASSIFICATION_SAMPLE = VIDEO_CLASSIFICATION_SAMPLE

IMAGE_TEXT_TO_TEXT_GENERATION_SAMPLE = r"""
    Example:

    ```python
    >>> from PIL import Image
    >>> import requests
    >>> from transformers import AutoProcessor, {model_class}

    >>> model = {model_class}.from_pretrained("{checkpoint}")
    >>> processor = AutoProcessor.from_pretrained("{checkpoint}")

    >>> messages = [
    ...     {{
    ...         "role": "user", "content": [
    ...             {{"type": "image", "url": "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/pipeline-cat-chonk.jpeg"}},
    ...             {{"type": "text", "text": "Where is the cat standing?"}},
    ...         ]
    ...     }},
    ... ]

    >>> inputs = processor.apply_chat_template(
    ...     messages,
    ...     tokenize=True,
    ...     return_dict=True,
    ...     return_tensors="pt",
    ...     add_generation_prompt=True
    ... )
    >>> # Generate
    >>> generate_ids = model.generate(**inputs)
    >>> processor.batch_decode(generate_ids, skip_special_tokens=True)[0]
    ```
"""

PIPELINE_TASKS_TO_SAMPLE_DOCSTRINGS = OrderedDict(
    [
        ("text-to-audio-spectrogram", TEXT_TO_AUDIO_SPECTROGRAM_SAMPLE),
        ("text-to-audio-waveform", TEXT_TO_AUDIO_WAVEFORM_SAMPLE),
        ("automatic-speech-recognition", AUTOMATIC_SPEECH_RECOGNITION_SAMPLE),
        ("audio-frame-classification", AUDIO_FRAME_CLASSIFICATION_SAMPLE),
        ("audio-classification", AUDIO_CLASSIFICATION_SAMPLE),
        ("audio-xvector", AUDIO_XVECTOR_SAMPLE),
        ("image-text-to-text", IMAGE_TEXT_TO_TEXT_GENERATION_SAMPLE),
        ("image-to-text", IMAGE_TO_TEXT_SAMPLE),
        ("visual-question-answering", VISUAL_QUESTION_ANSWERING_SAMPLE),
        ("depth-estimation", DEPTH_ESTIMATION_SAMPLE),
        ("video-classification", VIDEO_CLASSIFICATION_SAMPLE),
        ("zero-shot-image-classification", ZERO_SHOT_IMAGE_CLASSIFICATION_SAMPLE),
        ("image-classification", IMAGE_CLASSIFICATION_SAMPLE),
        ("zero-shot-object-detection", ZERO_SHOT_OBJECT_DETECTION_SAMPLE),
        ("object-detection", OBJECT_DETECTION_SAMPLE),
        ("image-segmentation", IMAGE_SEGMENTATION_SAMPLE),
        ("image-to-image", IMAGE_TO_IMAGE_SAMPLE),
        ("image-feature-extraction", IMAGE_FEATURE_EXTRACTION_SAMPLE),
        ("text-generation", TEXT_GENERATION_SAMPLE),
        ("table-question-answering", TABLE_QUESTION_ANSWERING_SAMPLE),
        ("document-question-answering", DOCUMENT_QUESTION_ANSWERING_SAMPLE),
        ("question-answering", QUESTION_ANSWERING_SAMPLE),
        ("text2text-generation", TEXT2TEXT_GENERATION_SAMPLE),
        ("next-sentence-prediction", NEXT_SENTENCE_PREDICTION_SAMPLE),
        ("multiple-choice", MULTIPLE_CHOICE_SAMPLE),
        ("text-classification", TEXT_CLASSIFICATION_SAMPLE),
        ("token-classification", TOKEN_CLASSIFICATION_SAMPLE),
        ("fill-mask", FILL_MASK_SAMPLE),
        ("mask-generation", MASK_GENERATION_SAMPLE),
        ("pretraining", PRETRAINING_SAMPLE),
    ]
)

MODELS_TO_PIPELINE = OrderedDict(
    [
        ("MODEL_FOR_TEXT_TO_SPECTROGRAM_MAPPING_NAMES", "text-to-audio-spectrogram"),
        ("MODEL_FOR_TEXT_TO_WAVEFORM_MAPPING_NAMES", "text-to-audio-waveform"),
        ("MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES", "automatic-speech-recognition"),
        ("MODEL_FOR_CTC_MAPPING_NAMES", "automatic-speech-recognition"),
        ("MODEL_FOR_AUDIO_FRAME_CLASSIFICATION_MAPPING_NAMES", "audio-frame-classification"),
        ("MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES", "audio-classification"),
        ("MODEL_FOR_AUDIO_XVECTOR_MAPPING_NAMES", "audio-xvector"),
        ("MODEL_FOR_IMAGE_TEXT_TO_TEXT_MAPPING_NAMES", "image-text-to-text"),
        ("MODEL_FOR_VISION_2_SEQ_MAPPING_NAMES", "image-to-text"),
        ("MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING_NAMES", "visual-question-answering"),
        ("MODEL_FOR_DEPTH_ESTIMATION_MAPPING_NAMES", "depth-estimation"),
        ("MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING_NAMES", "video-classification"),
        ("MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING_NAMES", "zero-shot-image-classification"),
        ("MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES", "image-classification"),
        ("MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING_NAMES", "zero-shot-object-detection"),
        ("MODEL_FOR_OBJECT_DETECTION_MAPPING_NAMES", "object-detection"),
        ("MODEL_FOR_IMAGE_SEGMENTATION_MAPPING_NAMES", "image-segmentation"),
        ("MODEL_FOR_IMAGE_TO_IMAGE_MAPPING_NAMES", "image-to-image"),
        ("MODEL_FOR_IMAGE_MAPPING_NAMES", "image-feature-extraction"),
        ("MODEL_FOR_CAUSAL_LM_MAPPING_NAMES", "text-generation"),
        ("MODEL_FOR_TABLE_QUESTION_ANSWERING_MAPPING_NAMES", "table-question-answering"),
        ("MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING_NAMES", "document-question-answering"),
        ("MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES", "question-answering"),
        ("MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES", "text2text-generation"),
        ("MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES", "next-sentence-prediction"),
        ("MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES", "multiple-choice"),
        ("MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES", "text-classification"),
        ("MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES", "token-classification"),
        ("MODEL_FOR_MASKED_LM_MAPPING_NAMES", "fill-mask"),
        ("MODEL_FOR_MASK_GENERATION_MAPPING_NAMES", "mask-generation"),
        ("MODEL_FOR_PRETRAINING_MAPPING_NAMES", "pretraining"),
    ]
)
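
# A sketch of how the two mappings above can be combined to resolve a sample docstring
# from a model's auto-mapping name (a hedged illustration, not used verbatim below):
#
#     task = MODELS_TO_PIPELINE["MODEL_FOR_CTC_MAPPING_NAMES"]  # "automatic-speech-recognition"
#     sample = PIPELINE_TASKS_TO_SAMPLE_DOCSTRINGS[task]        # AUTOMATIC_SPEECH_RECOGNITION_SAMPLE

TF_TOKEN_CLASSIFICATION_SAMPLE = r"""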
    Example:

    ```python
    >>> from transformers import AutoTokenizer, {model_class}
    >>> import tensorflow as tf

    >>> tokenizer = AutoTokenizer.from_pretrained("{checkpoint}")
    >>> model = {model_class}.from_pretrained("{checkpoint}")

    >>> inputs = tokenizer(
    ...     "HuggingFace is a company based in Paris and New York", add_special_tokens=False, return_tensors="tf"
    ... )

    >>> logits = model(**inputs).logits
    >>> predicted_token_class_ids = tf.math.argmax(logits, axis=-1)

    >>> # Note that tokens are classified rather than input words which means that
    >>> # there might be more predicted token classes than words.
    >>> # Multiple token classes might account for the same word
    >>> predicted_tokens_classes = [model.config.id2label[t] for t in predicted_token_class_ids[0].numpy().tolist()]
    >>> predicted_tokens_classes
    {expected_output}
    ```

    ```python
    >>> labels = predicted_token_class_ids
    >>> loss = tf.math.reduce_mean(model(**inputs, labels=labels).loss)
    >>> round(float(loss), 2)
    {expected_loss}
    ```
"""

TF_QUESTION_ANSWERING_SAMPLE = r"""
    Example:

    ```python
    >>> from transformers import AutoTokenizer, {model_class}
    >>> import tensorflow as tf

    >>> tokenizer = AutoTokenizer.from_pretrained("{checkpoint}")
    >>> model = {model_class}.from_pretrained("{checkpoint}")

    >>> question, text = "Who was Jim Henson?", "Jim Henson was a nice puppet"

    >>> inputs = tokenizer(question, text, return_tensors="tf")
    >>> outputs = model(**inputs)

    >>> answer_start_index = int(tf.math.argmax(outputs.start_logits, axis=-1)[0])
    >>> answer_end_index = int(tf.math.argmax(outputs.end_logits, axis=-1)[0])

    >>> predict_answer_tokens = inputs.input_ids[0, answer_start_index : answer_end_index + 1]
    >>> tokenizer.decode(predict_answer_tokens)
    {expected_output}
    ```

    ```python
    >>> # target is "nice puppet"
    >>> target_start_index = tf.constant([{qa_target_start_index}])
    >>> target_end_index = tf.constant([{qa_target_end_index}])

    >>> outputs = model(**inputs, start_positions=target_start_index, end_positions=target_end_index)
    >>> loss = tf.math.reduce_mean(outputs.loss)
    >>> round(float(loss), 2)
    {expected_loss}
    ```
"""

TF_SEQUENCE_CLASSIFICATION_SAMPLE = r"""
    Example:

    ```python
    >>> from transformers import AutoTokenizer, {model_class}
    >>> import tensorflow as tf

    >>> tokenizer = AutoTokenizer.from_pretrained("{checkpoint}")
    >>> model = {model_class}.from_pretrained("{checkpoint}")

    >>> inputs = tokenizer("Hello, my dog is cute", return_tensors="tf")

    >>> logits = model(**inputs).logits

    >>> predicted_class_id = int(tf.math.argmax(logits, axis=-1)[0])
    >>> model.config.id2label[predicted_class_id]
    {expected_output}
    ```

    ```python
    >>> # To train a model on `num_labels` classes, you can pass `num_labels=num_labels` to `.from_pretrained(...)`
    >>> num_labels = len(model.config.id2label)
    >>> model = {model_class}.from_pretrained("{checkpoint}", num_labels=num_labels)

    >>> labels = tf.constant(1)
    >>> loss = model(**inputs, labels=labels).loss
    >>> round(float(loss), 2)
    {expected_loss}
    ```
"""

TF_MASKED_LM_SAMPLE = r"""
    Example:

    ```python
    >>> from transformers import AutoTokenizer, {model_class}
    >>> import tensorflow as tf

    >>> tokenizer = AutoTokenizer.from_pretrained("{checkpoint}")
    >>> model = {model_class}.from_pretrained("{checkpoint}")

    >>> inputs = tokenizer("The capital of France is {mask}.", return_tensors="tf")
    >>> logits = model(**inputs).logits

    >>> # retrieve index of {mask}
    >>> mask_token_index = tf.where((inputs.input_ids == tokenizer.mask_token_id)[0])
    >>> selected_logits = tf.gather_nd(logits[0], indices=mask_token_index)

    >>> predicted_token_id = tf.math.argmax(selected_logits, axis=-1)
    >>> tokenizer.decode(predicted_token_id)
    {expected_output}
    ```

    ```python
    >>> labels = tokenizer("The capital of France is Paris.", return_tensors="tf")["input_ids"]
    >>> # mask labels of non-{mask} tokens
    >>> labels = tf.where(inputs.input_ids == tokenizer.mask_token_id, labels, -100)

    >>> outputs = model(**inputs, labels=labels)
    >>> round(float(outputs.loss), 2)
    {expected_loss}
    ```
"""

TF_BASE_MODEL_SAMPLE = r"""
    Example:

    ```python
    >>> from transformers import AutoTokenizer, {model_class}
    >>> import tensorflow as tf

    >>> tokenizer = AutoTokenizer.from_pretrained("{checkpoint}")
    >>> model = {model_class}.from_pretrained("{checkpoint}")

    >>> inputs = tokenizer("Hello, my dog is cute", return_tensors="tf")
    >>> outputs = model(inputs)

    >>> last_hidden_states = outputs.last_hidden_state
    ```
"""

TF_MULTIPLE_CHOICE_SAMPLE = r"""
    Example:

    ```python
    >>> from transformers import AutoTokenizer, {model_class}
    >>> import tensorflow as tf

    >>> tokenizer = AutoTokenizer.from_pretrained("{checkpoint}")
    >>> model = {model_class}.from_pretrained("{checkpoint}")

    >>> prompt = "In Italy, pizza served in formal settings, such as at a restaurant, is presented unsliced."
    >>> choice0 = "It is eaten with a fork and a knife."
    >>> choice1 = "It is eaten while held in the hand."

    >>> encoding = tokenizer([prompt, prompt], [choice0, choice1], return_tensors="tf", padding=True)
    >>> inputs = {{k: tf.expand_dims(v, 0) for k, v in encoding.items()}}
    >>> outputs = model(inputs)  # batch size is 1

    >>> # the linear classifier still needs to be trained
    >>> logits = outputs.logits
    ```
"""

TF_CAUSAL_LM_SAMPLE = r"""
    Example:

    ```python
    >>> from transformers import AutoTokenizer, {model_class}
    >>> import tensorflow as tf

    >>> tokenizer = AutoTokenizer.from_pretrained("{checkpoint}")
    >>> model = {model_class}.from_pretrained("{checkpoint}")

    >>> inputs = tokenizer("Hello, my dog is cute", return_tensors="tf")
    >>> outputs = model(inputs)
    >>> logits = outputs.logits
    ```
"""

TF_SPEECH_BASE_MODEL_SAMPLE = r"""
    Example:

    ```python
    >>> from transformers import AutoProcessor, {model_class}
    >>> from datasets import load_dataset

    >>> dataset = load_dataset("hf-internal-testing/librispeech_asr_demo", "clean", split="validation", trust_remote_code=True)
    >>> dataset = dataset.sort("id")
    >>> sampling_rate = dataset.features["audio"].sampling_rate

    >>> processor = AutoProcessor.from_pretrained("{checkpoint}")
    >>> model = {model_class}.from_pretrained("{checkpoint}")

    >>> # audio file is decoded on the fly
    >>> inputs = processor(dataset[0]["audio"]["array"], sampling_rate=sampling_rate, return_tensors="tf")
    >>> outputs = model(**inputs)

    >>> last_hidden_states = outputs.last_hidden_state
    >>> list(last_hidden_states.shape)
    {expected_output}
    ```
"""

TF_SPEECH_CTC_SAMPLE = r"""
    Example:

    ```python
    >>> from transformers import AutoProcessor, {model_class}
    >>> from datasets import load_dataset
    >>> import tensorflow as tf

    >>> dataset = load_dataset("hf-internal-testing/librispeech_asr_demo", "clean", split="validation", trust_remote_code=True)
    >>> dataset = dataset.sort("id")
    >>> sampling_rate = dataset.features["audio"].sampling_rate

    >>> processor = AutoProcessor.from_pretrained("{checkpoint}")
    >>> model = {model_class}.from_pretrained("{checkpoint}")

    >>> # audio file is decoded on the fly
    >>> inputs = processor(dataset[0]["audio"]["array"], sampling_rate=sampling_rate, return_tensors="tf")
    >>> logits = model(**inputs).logits
    >>> predicted_ids = tf.math.argmax(logits, axis=-1)

    >>> # transcribe speech
    >>> transcription = processor.batch_decode(predicted_ids)
    >>> transcription[0]
    {expected_output}
    ```

    ```python
    >>> inputs["labels"] = processor(text=dataset[0]["text"], return_tensors="tf").input_ids

    >>> # compute loss
    >>> loss = model(**inputs).loss
    >>> round(float(loss), 2)
    {expected_loss}
    ```
"""

TF_VISION_BASE_MODEL_SAMPLE = r"""
    Example:

    ```python
    >>> from transformers import AutoImageProcessor, {model_class}
    >>> from datasets import load_dataset

    >>> dataset = load_dataset("huggingface/cats-image", trust_remote_code=True)
    >>> image = dataset["test"]["image"][0]

    >>> image_processor = AutoImageProcessor.from_pretrained("{checkpoint}")
    >>> model = {model_class}.from_pretrained("{checkpoint}")

    >>> inputs = image_processor(image, return_tensors="tf")
    >>> outputs = model(**inputs)

    >>> last_hidden_states = outputs.last_hidden_state
    >>> list(last_hidden_states.shape)
    {expected_output}
    ```
"""

TF_VISION_SEQ_CLASS_SAMPLE = r"""
    Example:

    ```python
    >>> from transformers import AutoImageProcessor, {model_class}
    >>> import tensorflow as tf
    >>> from datasets import load_dataset

    >>> dataset = load_dataset("huggingface/cats-image", trust_remote_code=True)
    >>> image = dataset["test"]["image"][0]

    >>> image_processor = AutoImageProcessor.from_pretrained("{checkpoint}")
    >>> model = {model_class}.from_pretrained("{checkpoint}")

    >>> inputs = image_processor(image, return_tensors="tf")
    >>> logits = model(**inputs).logits

    >>> # model predicts one of the 1000 ImageNet classes
    >>> predicted_label = int(tf.math.argmax(logits, axis=-1))
    >>> print(model.config.id2label[predicted_label])
    {expected_output}
    ```
"""

TF_SAMPLE_DOCSTRINGS = {
    "SequenceClassification": TF_SEQUENCE_CLASSIFICATION_SAMPLE,
    "QuestionAnswering": TF_QUESTION_ANSWERING_SAMPLE,
    "TokenClassification": TF_TOKEN_CLASSIFICATION_SAMPLE,
    "MultipleChoice": TF_MULTIPLE_CHOICE_SAMPLE,
    "MaskedLM": TF_MASKED_LM_SAMPLE,
    "LMHead": TF_CAUSAL_LM_SAMPLE,
    "BaseModel": TF_BASE_MODEL_SAMPLE,
    "SpeechBaseModel": TF_SPEECH_BASE_MODEL_SAMPLE,
    "CTC": TF_SPEECH_CTC_SAMPLE,
    "VisionBaseModel": TF_VISION_BASE_MODEL_SAMPLE,
    "ImageClassification": TF_VISION_SEQ_CLASS_SAMPLE,
}
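
# `add_code_sample_docstrings` switches to the dict above when the decorated class name
# starts with "TF", and to FLAX_SAMPLE_DOCSTRINGS below for "Flax". A minimal sketch,
# assuming a hypothetical checkpoint name and a real TF output class:
#
#     @add_code_sample_docstrings(
#         checkpoint="some-org/tf-bert-checkpoint",  # hypothetical checkpoint name
#         output_type=TFTokenClassifierOutput,
#         config_class="BertConfig",
#     )
#     def call(self, input_ids=None):
#         ...

FLAX_TOKEN_CLASSIFICATION_SAMPLE = r"""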
    Example:

    ```python
    >>> from transformers import AutoTokenizer, {model_class}

    >>> tokenizer = AutoTokenizer.from_pretrained("{checkpoint}")
    >>> model = {model_class}.from_pretrained("{checkpoint}")

    >>> inputs = tokenizer("Hello, my dog is cute", return_tensors="jax")

    >>> outputs = model(**inputs)
    >>> logits = outputs.logits
    ```
"""

FLAX_QUESTION_ANSWERING_SAMPLE = r"""
    Example:

    ```python
    >>> from transformers import AutoTokenizer, {model_class}

    >>> tokenizer = AutoTokenizer.from_pretrained("{checkpoint}")
    >>> model = {model_class}.from_pretrained("{checkpoint}")

    >>> question, text = "Who was Jim Henson?", "Jim Henson was a nice puppet"
    >>> inputs = tokenizer(question, text, return_tensors="jax")

    >>> outputs = model(**inputs)
    >>> start_scores = outputs.start_logits
    >>> end_scores = outputs.end_logits
    ```
"""

# Identical template: the Flax sequence classification sample matches the token
# classification one above.
FLAX_SEQUENCE_CLASSIFICATION_SAMPLE = FLAX_TOKEN_CLASSIFICATION_SAMPLE

FLAX_MASKED_LM_SAMPLE = r"""
    Example:

    ```python
    >>> from transformers import AutoTokenizer, {model_class}

    >>> tokenizer = AutoTokenizer.from_pretrained("{checkpoint}")
    >>> model = {model_class}.from_pretrained("{checkpoint}")

    >>> inputs = tokenizer("The capital of France is {mask}.", return_tensors="jax")

    >>> outputs = model(**inputs)
    >>> logits = outputs.logits
    ```
"""

FLAX_BASE_MODEL_SAMPLE = r"""
    Example:

    ```python
    >>> from transformers import AutoTokenizer, {model_class}

    >>> tokenizer = AutoTokenizer.from_pretrained("{checkpoint}")
    >>> model = {model_class}.from_pretrained("{checkpoint}")

    >>> inputs = tokenizer("Hello, my dog is cute", return_tensors="jax")
    >>> outputs = model(**inputs)

    >>> last_hidden_states = outputs.last_hidden_state
    ```
"""

FLAX_MULTIPLE_CHOICE_SAMPLE = r"""
    Example:

    ```python
    >>> from transformers import AutoTokenizer, {model_class}

    >>> tokenizer = AutoTokenizer.from_pretrained("{checkpoint}")
    >>> model = {model_class}.from_pretrained("{checkpoint}")

    >>> prompt = "In Italy, pizza served in formal settings, such as at a restaurant, is presented unsliced."
    >>> choice0 = "It is eaten with a fork and a knife."
    >>> choice1 = "It is eaten while held in the hand."

    >>> encoding = tokenizer([prompt, prompt], [choice0, choice1], return_tensors="jax", padding=True)
    >>> outputs = model(**{{k: v[None, :] for k, v in encoding.items()}})

    >>> logits = outputs.logits
    ```
"""

FLAX_CAUSAL_LM_SAMPLE = r"""
    Example:

    ```python
    >>> from transformers import AutoTokenizer, {model_class}

    >>> tokenizer = AutoTokenizer.from_pretrained("{checkpoint}")
    >>> model = {model_class}.from_pretrained("{checkpoint}")

    >>> inputs = tokenizer("Hello, my dog is cute", return_tensors="np")
    >>> outputs = model(**inputs)

    >>> # retrieve logits for next token
    >>> next_token_logits = outputs.logits[:, -1]
    ```
"""

FLAX_SAMPLE_DOCSTRINGS = {
    "SequenceClassification": FLAX_SEQUENCE_CLASSIFICATION_SAMPLE,
    "QuestionAnswering": FLAX_QUESTION_ANSWERING_SAMPLE,
    "TokenClassification": FLAX_TOKEN_CLASSIFICATION_SAMPLE,
    "MultipleChoice": FLAX_MULTIPLE_CHOICE_SAMPLE,
    "MaskedLM": FLAX_MASKED_LM_SAMPLE,
    "BaseModel": FLAX_BASE_MODEL_SAMPLE,
    "LMHead": FLAX_CAUSAL_LM_SAMPLE,
}


def filter_outputs_from_example(docstring, **kwargs):
    """
    Removes the lines testing an output with the doctest syntax in a code sample when it's set to `None`.
    """
    for key, value in kwargs.items():
        if value is not None:
            continue

        doc_key = "{" + key + "}"
        docstring = re.sub(rf"\n([^\n]+)\n\s+{doc_key}\n", "\n", docstring)

    return docstring


def add_code_sample_docstrings(
    *docstr,
    processor_class=None,
    checkpoint=None,
    output_type=None,
    config_class=None,
    mask="[MASK]",
    qa_target_start_index=14,
    qa_target_end_index=15,
    model_cls=None,
    modality=None,
    expected_output=None,
    expected_loss=None,
    real_checkpoint=None,
    revision=None,
):
    def docstring_decorator(fn):
        # model_class defaults to the function's class if not specified otherwise
        model_class = fn.__qualname__.split(".")[0] if model_cls is None else model_cls

        if model_class[:2] == "TF":
            sample_docstrings = TF_SAMPLE_DOCSTRINGS
        elif model_class[:4] == "Flax":
            sample_docstrings = FLAX_SAMPLE_DOCSTRINGS
        else:
            sample_docstrings = PT_SAMPLE_DOCSTRINGS

        # Put all kwargs for the docstrings in a dict to be used with `.format(**doc_kwargs)`.
        # Note that templates may be formatted with non-existing keys, which is fine.
        doc_kwargs = {
            "model_class": model_class,
            "processor_class": processor_class,
            "checkpoint": checkpoint,
            "mask": mask,
            "qa_target_start_index": qa_target_start_index,
            "qa_target_end_index": qa_target_end_index,
            "expected_output": expected_output,
            "expected_loss": expected_loss,
            "real_checkpoint": real_checkpoint,
            "fake_checkpoint": checkpoint,
            "true": "{true}",  # Keeps the `<Tip warning={true}>` syntax intact through `str.format`.
        }

        if ("SequenceClassification" in model_class or "AudioClassification" in model_class) and modality == "audio":
            code_sample = sample_docstrings["AudioClassification"]
        elif "SequenceClassification" in model_class:
            code_sample = sample_docstrings["SequenceClassification"]
        elif "QuestionAnswering" in model_class:
            code_sample = sample_docstrings["QuestionAnswering"]
        elif "TokenClassification" in model_class:
            code_sample = sample_docstrings["TokenClassification"]
        elif "MultipleChoice" in model_class:
            code_sample = sample_docstrings["MultipleChoice"]
        elif "MaskedLM" in model_class or model_class in ["FlaubertWithLMHeadModel", "XLMWithLMHeadModel"]:
            code_sample = sample_docstrings["MaskedLM"]
        elif "LMHead" in model_class or "CausalLM" in model_class:
            code_sample = sample_docstrings["LMHead"]
        elif "CTC" in model_class:
            code_sample = sample_docstrings["CTC"]
        elif "AudioFrameClassification" in model_class:
            code_sample = sample_docstrings["AudioFrameClassification"]
        elif "XVector" in model_class and modality == "audio":
            code_sample = sample_docstrings["AudioXVector"]
        elif "Model" in model_class and modality == "audio":
            code_sample = sample_docstrings["SpeechBaseModel"]
        elif "Model" in model_class and modality == "vision":
            code_sample = sample_docstrings["VisionBaseModel"]
        elif "Model" in model_class or "Encoder" in model_class:
            code_sample = sample_docstrings["BaseModel"]
        elif "ImageClassification" in model_class:
            code_sample = sample_docstrings["ImageClassification"]
        else:
            raise ValueError(f"Docstring can't be built for model {model_class}")

        code_sample = filter_outputs_from_example(
            code_sample, expected_output=expected_output, expected_loss=expected_loss
        )
        if real_checkpoint is not None:
            code_sample = FAKE_MODEL_DISCLAIMER + code_sample
        func_doc = (fn.__doc__ or "") + "".join(docstr)
        output_doc = "" if output_type is None else _prepare_output_docstrings(output_type, config_class)
        built_doc = code_sample.format(**doc_kwargs)
        if revision is not None:
            # The revision is expected to be a pull request reference on the Hub.
            if not re.match(r"^refs/pr/\d+", revision):
                raise ValueError(
                    f"The provided revision '{revision}' is incorrect. It should point to"
                    " a pull request reference on the hub like 'refs/pr/6'"
                )
            built_doc = built_doc.replace(
                f'from_pretrained("{checkpoint}")', f'from_pretrained("{checkpoint}", revision="{revision}")'
            )
        fn.__doc__ = func_doc + output_doc + built_doc
        return fn

    return docstring_decorator


def replace_return_docstrings(output_type=None, config_class=None):
    def docstring_decorator(fn):
        func_doc = fn.__doc__
        lines = func_doc.split("\n")
        i = 0
        while i < len(lines) and re.search(r"^\s*Returns?:\s*$", lines[i]) is None:
            i += 1
        if i < len(lines):
            indent = len(_get_indent(lines[i]))
            lines[i] = _prepare_output_docstrings(output_type, config_class, min_indent=indent)
            func_doc = "\n".join(lines)
        else:
            raise ValueError(
                f"The function {fn} should have an empty 'Return:' or 'Returns:' in its docstring as placeholder, "
                f"current docstring is:\n{func_doc}"
            )
        fn.__doc__ = func_doc
        return fn

    return docstring_decorator


def copy_func(f):
    """Returns a copy of a function f."""
    # Based on https://stackoverflow.com/a/6528148/190597 (Glenn Maynard)
    g = types.FunctionType(f.__code__, f.__globals__, name=f.__name__, argdefs=f.__defaults__, closure=f.__closure__)
    g = functools.update_wrapper(g, f)
    g.__kwdefaults__ = f.__kwdefaults__
    return g