from typing import Any, Dict, List, Union

import numpy as np

from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends
from .base import Pipeline, build_pipeline_init_args


if is_vision_available():
    from PIL import Image

    from ..image_utils import load_image

if is_torch_available():
    from ..models.auto.modeling_auto import (
        MODEL_FOR_IMAGE_SEGMENTATION_MAPPING_NAMES,
        MODEL_FOR_INSTANCE_SEGMENTATION_MAPPING_NAMES,
        MODEL_FOR_SEMANTIC_SEGMENTATION_MAPPING_NAMES,
        MODEL_FOR_UNIVERSAL_SEGMENTATION_MAPPING_NAMES,
    )

logger = logging.get_logger(__name__)


# A single prediction holds "label", "mask" and, where applicable, "score".
Prediction = Dict[str, Any]
Predictions = List[Prediction]


@add_end_docstrings(build_pipeline_init_args(has_image_processor=True))
class ImageSegmentationPipeline(Pipeline):
    """
Image segmentation pipeline using any `AutoModelForXXXSegmentation`. This pipeline predicts masks of objects and
their classes.

Example:

```python
>>> from transformers import pipeline

>>> segmenter = pipeline(model="facebook/detr-resnet-50-panoptic")
>>> segments = segmenter("https://huggingface.co/datasets/Narsil/image_dummy/raw/main/parrots.png")
>>> len(segments)
2

>>> segments[0]["label"]
'bird'

>>> segments[1]["label"]
'bird'

>>> type(segments[0]["mask"])  # This is a black and white mask showing where the bird is in the original image.
<class 'PIL.Image.Image'>

>>> segments[0]["mask"].size
(768, 512)
```
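
A specific subtask and custom thresholds can also be requested at call time (a sketch; whether a given
subtask is available depends on the model):

```python
>>> segments = segmenter(
...     "https://huggingface.co/datasets/Narsil/image_dummy/raw/main/parrots.png",
...     subtask="panoptic",
...     threshold=0.9,
...     overlap_mask_area_threshold=0.5,
... )
```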


This image segmentation pipeline can currently be loaded from [`pipeline`] using the following task identifier:
`"image-segmentation"`.

See the list of available models on
[huggingface.co/models](https://huggingface.co/models?filter=image-segmentation).
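
For example, the same checkpoint can be loaded through that task identifier (a sketch; passing `model` is
optional but pins the checkpoint explicitly):

```python
>>> from transformers import pipeline

>>> segmenter = pipeline("image-segmentation", model="facebook/detr-resnet-50-panoptic")
```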
    """

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)

        if self.framework == "tf":
            raise ValueError(f"The {self.__class__} is only available in PyTorch.")

        requires_backends(self, "vision")
        # Accept any model registered for image, semantic, instance or universal segmentation.
        mapping = MODEL_FOR_IMAGE_SEGMENTATION_MAPPING_NAMES.copy()
        mapping.update(MODEL_FOR_SEMANTIC_SEGMENTATION_MAPPING_NAMES)
        mapping.update(MODEL_FOR_INSTANCE_SEGMENTATION_MAPPING_NAMES)
        mapping.update(MODEL_FOR_UNIVERSAL_SEGMENTATION_MAPPING_NAMES)
        self.check_model_type(mapping)

    def _sanitize_parameters(self, **kwargs):
        # Route call-time kwargs to the pipeline stages that consume them.
        preprocess_kwargs = {}
        postprocess_kwargs = {}
        if "subtask" in kwargs:
            postprocess_kwargs["subtask"] = kwargs["subtask"]
            preprocess_kwargs["subtask"] = kwargs["subtask"]
        if "threshold" in kwargs:
            postprocess_kwargs["threshold"] = kwargs["threshold"]
        if "mask_threshold" in kwargs:
            postprocess_kwargs["mask_threshold"] = kwargs["mask_threshold"]
        if "overlap_mask_area_threshold" in kwargs:
            postprocess_kwargs["overlap_mask_area_threshold"] = kwargs["overlap_mask_area_threshold"]
        if "timeout" in kwargs:
            preprocess_kwargs["timeout"] = kwargs["timeout"]

        return preprocess_kwargs, {}, postprocess_kwargs

    def __call__(self, inputs=None, **kwargs) -> Union[Predictions, List[Prediction]]:
        """
Perform segmentation (detect masks & classes) in the image(s) passed as inputs.

Args:
    inputs (`str`, `List[str]`, `PIL.Image` or `List[PIL.Image]`):
        The pipeline handles three types of images:

        - A string containing an HTTP(S) link pointing to an image
        - A string containing a local path to an image
        - An image loaded in PIL directly

        The pipeline accepts either a single image or a batch of images. Images in a batch must all be in the
        same format: all as HTTP(S) links, all as local paths, or all as PIL images.
    subtask (`str`, *optional*):
        Segmentation task to be performed; choose one of [`semantic`, `instance`, `panoptic`], depending on model
        capabilities. If not set, the pipeline will attempt to resolve one in the following order:
          `panoptic`, `instance`, `semantic`.
    threshold (`float`, *optional*, defaults to 0.9):
        Probability threshold to filter out predicted masks.
    mask_threshold (`float`, *optional*, defaults to 0.5):
        Threshold to use when turning the predicted masks into binary values.
    overlap_mask_area_threshold (`float`, *optional*, defaults to 0.5):
        Mask overlap threshold to eliminate small, disconnected segments.
    timeout (`float`, *optional*, defaults to None):
        The maximum time in seconds to wait for fetching images from the web. If None, no timeout is set and
        the call may block forever.

Return:
    A dictionary or a list of dictionaries containing the result. If the input is a single image, a list of
    dictionaries is returned; if the input is a list of several images, a list of lists of dictionaries is
    returned, one list per image.

    The dictionaries contain the mask, label and score (where applicable) of each detected object and have
    the following keys:

    - **label** (`str`) -- The class label identified by the model.
    - **mask** (`PIL.Image`) -- A binary mask of the detected object as a PIL Image of shape (width, height) of
      the original image. Returns a mask filled with zeros if no object is found.
    - **score** (*optional* `float`) -- Optionally, when the model is capable of estimating a confidence of the
      "object" described by the label and the mask.
imageszICannot call the image-classification pipeline without an inputs argument!)popr    r   __call__)r%   inputsr'   r!   s      r)   r;   "ImageSegmentationPipeline.__call__a   sB    V vZZ)F>hiiw1&11r+   c                 F   [        XS9nUR                  UR                  4/nU R                  R                  R
                  R                  S:X  a  Uc  0 nOSU/0nU R                  " S
U/SS.UD6nU R                  S:X  a  UR                  U R                  5      nU R                  US   SU R                  R                  R                  U R                  S9S   US'   O<U R                  U/SS9nU R                  S:X  a  UR                  U R                  5      nXFS	'   U$ )N)r1   OneFormerConfigtask_inputspt)r9   return_tensors
max_length)paddingrC   rB   	input_idstarget_sizer2   )r   heightwidthmodelconfigr!   __name__image_processorr   totorch_dtype	tokenizertask_seq_len)r%   imager-   r1   rF   r'   r<   s          r)   
preprocess$ImageSegmentationPipeline.preprocess   s    52ekk23::&&//3DD''3))X%XQWXF~~%4#3#34$(NN}%$::,,99#~~	 %3 %
 %F=! ))%)NF~~%4#3#34 +}r+   c                 T    UR                  S5      nU R                  " S0 UD6nX#S'   U$ )NrF   r2   )r:   rI   )r%   model_inputsrF   model_outputss       r)   _forward"ImageSegmentationPipeline._forward   s1    "&&}5

2\2'2m$r+   c                 T   S nUS;   a2  [        U R                  S5      (       a  U R                  R                  nO7US;   a1  [        U R                  S5      (       a  U R                  R                  nUb  U" UUUUUS   S9S   n/ nUS   n	US	    H  n
XS
   :H  S-  n[        R
                  " UR                  5       R                  [        R                  5      SS9nU R                  R                  R                  U
S      nU
S   nUR                  XUS.5        M     U$ US;   a  [        U R                  S5      (       a  U R                  R                  XS   S9S   n/ nUR                  5       n	[        R                  " U	5      nU Hs  nX:H  S-  n[        R
                  " UR                  [        R                  5      SS9nU R                  R                  R                  U   nUR                  S XS.5        Mu     U$ [!        SU S[#        U R                  5       35      e)N>   Npanoptic"post_process_panoptic_segmentation>   Ninstance"post_process_instance_segmentationrF   )r.   r/   r0   target_sizesr   segmentationsegments_infoid   L)modelabel_idscore)rf   labelmask>   Nsemantic"post_process_semantic_segmentation)r^   zSubtask z is not supported for model )hasattrrL   r[   r]   r   	fromarraynumpyastypenpuint8rI   rJ   id2labelappendrj   uniquer    type)r%   rV   r-   r.   r/   r0   fnoutputs
annotationr_   segmentrh   rg   rf   labelss                  r)   postprocess%ImageSegmentationPipeline.postprocess   s%    ((WT5I5IKo-p-p%%HHB**wt7K7KMq/r/r%%HHB>#-,G*=9 G J">2L"?3$5<tzz|':':288'D3O

))227:3FG(!!E4"PQ 4. ! **wt7K7KMq/r/r**MM-,H N G J"==?LYY|,F$-4t{{288'<3G

))2259!!D5"OP	    xy0LTRVR\R\M]L^_``r+   r2   )N)NN)Ng?      ?r|   )rK   
__module____qualname____firstlineno____doc__r   r5   r   Predictionsr   
Predictionr;   rR   rW   rz   __static_attributes____classcell__)r!   s   @r)   r   r      sO    !F'9"/2{DDT7T1U /2 /2b0 kn, ,r+   r   ) typingr   r   r   r   rm   ro   utilsr   r	   r
   r   r   baser   r   PILr   image_utilsr   models.auto.modeling_autor   r   r   r   
get_loggerrK   loggerstrr   r   r   r2   r+   r)   <module>r      s    ) )  k k 4 (  
		H	% #s(^
: ,FG} } H}r+   