
from typing import Optional, Union

import torch
import torch.fx
import torchvision
from torch import nn, Tensor
from torchvision.ops.boxes import box_area

from ..utils import _log_api_usage_once
from .roi_align import roi_align


# Copying result_idx_in_level into a specific index of result[] is not supported by
# ONNX tracing yet. _onnx_merge_levels() is an ONNX-friendly implementation that
# merges the per-level results back into the right indices of a single output tensor.
@torch.jit.unused
def _onnx_merge_levels(levels: Tensor, unmerged_results: list[Tensor]) -> Tensor:
    first_result = unmerged_results[0]
    dtype, device = first_result.dtype, first_result.device
    res = torch.zeros(
        (levels.size(0), first_result.size(1), first_result.size(2), first_result.size(3)),
        dtype=dtype,
        device=device,
    )
    for level in range(len(unmerged_results)):
        index = torch.where(levels == level)[0].view(-1, 1, 1, 1)
        index = index.expand(
            index.size(0),
            unmerged_results[level].size(1),
            unmerged_results[level].size(2),
            unmerged_results[level].size(3),
        )
        res = res.scatter(0, index, unmerged_results[level])
    return res


def initLevelMapper(
    k_min: int,
    k_max: int,
    canonical_scale: int = 224,
    canonical_level: int = 4,
    eps: float = 1e-6,
):
    return LevelMapper(k_min, k_max, canonical_scale, canonical_level, eps)
Srg)r,   /   zDetermine which FPN level each RoI in a set of RoIs should map to based
on the heuristic in the FPN paper.

Args:
    k_min (int)
    k_max (int)
    canonical_scale (int)
    canonical_level (int)
    eps (float)
r%   r&   r'   r(   r)   c                 @    Xl         X l        X0l        X@l        XPl        g r+   )r%   r&   s0lvl0r)   )selfr%   r&   r'   r(   r)   s         r"   __init__LevelMapper.__init__;   s     

!#	r$   boxlistsr   c           
      R   [         R                  " [         R                  " U Vs/ s H  n[        U5      PM     sn5      5      n[         R                  " U R
                  [         R                  " X0R                  -  5      -   [         R                  " U R                  UR                  S9-   5      n[         R                  " X@R                  U R                  S9nUR                  [         R                  5      U R                  -
  R                  [         R                  5      $ s  snf )z$
Args:
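
# A minimal sketch (not part of the module) of how the Eqn.(1) heuristic above assigns
# boxes to FPN levels. The k_min=2 / k_max=5 values are an assumption matching a typical
# P2-P5 pyramid; canonical_scale=224 and canonical_level=4 are the defaults used above.
#
#   >>> mapper = initLevelMapper(k_min=2, k_max=5)
#   >>> boxes = [torch.tensor([[0.0, 0.0, 224.0, 224.0],   # sqrt(area) = 224 -> level 4
#   ...                        [0.0, 0.0, 56.0, 56.0]])]   # sqrt(area) = 56  -> level 2
#   >>> mapper(boxes)                                      # indices are relative to k_min
#   tensor([2, 0])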


def _convert_to_roi_format(boxes: list[Tensor]) -> Tensor:
    concat_boxes = torch.cat(boxes, dim=0)
    device, dtype = concat_boxes.device, concat_boxes.dtype
    ids = torch.cat(
        [
            torch.full_like(b[:, :1], i, dtype=dtype, layout=torch.strided, device=device)
            for i, b in enumerate(boxes)
        ],
        dim=0,
    )
    rois = torch.cat([ids, concat_boxes], dim=1)
    return rois


def _infer_scale(feature: Tensor, original_size: list[int]) -> float:
    # assumption: the scale is of the form 2 ** (-k), with k integer
    size = feature.shape[-2:]
    possible_scales: list[float] = []
    for s1, s2 in zip(size, original_size):
        approx_scale = float(s1) / float(s2)
        scale = 2 ** float(torch.tensor(approx_scale).log2().round())
        possible_scales.append(scale)
    return possible_scales[0]


@torch.fx.wrap
def _setup_scales(
    features: list[Tensor], image_shapes: list[tuple[int, int]], canonical_scale: int, canonical_level: int
) -> tuple[list[float], LevelMapper]:
    if not image_shapes:
        raise ValueError("images list should not be empty")
    max_x = 0
    max_y = 0
    for shape in image_shapes:
        max_x = max(shape[0], max_x)
        max_y = max(shape[1], max_y)
    original_input_shape = (max_x, max_y)

    scales = [_infer_scale(feat, original_input_shape) for feat in features]
    # get the levels in the feature map by leveraging the fact that the network always
    # downsamples by a factor of 2 at each level.
    lvl_min = -torch.log2(torch.tensor(scales[0], dtype=torch.float32)).item()
    lvl_max = -torch.log2(torch.tensor(scales[-1], dtype=torch.float32)).item()

    map_levels = initLevelMapper(
        int(lvl_min),
        int(lvl_max),
        canonical_scale=canonical_scale,
        canonical_level=canonical_level,
    )
    return scales, map_levels


@torch.fx.wrap
def _filter_input(x: dict[str, Tensor], featmap_names: list[str]) -> list[Tensor]:
    x_filtered = []
    for k, v in x.items():
        if k in featmap_names:
            x_filtered.append(v)
    return x_filtered


@torch.fx.wrap
def _multiscale_roi_align(
    x_filtered: list[Tensor],
    boxes: list[Tensor],
    output_size: list[int],
    sampling_ratio: int,
    scales: Optional[list[float]],
    mapper: Optional[LevelMapper],
) -> Tensor:
    """
    Args:
        x_filtered (List[Tensor]): List of input tensors.
        boxes (List[Tensor[N, 4]]): boxes to be used to perform the pooling operation, in
            (x1, y1, x2, y2) format and in the image reference size, not the feature map
            reference. The coordinate must satisfy ``0 <= x1 < x2`` and ``0 <= y1 < y2``.
        output_size (Union[List[Tuple[int, int]], List[int]]): size of the output
        sampling_ratio (int): sampling ratio for ROIAlign
        scales (Optional[List[float]]): If None, scales will be automatically inferred. Default value is None.
        mapper (Optional[LevelMapper]): If None, mapper will be automatically inferred. Default value is None.
    Returns:
        result (Tensor)
    """
    if scales is None or mapper is None:
        raise ValueError("scales and mapper should not be None")

    num_levels = len(x_filtered)
    rois = _convert_to_roi_format(boxes)

    if num_levels == 1:
        return roi_align(
            x_filtered[0],
            rois,
            output_size=output_size,
            spatial_scale=scales[0],
            sampling_ratio=sampling_ratio,
        )

    levels = mapper(boxes)

    num_rois = len(rois)
    num_channels = x_filtered[0].shape[1]

    dtype, device = x_filtered[0].dtype, x_filtered[0].device
    result = torch.zeros(
        (num_rois, num_channels) + output_size,
        dtype=dtype,
        device=device,
    )

    tracing_results = []
    for level, (per_level_feature, scale) in enumerate(zip(x_filtered, scales)):
        idx_in_level = torch.where(levels == level)[0]
        rois_per_level = rois[idx_in_level]

        result_idx_in_level = roi_align(
            per_level_feature,
            rois_per_level,
            output_size=output_size,
            spatial_scale=scale,
            sampling_ratio=sampling_ratio,
        )

        if torchvision._is_tracing():
            tracing_results.append(result_idx_in_level.to(dtype))
        else:
            # The per-level features can come from different layers, so under autocast
            # their dtypes may differ; cast before the in-place copy into result.
            result[idx_in_level] = result_idx_in_level.to(result.dtype)

    if torchvision._is_tracing():
        result = _onnx_merge_levels(levels, tracing_results)

    return result
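
# A minimal sketch (not part of the module) of the plumbing above: _convert_to_roi_format
# prepends the image index to each box, and _multiscale_roi_align returns one pooled
# feature per box, whichever level the box was routed to. The feature-map shapes and
# image sizes below are assumptions chosen only for illustration.
#
#   >>> boxes = [torch.tensor([[0.0, 0.0, 64.0, 64.0]]),          # image 0: one box
#   ...          torch.tensor([[10.0, 10.0, 200.0, 200.0]])]      # image 1: one box
#   >>> _convert_to_roi_format(boxes)                             # (K, 5): [image_idx, x1, y1, x2, y2]
#   tensor([[  0.,   0.,   0.,  64.,  64.],
#           [  1.,  10.,  10., 200., 200.]])
#   >>> feats = [torch.rand(2, 256, 56, 56), torch.rand(2, 256, 28, 28)]   # two pyramid levels
#   >>> scales, mapper = _setup_scales(feats, [(224, 224), (224, 224)], 224, 4)
#   >>> scales                                                    # inferred 2**(-k) scales
#   [0.25, 0.125]
#   >>> _multiscale_roi_align(feats, boxes, (7, 7), 2, scales, mapper).shape
#   torch.Size([2, 256, 7, 7])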


class MultiScaleRoIAlign(nn.Module):
    """
    Multi-scale RoIAlign pooling, which is useful for detection with or without FPN.

    It infers the scale of the pooling via the heuristics specified in eq. 1
    of the `Feature Pyramid Network paper <https://arxiv.org/abs/1612.03144>`_.
    The keyword-only parameters ``canonical_scale`` and ``canonical_level``
    correspond respectively to ``224`` and ``k0=4`` in eq. 1, and
    have the following meaning: ``canonical_level`` is the target level of the pyramid from
    which to pool a region of interest with ``w x h = canonical_scale x canonical_scale``.

    Args:
        featmap_names (List[str]): the names of the feature maps that will be used
            for the pooling.
        output_size (List[Tuple[int, int]] or List[int]): output size for the pooled region
        sampling_ratio (int): sampling ratio for ROIAlign
        canonical_scale (int, optional): canonical_scale for LevelMapper
        canonical_level (int, optional): canonical_level for LevelMapper

    Examples::

        >>> m = torchvision.ops.MultiScaleRoIAlign(['feat1', 'feat3'], 3, 2)
        >>> i = OrderedDict()
        >>> i['feat1'] = torch.rand(1, 5, 64, 64)
        >>> i['feat2'] = torch.rand(1, 5, 32, 32)  # this feature won't be used in the pooling
        >>> i['feat3'] = torch.rand(1, 5, 16, 16)
        >>> # create some random bounding boxes
        >>> boxes = torch.rand(6, 4) * 256; boxes[:, 2:] += boxes[:, :2]
        >>> # original image size, before computing the feature maps
        >>> image_sizes = [(512, 512)]
        >>> output = m(i, [boxes], image_sizes)
        >>> print(output.shape)
        >>> torch.Size([6, 5, 3, 3])

    """

    __annotations__ = {"scales": Optional[list[float]], "map_levels": Optional[LevelMapper]}

    def __init__(
        self,
        featmap_names: list[str],
        output_size: Union[int, tuple[int], list[int]],
        sampling_ratio: int,
        *,
        canonical_scale: int = 224,
        canonical_level: int = 4,
    ):
        super().__init__()
        _log_api_usage_once(self)
        if isinstance(output_size, int):
            output_size = (output_size, output_size)
        self.featmap_names = featmap_names
        self.sampling_ratio = sampling_ratio
        self.output_size = tuple(output_size)
        self.scales = None
        self.map_levels = None
        self.canonical_scale = canonical_scale
        self.canonical_level = canonical_level

    def forward(
        self,
        x: dict[str, Tensor],
        boxes: list[Tensor],
        image_shapes: list[tuple[int, int]],
    ) -> Tensor:
        """
        Args:
            x (OrderedDict[Tensor]): feature maps for each level. They are assumed to have
                all the same number of channels, but they can have different sizes.
            boxes (List[Tensor[N, 4]]): boxes to be used to perform the pooling operation, in
                (x1, y1, x2, y2) format and in the image reference size, not the feature map
                reference. The coordinate must satisfy ``0 <= x1 < x2`` and ``0 <= y1 < y2``.
            image_shapes (List[Tuple[height, width]]): the sizes of each image before they
                have been fed to a CNN to obtain feature maps. This allows us to infer the
                scale factor for each one of the levels to be pooled.
        Returns:
            result (Tensor)
        """
        x_filtered = _filter_input(x, self.featmap_names)
        if self.scales is None or self.map_levels is None:
            self.scales, self.map_levels = _setup_scales(
                x_filtered, image_shapes, self.canonical_scale, self.canonical_level
            )

        return _multiscale_roi_align(
            x_filtered,
            boxes,
            self.output_size,
            self.sampling_ratio,
            self.scales,
            self.map_levels,
        )

    def __repr__(self) -> str:
        return (
            f"{self.__class__.__name__}(featmap_names={self.featmap_names}, "
            f"output_size={self.output_size}, sampling_ratio={self.sampling_ratio})"
        )
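
# A minimal end-to-end sketch (not part of the module). In torchvision's detection models a
# pooler like this serves as the box head's ``box_roi_pool``; the featmap names "0"-"3" and
# the tensor shapes below are assumptions for this standalone snippet, not values the module
# defines.
#
#   >>> pooler = MultiScaleRoIAlign(featmap_names=["0", "1", "2", "3"], output_size=7, sampling_ratio=2)
#   >>> fmaps = {str(i): torch.rand(1, 256, 256 // 2 ** (i + 2), 256 // 2 ** (i + 2)) for i in range(4)}
#   >>> boxes = [torch.tensor([[16.0, 16.0, 128.0, 128.0]])]
#   >>> pooler(fmaps, boxes, image_shapes=[(256, 256)]).shape
#   torch.Size([1, 256, 7, 7])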