
import inspect
import math
import re
import warnings
from collections import OrderedDict
from copy import deepcopy
from itertools import chain
from typing import Any, Callable, Optional, Union

import torch
import torchvision
from torch import fx, nn
from torch.fx.graph_module import _CodeOnlyModule, _copy_attr, _USER_PRESERVED_ATTRIBUTES_KEY


__all__ = ["create_feature_extractor", "get_graph_node_names"]


class LeafModuleAwareTracer(fx.Tracer):
    """
    An fx.Tracer that allows the user to specify a set of leaf modules, i.e.
    modules that are not to be traced through. The resulting graph ends up
    having single nodes referencing calls to the leaf modules' forward methods.
    """

    def __init__(self, *args, **kwargs):
        self.leaf_modules = {}
        if "leaf_modules" in kwargs:
            leaf_modules = kwargs.pop("leaf_modules")
            self.leaf_modules = leaf_modules
        super().__init__(*args, **kwargs)

    def is_leaf_module(self, m: nn.Module, module_qualname: str) -> bool:
        if isinstance(m, tuple(self.leaf_modules)):
            return True
        return super().is_leaf_module(m, module_qualname)


class NodePathTracer(LeafModuleAwareTracer):
    """
    NodePathTracer is an FX tracer that, for each operation, also records the
    name of the Node from which the operation originated. A node name here is
    a `.` separated path walking the hierarchy from top level module down to
    leaf operation or leaf module. The name of the top level module is not
    included as part of the node name. For example, if we trace a module whose
    forward method applies a ReLU module, the name for that node will simply
    be 'relu'.

    Some notes on the specifics:
        - Nodes are recorded to `self.node_to_qualname` which is a dictionary
          mapping a given Node object to its node name.
        - Nodes are recorded in the order which they are executed during
          tracing.
        - When a duplicate node name is encountered, a suffix of the form
          _{int} is added. The counter starts from 1.
    """

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Track the qualified name of the module currently being traced
        self.current_module_qualname = ""
        # A map from FX Node to the qualified name of the module from which it
        # originated, recorded in order of execution
        self.node_to_qualname = OrderedDict()

    def call_module(self, m: torch.nn.Module, forward: Callable, args, kwargs):
        """
        Override of `fx.Tracer.call_module`
        This override:
        1) Stores away the qualified name of the caller for restoration later
        2) Adds the qualified name of the caller to
           `current_module_qualname` for retrieval by `create_proxy`
        3) Once a leaf module is reached, calls `create_proxy`
        4) Restores the caller's qualified name into current_module_qualname
        """
        old_qualname = self.current_module_qualname
        try:
            module_qualname = self.path_of_module(m)
            self.current_module_qualname = module_qualname
            if not self.is_leaf_module(m, module_qualname):
                out = forward(*args, **kwargs)
                return out
            return self.create_proxy("call_module", module_qualname, args, kwargs)
        finally:
            self.current_module_qualname = old_qualname

    def create_proxy(
        self, kind: str, target: fx.node.Target, args, kwargs, name=None, type_expr=None, *_
    ) -> fx.proxy.Proxy:
        """
        Override of `Tracer.create_proxy`. This override intercepts the recording
        of every operation and stores away the current traced module's qualified
        name in `node_to_qualname`
        """
        proxy = super().create_proxy(kind, target, args, kwargs, name, type_expr)
        self.node_to_qualname[proxy.node] = self._get_node_qualname(self.current_module_qualname, proxy.node)
        return proxy

    def _get_node_qualname(self, module_qualname: str, node: fx.node.Node) -> str:
        node_qualname = module_qualname

        if node.op != "call_module":
            # In this case module_qualname from torch.fx doesn't go all the
            # way down to the leaf function/op, so we need to append it
            if len(node_qualname) > 0:
                # Only append '.' if we are deeper than the top level module
                node_qualname += "."
            node_qualname += str(node)

        # Now we need to add an _{index} postfix on any repeated node names.
        # For modules we do this from scratch.
        # For anything else, torch.fx already adds a globally scoped _{index}
        # postfix, but we want it scoped locally (relative to the direct
        # parent), so first undo the torch.fx postfix ...
        if re.match(r".+_[0-9]+$", node_qualname) is not None:
            node_qualname = node_qualname.rsplit("_", 1)[0]

        # ... and now add on our own postfix
        for existing_qualname in reversed(self.node_to_qualname.values()):
            # Check to see if existing_qualname is of the form
            # {node_qualname} or {node_qualname}_{int}
            if re.match(rf"{node_qualname}(_[0-9]+)?$", existing_qualname) is not None:
                postfix = existing_qualname.replace(node_qualname, "")
                if len(postfix):
                    # existing_qualname is of the form {node_qualname}_{int}
                    next_index = int(postfix[1:]) + 1
                else:
                    # existing_qualname is of the form {node_qualname}
                    next_index = 1
                node_qualname += f"_{next_index}"
                break

        return node_qualname


def _is_subseq(x, y):
    """Check if y is a subsequence of x
    https://stackoverflow.com/a/24017747/4391249
    """
    iter_x = iter(x)
    return all(any(x_item == y_item for x_item in iter_x) for y_item in y)
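# --- Illustrative sketch, not part of the original module -----------------------------------------
# The snippet below shows the kind of names NodePathTracer records: a `.`-separated path through the
# module hierarchy, with an `_{int}` suffix when the same name repeats. The helper name
# `_demo_node_path_tracer` and the toy module are hypothetical, added purely for illustration.
def _demo_node_path_tracer() -> list:
    class _Toy(nn.Module):
        def __init__(self):
            super().__init__()
            self.block = nn.Sequential(nn.Conv2d(3, 8, 3), nn.ReLU())

        def forward(self, x):
            y = self.block(x)
            # torch.relu is applied twice, so the second call gets a `_1` suffix
            return torch.relu(torch.relu(y))

    tracer = NodePathTracer()
    tracer.trace(_Toy())
    # Expected to contain names such as 'block.0', 'block.1', 'relu' and 'relu_1'
    # (the exact list can vary across torch.fx versions).
    return list(tracer.node_to_qualname.values())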
def _warn_graph_differences(train_tracer: NodePathTracer, eval_tracer: NodePathTracer):
    """
    Utility function for warning the user if there are differences between
    the train graph nodes and the eval graph nodes.
    """
    train_nodes = list(train_tracer.node_to_qualname.values())
    eval_nodes = list(eval_tracer.node_to_qualname.values())

    if len(train_nodes) == len(eval_nodes) and all(t == e for t, e in zip(train_nodes, eval_nodes)):
        return

    suggestion_msg = (
        "When choosing nodes for feature extraction, you may need to specify "
        "output nodes for train and eval mode separately."
    )

    if _is_subseq(train_nodes, eval_nodes):
        msg = (
            "NOTE: The nodes obtained by tracing the model in eval mode "
            "are a subsequence of those obtained in train mode. "
        )
    elif _is_subseq(eval_nodes, train_nodes):
        msg = (
            "NOTE: The nodes obtained by tracing the model in train mode "
            "are a subsequence of those obtained in eval mode. "
        )
    else:
        msg = "The nodes obtained by tracing the model in train mode are different to those obtained in eval mode. "
    warnings.warn(msg + suggestion_msg)


def _get_leaf_modules_for_ops() -> list:
    members = inspect.getmembers(torchvision.ops)
    result = []
    for _, obj in members:
        if inspect.isclass(obj) and issubclass(obj, torch.nn.Module):
            result.append(obj)
    return result


def _set_default_tracer_kwargs(original_tr_kwargs: Optional[dict[str, Any]]) -> dict[str, Any]:
    default_autowrap_modules = (math, torchvision.ops)
    default_leaf_modules = _get_leaf_modules_for_ops()
    result_tracer_kwargs = {} if original_tr_kwargs is None else original_tr_kwargs
    # Append the torchvision defaults to whatever the caller provided
    result_tracer_kwargs["autowrap_modules"] = (
        tuple(set(result_tracer_kwargs["autowrap_modules"] + default_autowrap_modules))
        if "autowrap_modules" in result_tracer_kwargs
        else default_autowrap_modules
    )
    result_tracer_kwargs["leaf_modules"] = (
        list(set(result_tracer_kwargs["leaf_modules"] + default_leaf_modules))
        if "leaf_modules" in result_tracer_kwargs
        else default_leaf_modules
    )
    return result_tracer_kwargs
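# --- Illustrative sketch, not part of the original module -----------------------------------------
# `_set_default_tracer_kwargs` appends the torchvision defaults to whatever the caller provides, so
# user-supplied `leaf_modules` are merged with (not substituted for) the defaults. The helper name
# `_demo_default_tracer_kwargs` is hypothetical, added purely to demonstrate that behaviour.
def _demo_default_tracer_kwargs() -> None:
    merged = _set_default_tracer_kwargs({"leaf_modules": [nn.TransformerEncoderLayer]})
    # The user's leaf module is kept ...
    assert nn.TransformerEncoderLayer in merged["leaf_modules"]
    # ... and the default autowrap modules (math and torchvision.ops) are still added.
    assert math in merged["autowrap_modules"]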
def get_graph_node_names(
    model: nn.Module,
    tracer_kwargs: Optional[dict[str, Any]] = None,
    suppress_diff_warning: bool = False,
    concrete_args: Optional[dict[str, Any]] = None,
) -> tuple[list[str], list[str]]:
    """
    Dev utility to return node names in order of execution. See note on node
    names under :func:`create_feature_extractor`. Useful for seeing which node
    names are available for feature extraction. There are two reasons that
    node names can't easily be read directly from the code for a model:

        1. Not all submodules are traced through. Modules from ``torch.nn`` all
           fall within this category.
        2. Nodes representing the repeated application of the same operation
           or leaf module get a ``_{counter}`` postfix.

    The model is traced twice: once in train mode, and once in eval mode. Both
    sets of node names are returned.

    For more details on the node naming conventions used here, please see the
    :ref:`relevant subheading <about-node-names>` in the
    `documentation <https://pytorch.org/vision/stable/feature_extraction.html>`_.

    Args:
        model (nn.Module): model for which we'd like to print node names
        tracer_kwargs (dict, optional): a dictionary of keyword arguments for
            ``NodePathTracer`` (they are eventually passed onto
            `torch.fx.Tracer <https://pytorch.org/docs/stable/fx.html#torch.fx.Tracer>`_).
            By default, it will be set to wrap and make leaf nodes all torchvision ops:
            {"autowrap_modules": (math, torchvision.ops,), "leaf_modules": _get_leaf_modules_for_ops(),}
            WARNING: In case the user provides tracer_kwargs, above default arguments will be appended to the user
            provided dictionary.
        suppress_diff_warning (bool, optional): whether to suppress a warning
            when there are discrepancies between the train and eval version of
            the graph. Defaults to False.
        concrete_args (Optional[Dict[str, any]]): Concrete arguments that should
            not be treated as Proxies. According to the `Pytorch docs
            <https://pytorch.org/docs/stable/fx.html#torch.fx.Tracer.trace>`_,
            this parameter's API may not be guaranteed.

    Returns:
        tuple(list, list): a list of node names from tracing the model in
        train mode, and another from tracing the model in eval mode.

    Examples::

        >>> model = torchvision.models.resnet18()
        >>> train_nodes, eval_nodes = get_graph_node_names(model)
    """
    tracer_kwargs = _set_default_tracer_kwargs(tracer_kwargs)
    is_training = model.training
    train_tracer = NodePathTracer(**tracer_kwargs)
    train_tracer.trace(model.train(), concrete_args=concrete_args)
    eval_tracer = NodePathTracer(**tracer_kwargs)
    eval_tracer.trace(model.eval(), concrete_args=concrete_args)
    train_nodes = list(train_tracer.node_to_qualname.values())
    eval_nodes = list(eval_tracer.node_to_qualname.values())
    if not suppress_diff_warning:
        _warn_graph_differences(train_tracer, eval_tracer)
    # Restore the original training state of the model
    model.train(is_training)
    return train_nodes, eval_nodes
class DualGraphModule(fx.GraphModule):
    """
    A derivative of `fx.GraphModule`. Differs in the following ways:
    - Requires a train and eval version of the underlying graph
    - Copies submodules according to the nodes of both train and eval graphs.
    - Calling train(mode) switches between train graph and eval graph.
    """

    def __init__(
        self, root: torch.nn.Module, train_graph: fx.Graph, eval_graph: fx.Graph, class_name: str = "GraphModule"
    ):
        """
        Args:
            root (nn.Module): module from which the copied module hierarchy is
                built
            train_graph (fx.Graph): the graph that should be used in train mode
            eval_graph (fx.Graph): the graph that should be used in eval mode
        """
        super(fx.GraphModule, self).__init__()

        self.__class__.__name__ = class_name

        self.train_graph = train_graph
        self.eval_graph = eval_graph

        # Copy all get_attr and call_module ops (indicated by both train and
        # eval graphs)
        for node in chain(iter(train_graph.nodes), iter(eval_graph.nodes)):
            if node.op in ["get_attr", "call_module"]:
                if not isinstance(node.target, str):
                    raise TypeError(f"node.target should be of type str instead of {type(node.target)}")
                _copy_attr(root, self, node.target)

        # train mode by default
        self.train()
        self.graph = train_graph

        # (borrowed from fx.GraphModule):
        # Store the Tracer class responsible for creating a Graph separately as part of the
        # GraphModule state, except when the Tracer is defined in a local namespace.
        # Locally defined Tracers are not pickleable. This is needed because torch.package will
        # serialize a GraphModule without retaining the Graph, and needs to use the correct Tracer
        # to re-create the Graph during deserialization.
        if self.eval_graph._tracer_cls != self.train_graph._tracer_cls:
            raise TypeError(
                f"Train mode and eval mode should use the same tracer class. Instead got "
                f"{self.eval_graph._tracer_cls} for eval vs {self.train_graph._tracer_cls} for train"
            )
        self._tracer_cls = None
        if self.graph._tracer_cls and "<locals>" not in self.graph._tracer_cls.__qualname__:
            self._tracer_cls = self.graph._tracer_cls

    def train(self, mode=True):
        """
        Swap out the graph depending on the selected training mode.
        NOTE this should be safe when calling model.eval() because that just
        calls this with mode == False.
        """
        # NOTE: Only set self.graph if the desired graph is not already
        # selected. This saves us from recompiling when it isn't necessary.
        if mode and not self.training:
            self.graph = self.train_graph
        elif not mode and self.training:
            self.graph = self.eval_graph
        return super().train(mode=mode)

    def _deepcopy_init(self):
        # See __deepcopy__ below
        return DualGraphModule.__init__

    def __deepcopy__(self, memo):
        # Mirrors `fx.GraphModule.__deepcopy__`, but reconstructs the copy via
        # `DualGraphModule.__init__` so that both graphs are preserved
        res = type(self).__new__(type(self))
        memo[id(self)] = res
        fake_mod = _CodeOnlyModule(deepcopy(self.__dict__, memo))
        self._deepcopy_init()(res, fake_mod, fake_mod.__dict__["train_graph"], fake_mod.__dict__["eval_graph"])

        extra_preserved_attrs = [
            "_state_dict_hooks",
            "_load_state_dict_pre_hooks",
            "_load_state_dict_post_hooks",
            "_replace_hook",
            "_create_node_hooks",
            "_erase_node_hooks",
        ]
        for attr in extra_preserved_attrs:
            if attr in self.__dict__:
                setattr(res, attr, deepcopy(self.__dict__[attr], memo))
        res.meta = deepcopy(getattr(self, "meta", {}), memo)
        if _USER_PRESERVED_ATTRIBUTES_KEY in res.meta:
            for attr_name, attr in res.meta[_USER_PRESERVED_ATTRIBUTES_KEY].items():
                setattr(res, attr_name, attr)
        return res


def create_feature_extractor(
    model: nn.Module,
    return_nodes: Optional[Union[list[str], dict[str, str]]] = None,
    train_return_nodes: Optional[Union[list[str], dict[str, str]]] = None,
    eval_return_nodes: Optional[Union[list[str], dict[str, str]]] = None,
    tracer_kwargs: Optional[dict[str, Any]] = None,
    suppress_diff_warning: bool = False,
    concrete_args: Optional[dict[str, Any]] = None,
) -> fx.GraphModule:
    """
    Creates a new graph module that returns intermediate nodes from a given
    model as dictionary with user specified keys as strings, and the requested
    outputs as values. This is achieved by re-writing the computation graph of
    the model via FX to return the desired nodes as outputs. All unused nodes
    are removed, together with their corresponding parameters.

    Desired output nodes must be specified as a ``.`` separated
    path walking the module hierarchy from top level module down to leaf
    operation or leaf module. For more details on the node naming conventions
    used here, please see the :ref:`relevant subheading <about-node-names>`
    in the `documentation <https://pytorch.org/vision/stable/feature_extraction.html>`_.

    Not all models will be FX traceable, although with some massaging they can
    be made to cooperate. Here's a (not exhaustive) list of tips:

        - If you don't need to trace through a particular, problematic
          sub-module, turn it into a "leaf module" by passing a list of
          ``leaf_modules`` as one of the ``tracer_kwargs`` (see example below).
          It will not be traced through, but rather, the resulting graph will
          hold a reference to that module's forward method.
        - Likewise, you may turn functions into leaf functions by passing a
          list of ``autowrap_functions`` as one of the ``tracer_kwargs`` (see
          example below).
        - Some inbuilt Python functions can be problematic. For instance,
          ``int`` will raise an error during tracing. You may wrap them in your
          own function and then pass that in ``autowrap_functions`` as one of
          the ``tracer_kwargs``.

    For further information on FX see the
    `torch.fx documentation <https://pytorch.org/docs/stable/fx.html>`_.

    Args:
        model (nn.Module): model on which we will extract the features
        return_nodes (list or dict, optional): either a ``List`` or a ``Dict``
            containing the names (or partial names - see note above)
            of the nodes for which the activations will be returned. If it is
            a ``Dict``, the keys are the node names, and the values
            are the user-specified keys for the graph module's returned
            dictionary. If it is a ``List``, it is treated as a ``Dict`` mapping
            node specification strings directly to output names. In the case
            that ``train_return_nodes`` and ``eval_return_nodes`` are specified,
            this should not be specified.
        train_return_nodes (list or dict, optional): similar to
            ``return_nodes``. This can be used if the return nodes
            for train mode are different than those from eval mode.
            If this is specified, ``eval_return_nodes`` must also be specified,
            and ``return_nodes`` should not be specified.
        eval_return_nodes (list or dict, optional): similar to
            ``return_nodes``. This can be used if the return nodes
            for train mode are different than those from eval mode.
            If this is specified, ``train_return_nodes`` must also be specified,
            and ``return_nodes`` should not be specified.
        tracer_kwargs (dict, optional): a dictionary of keyword arguments for
            ``NodePathTracer`` (which passes them onto its parent class
            `torch.fx.Tracer <https://pytorch.org/docs/stable/fx.html#torch.fx.Tracer>`_).
            By default, it will be set to wrap and make leaf nodes all torchvision ops:
            {"autowrap_modules": (math, torchvision.ops,), "leaf_modules": _get_leaf_modules_for_ops(),}
            WARNING: In case the user provides tracer_kwargs, above default arguments will be appended to the user
            provided dictionary.
        suppress_diff_warning (bool, optional): whether to suppress a warning
            when there are discrepancies between the train and eval version of
            the graph. Defaults to False.
        concrete_args (Optional[Dict[str, any]]): Concrete arguments that should
            not be treated as Proxies. According to the `Pytorch docs
            <https://pytorch.org/docs/stable/fx.html#torch.fx.Tracer.trace>`_,
            this parameter's API may not be guaranteed.

    Examples::

        >>> # Feature extraction with resnet
        >>> model = torchvision.models.resnet18()
        >>> # extract layer1 and layer3, giving as names `feat1` and `feat2`
        >>> model = create_feature_extractor(
        >>>     model, {'layer1': 'feat1', 'layer3': 'feat2'})
        >>> out = model(torch.rand(1, 3, 224, 224))
        >>> print([(k, v.shape) for k, v in out.items()])
        >>>     [('feat1', torch.Size([1, 64, 56, 56])),
        >>>      ('feat2', torch.Size([1, 256, 14, 14]))]

        >>> # Specifying leaf modules and leaf functions
        >>> def leaf_function(x):
        >>>     # This would raise a TypeError if traced through
        >>>     return int(x)
        >>>
        >>> class LeafModule(torch.nn.Module):
        >>>     def forward(self, x):
        >>>         # This would raise a TypeError if traced through
        >>>         int(x.shape[0])
        >>>         return torch.nn.functional.relu(x + 4)
        >>>
        >>> class MyModule(torch.nn.Module):
        >>>     def __init__(self):
        >>>         super().__init__()
        >>>         self.conv = torch.nn.Conv2d(3, 1, 3)
        >>>         self.leaf_module = LeafModule()
        >>>
        >>>     def forward(self, x):
        >>>         leaf_function(x.shape[0])
        >>>         x = self.conv(x)
        >>>         return self.leaf_module(x)
        >>>
        >>> model = create_feature_extractor(
        >>>     MyModule(), return_nodes=['leaf_module'],
        >>>     tracer_kwargs={'leaf_modules': [LeafModule],
        >>>                    'autowrap_functions': [leaf_function]})
    """
    tracer_kwargs = _set_default_tracer_kwargs(tracer_kwargs)
    is_training = model.training

    if all(arg is None for arg in [return_nodes, train_return_nodes, eval_return_nodes]):
        raise ValueError(
            "Either `return_nodes` or `train_return_nodes` and `eval_return_nodes` together, should be specified"
        )

    if (train_return_nodes is None) ^ (eval_return_nodes is None):
        raise ValueError(
            "If any of `train_return_nodes` and `eval_return_nodes` are specified, then both should be specified"
        )

    if not ((return_nodes is None) ^ (train_return_nodes is None)):
        raise ValueError("If `train_return_nodes` and `eval_return_nodes` are specified, then both should be specified")

    # Put *_return_nodes into Dict[str, str] format
    def to_strdict(n) -> dict[str, str]:
        if isinstance(n, list):
            return {str(i): str(i) for i in n}
        return {str(k): str(v) for k, v in n.items()}

    if train_return_nodes is None:
        return_nodes = to_strdict(return_nodes)
        train_return_nodes = deepcopy(return_nodes)
        eval_return_nodes = deepcopy(return_nodes)
    else:
        train_return_nodes = to_strdict(train_return_nodes)
        eval_return_nodes = to_strdict(eval_return_nodes)

    # Repeat the tracing and graph rewriting for train and eval mode
    tracers = {}
    graphs = {}
    mode_return_nodes: dict[str, dict[str, str]] = {"train": train_return_nodes, "eval": eval_return_nodes}
    for mode in ["train", "eval"]:
        if mode == "train":
            model.train()
        elif mode == "eval":
            model.eval()

        # Instantiate our NodePathTracer and use that to trace the model
        tracer = NodePathTracer(**tracer_kwargs)
        graph = tracer.trace(model, concrete_args=concrete_args)

        name = model.__class__.__name__ if isinstance(model, nn.Module) else model.__name__
        graph_module = fx.GraphModule(tracer.root, graph, name)

        available_nodes = list(tracer.node_to_qualname.values())
        # FIXME We don't know if we should expect this to happen
        if len(set(available_nodes)) != len(available_nodes):
            raise ValueError(
                "There are duplicate nodes! Please raise an issue https://github.com/pytorch/vision/issues"
            )
        # Check that all outputs in return_nodes are present in the model
        for query in mode_return_nodes[mode].keys():
            # To check if a query is available we need to check that at least
            # one of the available names starts with it up to a .
            if not any([re.match(rf"^{query}(\.|$)", n) is not None for n in available_nodes]):
                raise ValueError(
                    f"node: '{query}' is not present in model. Hint: use "
                    "`get_graph_node_names` to make sure the "
                    "`return_nodes` you specified are present. It may even "
                    "be that you need to specify `train_return_nodes` and "
                    "`eval_return_nodes` separately."
                )

        # Remove existing output nodes
        orig_output_nodes = []
        for n in reversed(graph_module.graph.nodes):
            if n.op == "output":
                orig_output_nodes.append(n)
        if not orig_output_nodes:
            raise ValueError("No output nodes found in graph_module.graph.nodes")

        for n in orig_output_nodes:
            graph_module.graph.erase_node(n)

        # Find nodes corresponding to return_nodes and make them into output_nodes
        nodes = [n for n in graph_module.graph.nodes]
        output_nodes = OrderedDict()
        for n in reversed(nodes):
            module_qualname = tracer.node_to_qualname.get(n)
            if module_qualname is None:
                # NOTE - Known cases where this happens:
                # - Node representing creation of a tensor constant - probably
                #   not interesting to the user
                # - When packing the intermediate outputs into a named tuple,
                #   as in InceptionV3
                continue
            for query in mode_return_nodes[mode]:
                depth = query.count(".")
                if ".".join(module_qualname.split(".")[: depth + 1]) == query:
                    output_nodes[mode_return_nodes[mode][query]] = n
                    mode_return_nodes[mode].pop(query)
                    break
        output_nodes = OrderedDict(reversed(list(output_nodes.items())))

        # And add them in the end of the graph
        with graph_module.graph.inserting_after(nodes[-1]):
            graph_module.graph.output(output_nodes)

        # Remove unused modules / parameters
        graph_module.graph.eliminate_dead_code()
        graph_module.recompile()

        # Keep track of the tracer and graph for this mode
        tracers[mode] = tracer
        graphs[mode] = graph

    # Warn user if there are any discrepancies between the graphs of the
    # train and eval modes
    if not suppress_diff_warning:
        _warn_graph_differences(tracers["train"], tracers["eval"])

    # Build the final graph module
    graph_module = DualGraphModule(model, graphs["train"], graphs["eval"], class_name=name)

    # Restore original training mode
    model.train(is_training)
    graph_module.train(is_training)

    return graph_module