
    [hO                     $   S SK Jr  SSKJr   \R
                  r\S 5       rS r	\SS j5       r
S r\SS	 j5       rS
 r\S 5       r0 4S jr\S 5       r\SS j5       r\SS j5       r\S 5       r\S 5       rg! \ a    \R                  r Ntf = f)   )xrange   )defunc                     [        U5      nU R                  nSUS-  -  n[        US-   5       H  nX4X   -  -  nXEU-
  -  US-   -  nM     U$ )z
Given a sequence `(s_k)` containing at least `n+1` items, returns the
`n`-th forward difference,

.. math ::

    \Delta^n = \sum_{k=0}^{\infty} (-1)^{k+n} {n \choose k} s_k.
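
For example, applied to the sequence `s_k = 2^k`, the `n`-th forward
difference at `k = 0` is `(2-1)^n = 1` for every `n`; a quick sanity
check (illustrative sketch, not run as a doctest)::

    from mpmath import mp
    d = mp.difference([1, 2, 4, 8, 16], 4)   # expect d == 1.0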
    """
    n = int(n)
    d = ctx.zero
    b = (-1) ** (n & 1)
    for k in xrange(n+1):
        d += b * s[k]
        b = (b * (k-n)) // (k+1)
    return d

def hsteps(ctx, f, x, n, prec, **options):
    """
    Generate the function samples needed for an `n`-th order finite
    difference, returning ``(values, step_norm, working_precision)``.
    """
    singular = options.get('singular')
    addprec = options.get('addprec', 10)
    direction = options.get('direction', 0)
    workprec = (prec+2*addprec) * (n+1)
    orig = ctx.prec
    try:
        ctx.prec = workprec
        h = options.get('h')
        if h is None:
            if options.get('relative'):
                hextramag = int(ctx.mag(x))
            else:
                hextramag = 0
            h = ctx.ldexp(1, -prec-addprec-hextramag)
        else:
            h = ctx.convert(h)
        # Directed difference: steps x, x+h, ..., x+n*h
        if direction:
            h *= ctx.sign(direction)
            steps = xrange(n+1)
            norm = h
        # Central difference: steps x-n*h, x-(n-2)*h, ..., x+n*h
        else:
            steps = xrange(-n, n+1, 2)
            norm = (2*h)
        # Perturb by half a step to avoid evaluating exactly at x
        if singular:
            x += 0.5*h
        values = [f(x+k*h) for k in steps]
        return values, norm, workprec
    finally:
        ctx.prec = orig

@defun
def diff(ctx, f, x, n=1, **options):
    r"""
Numerically computes the derivative of `f`, `f'(x)`, or generally for
an integer `n \ge 0`, the `n`-th derivative `f^{(n)}(x)`.
A few basic examples are::

    >>> from mpmath import *
    >>> mp.dps = 15; mp.pretty = True
    >>> diff(lambda x: x**2 + x, 1.0)
    3.0
    >>> diff(lambda x: x**2 + x, 1.0, 2)
    2.0
    >>> diff(lambda x: x**2 + x, 1.0, 3)
    0.0
    >>> nprint([diff(exp, 3, n) for n in range(5)])   # exp'(x) = exp(x)
    [20.0855, 20.0855, 20.0855, 20.0855, 20.0855]

Even more generally, given a tuple of arguments `(x_1, \ldots, x_k)`
and order `(n_1, \ldots, n_k)`, the partial derivative
`f^{(n_1,\ldots,n_k)}(x_1,\ldots,x_k)` is evaluated. For example::

    >>> diff(lambda x,y: 3*x*y + 2*y - x, (0.25, 0.5), (0,1))
    2.75
    >>> diff(lambda x,y: 3*x*y + 2*y - x, (0.25, 0.5), (1,1))
    3.0

**Options**

The following optional keyword arguments are recognized:

``method``
    Supported methods are ``'step'`` or ``'quad'``: derivatives may be
    computed using either a finite difference with a small step
    size `h` (default), or numerical quadrature.
``direction``
    Direction of finite difference: can be -1 for a left
    difference, 0 for a central difference (default), or +1
    for a right difference; more generally can be any complex number.
``addprec``
    Extra precision for `h` used to account for the function's
    sensitivity to perturbations (default = 10).
``relative``
    Choose `h` relative to the magnitude of `x`, rather than an
    absolute value; useful for large or tiny `x` (default = False).
``h``
    As an alternative to ``addprec`` and ``relative``, manually
    select the step size `h`.
``singular``
    If True, evaluation exactly at the point `x` is avoided; this is
    useful for differentiating functions with removable singularities.
    Default = False.
``radius``
    Radius of integration contour (with ``method = 'quad'``).
    Default = 0.25. A larger radius typically is faster and more
    accurate, but it must be chosen so that `f` has no
    singularities within the radius from the evaluation point.

A finite difference requires `n+1` function evaluations and must be
performed at `(n+1)` times the target precision. Accordingly, `f` must
support fast evaluation at high precision.

With integration, a larger number of function evaluations is
required, but not much extra precision. For high order
derivatives, this method may thus be faster if `f` is very expensive to
evaluate at high precision.

**Further examples**

The direction option is useful for computing left- or right-sided
derivatives of nonsmooth functions::

    >>> diff(abs, 0, direction=0)
    0.0
    >>> diff(abs, 0, direction=1)
    1.0
    >>> diff(abs, 0, direction=-1)
    -1.0

More generally, if the direction is nonzero, a right difference
is computed where the step size is multiplied by sign(direction).
For example, with direction=+j, the derivative from the positive
imaginary direction will be computed::

    >>> diff(abs, 0, direction=j)
    (0.0 - 1.0j)

With integration, the result may have a small imaginary part
even if the result is purely real::

    >>> diff(sqrt, 1, method='quad')    # doctest:+ELLIPSIS
    (0.5 - 4.59...e-26j)
    >>> chop(_)
    0.5

Adding precision to obtain an accurate value::

    >>> diff(cos, 1e-30)
    0.0
    >>> diff(cos, 1e-30, h=0.0001)
    -9.99999998328279e-31
    >>> diff(cos, 1e-30, addprec=100)
    -1.0e-30
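
The ``singular`` option avoids evaluating `f` exactly at `x`, which
helps with removable singularities. For instance (illustrative sketch,
not run as a doctest; the derivative of `\sin(x)/x` at `x = 0` is `0`)::

    from mpmath import diff, sin
    d = diff(lambda x: sin(x)/x, 0, singular=True)   # expect d to be ~0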

    """
    partial = False
    try:
        orders = list(n)
        x = list(x)
        partial = True
    except TypeError:
        pass
    if partial:
        x = [ctx.convert(_) for _ in x]
        return _partial_diff(ctx, f, x, orders, options)
    method = options.get('method', 'step')
    if n == 0 and method != 'quad' and not options.get('singular'):
        return f(ctx.convert(x))
    prec = ctx.prec
    try:
        if method == 'step':
            values, norm, workprec = hsteps(ctx, f, x, n, prec, **options)
            ctx.prec = workprec
            v = ctx.difference(values, n) / norm**n
        elif method == 'quad':
            ctx.prec += 10
            radius = ctx.convert(options.get('radius', 0.25))
            # Cauchy's differentiation formula, integrated over a circle
            # of the given radius centered at x
            def g(t):
                rei = radius*ctx.expj(t)
                z = x + rei
                return f(z) / rei**n
            d = ctx.quadts(g, [0, 2*ctx.pi])
            v = d * ctx.factorial(n) / (2*ctx.pi)
        else:
            raise ValueError("unknown method: %r" % method)
    finally:
        ctx.prec = prec
    return +v

def _partial_diff(ctx, f, xs, orders, options):
    # Recursively peel off one nonzero differentiation order at a time
    if not orders:
        return f()
    if not sum(orders):
        return f(*xs)
    i = 0
    for i in range(len(orders)):
        if orders[i]:
            break
    order = orders[i]
    def fdiff_inner(*f_args):
        def inner(t):
            return f(*(f_args[:i] + (t,) + f_args[i+1:]))
        return ctx.diff(inner, f_args[i], order, **options)
    orders[i] = 0
    return _partial_diff(ctx, fdiff_inner, xs, orders, options)

@defun
def diffs(ctx, f, x, n=None, **options):
    r"""
Returns a generator that yields the sequence of derivatives

.. math ::

    f(x), f'(x), f''(x), \ldots, f^{(k)}(x), \ldots

With ``method='step'``, :func:`~mpmath.diffs` uses only `O(k)`
function evaluations to generate the first `k` derivatives,
rather than the roughly `O(k^2)` evaluations
required if one calls :func:`~mpmath.diff` `k` separate times.

With `n < \infty`, the generator stops as soon as the
`n`-th derivative has been generated. If the exact number of
needed derivatives is known in advance, this is slightly
more efficient.

Options are the same as for :func:`~mpmath.diff`.

**Examples**

    >>> from mpmath import *
    >>> mp.dps = 15
    >>> nprint(list(diffs(cos, 1, 5)))
    [0.540302, -0.841471, -0.540302, 0.841471, 0.540302, -0.841471]
    >>> for i, d in zip(range(6), diffs(cos, 1)):
    ...     print("%s %s" % (i, d))
    ...
    0 0.54030230586814
    1 -0.841470984807897
    2 -0.54030230586814
    3 0.841470984807897
    4 0.54030230586814
    5 -0.841470984807897

    """
    if n is None:
        n = ctx.inf
    else:
        n = int(n)
    if options.get('method', 'step') != 'step':
        k = 0
        while k < n + 1:
            yield ctx.diff(f, x, k, **options)
            k += 1
        return
    singular = options.get('singular')
    if singular:
        yield ctx.diff(f, x, 0, singular=True)
    else:
        yield f(ctx.convert(x))
    if n < 1:
        return
    if n == ctx.inf:
        A, B = 1, 2
    else:
        A, B = 1, n+1
    while 1:
        callprec = ctx.prec
        y, norm, workprec = hsteps(ctx, f, x, B, callprec, **options)
        for k in xrange(A, B):
            try:
                ctx.prec = workprec
                d = ctx.difference(y, k) / norm**k
            finally:
                ctx.prec = callprec
            yield +d
            if k >= n:
                return
        A, B = B, int(B*1.4+1)
        B = min(B, n)

def iterable_to_function(gen):
    # Cache generator output so it can be indexed like a sequence
    gen = iter(gen)
    data = []
    def f(k):
        for i in xrange(len(data), k+1):
            data.append(next(gen))
        return data[k]
    return f

@defun
def diffs_prod(ctx, factors):
    r"""
Given a list of `N` iterables or generators yielding
`f_k(x), f'_k(x), f''_k(x), \ldots` for `k = 1, \ldots, N`,
generate `g(x), g'(x), g''(x), \ldots` where
`g(x) = f_1(x) f_2(x) \cdots f_N(x)`.

At high precision and for large orders, this is typically more efficient
than numerical differentiation if the derivatives of each `f_k(x)`
admit direct computation.

Note: This function does not increase the working precision internally,
so guard digits may have to be added externally for full accuracy.

**Examples**

    >>> from mpmath import *
    >>> mp.dps = 15; mp.pretty = True
    >>> f = lambda x: exp(x)*cos(x)*sin(x)
    >>> u = diffs(f, 1)
    >>> v = mp.diffs_prod([diffs(exp,1), diffs(cos,1), diffs(sin,1)])
    >>> next(u); next(v)
    1.23586333600241
    1.23586333600241
    >>> next(u); next(v)
    0.104658952245596
    0.104658952245596
    >>> next(u); next(v)
    -5.96999877552086
    -5.96999877552086
    >>> next(u); next(v)
    -12.4632923122697
    -12.4632923122697
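
One way to supply guard digits externally is mpmath's ``extraprec``
context manager (illustrative sketch, continuing the example above;
precision must be raised where the terms are actually generated)::

    with mp.extraprec(10):
        values = [next(v) for _ in range(3)]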

    """
    N = len(factors)
    if N == 1:
        for c in factors[0]:
            yield c
    else:
        u = iterable_to_function(ctx.diffs_prod(factors[:N//2]))
        v = iterable_to_function(ctx.diffs_prod(factors[N//2:]))
        n = 0
        while 1:
            # Leibniz rule: sum(binomial(n,k)*u(n-k)*v(k) for k in xrange(n+1))
            s = u(n) * v(0)
            a = 1
            for k in xrange(1, n+1):
                a = a * (n-k+1) // k
                s += a * u(n-k) * v(k)
            yield s
            n += 1

def dpoly(n, _cache={}):
    """
nth differentiation polynomial for exp (Faa di Bruno's formula).

TODO: most exponents are zero, so maybe a sparse representation
would be better.
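
Each key is a tuple of exponents `(e_1, e_2, ...)` and each value an
integer coefficient, so an entry represents a term
`c * f'(x)**e_1 * f''(x)**e_2 * ...` of the `n`-th derivative of
`exp(f(x))` divided by `exp(f(x))`. For example (illustrative)::

    dpoly(2)   # == {(2, 0, 0): 1, (0, 1, 0): 1}, i.e. f'(x)**2 + f''(x)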
    """
    if n in _cache:
        return _cache[n]
    if not _cache:
        _cache[0] = {(0,): 1}
    R = dpoly(n-1)
    R = dict((c+(0,), v) for (c, v) in iteritems(R))
    Ra = {}
    for powers, count in iteritems(R):
        powers1 = (powers[0]+1,) + powers[1:]
        if powers1 in Ra:
            Ra[powers1] += count
        else:
            Ra[powers1] = count
    for powers, count in iteritems(R):
        if not sum(powers):
            continue
        for k, p in enumerate(powers):
            if p:
                powers2 = powers[:k] + (p-1, powers[k+1]+1) + powers[k+2:]
                if powers2 in Ra:
                    Ra[powers2] += p*count
                else:
                    Ra[powers2] = p*count
    _cache[n] = Ra
    return _cache[n]

@defun
def diffs_exp(ctx, fdiffs):
    r"""
Given an iterable or generator yielding `f(x), f'(x), f''(x), \ldots`
generate `g(x), g'(x), g''(x), \ldots` where `g(x) = \exp(f(x))`.

At high precision and for large orders, this is typically more efficient
than numerical differentiation if the derivatives of `f(x)`
admit direct computation.

Note: This function does not increase the working precision internally,
so guard digits may have to be added externally for full accuracy.

**Examples**

The derivatives of the gamma function can be computed using
logarithmic differentiation::

    >>> from mpmath import *
    >>> mp.dps = 15; mp.pretty = True
    >>>
    >>> def diffs_loggamma(x):
    ...     yield loggamma(x)
    ...     i = 0
    ...     while 1:
    ...         yield psi(i,x)
    ...         i += 1
    ...
    >>> u = diffs_exp(diffs_loggamma(3))
    >>> v = diffs(gamma, 3)
    >>> next(u); next(v)
    2.0
    2.0
    >>> next(u); next(v)
    1.84556867019693
    1.84556867019693
    >>> next(u); next(v)
    2.49292999190269
    2.49292999190269
    >>> next(u); next(v)
    3.44996501352367
    3.44996501352367


    """
    fn = iterable_to_function(fdiffs)
    f0 = ctx.exp(fn(0))
    yield f0
    i = 1
    while 1:
        s = ctx.mpf(0)
        for powers, c in iteritems(dpoly(i)):
            s += c*ctx.fprod(fn(k+1)**p for (k, p) in enumerate(powers) if p)
        yield s * f0
        i += 1

@defun
def differint(ctx, f, x, n=1, x0=0):
    r"""
Calculates the Riemann-Liouville differintegral, or fractional
derivative, defined by

.. math ::

    \,_{x_0}{\mathbb{D}}^n_xf(x) = \frac{1}{\Gamma(m-n)} \frac{d^m}{dx^m}
    \int_{x_0}^{x}(x-t)^{m-n-1}f(t)dt

where `f` is a given (presumably well-behaved) function,
`x` is the evaluation point, `n` is the order, and `x_0` is
the reference point of integration (`m` is an arbitrary
parameter selected automatically).

With `n = 1`, this is just the standard derivative `f'(x)`; with `n = 2`,
the second derivative `f''(x)`, etc. With `n = -1`, it gives
`\int_{x_0}^x f(t) dt`, with `n = -2`
it gives `\int_{x_0}^x \left( \int_{x_0}^t f(u) du \right) dt`, etc.

As `n` is permitted to be any number, this operator generalizes
iterated differentiation and iterated integration to a single
operator with a continuous order parameter.

**Examples**

There is an exact formula for the fractional derivative of a
monomial `x^p`, which may be used as a reference. For example,
the following gives a half-derivative (order 0.5)::

    >>> from mpmath import *
    >>> mp.dps = 15; mp.pretty = True
    >>> x = mpf(3); p = 2; n = 0.5
    >>> differint(lambda t: t**p, x, n)
    7.81764019044672
    >>> gamma(p+1)/gamma(p-n+1) * x**(p-n)
    7.81764019044672

Another useful test function is the exponential function, whose
integration / differentiation formula easily generalizes
to arbitrary order. Here we first compute a third derivative,
and then a triply nested integral. (The reference point `x_0`
is set to `-\infty` to avoid nonzero endpoint terms.)::

    >>> differint(lambda x: exp(pi*x), -1.5, 3)
    0.278538406900792
    >>> exp(pi*-1.5) * pi**3
    0.278538406900792
    >>> differint(lambda x: exp(pi*x), 3.5, -3, -inf)
    1922.50563031149
    >>> exp(pi*3.5) / pi**3
    1922.50563031149

However, for noninteger `n`, the differentiation formula for the
exponential function must be modified to give the same result as the
Riemann-Liouville differintegral::

    >>> x = mpf(3.5)
    >>> c = pi
    >>> n = 1+2*j
    >>> differint(lambda x: exp(c*x), x, n)
    (-123295.005390743 + 140955.117867654j)
    >>> x**(-n) * exp(c)**x * (x*c)**n * gammainc(-n, 0, x*c) / gamma(-n)
    (-123295.005390743 + 140955.117867654j)

    """
    m = max(int(ctx.ceil(ctx.re(n)))+1, 1)
    r = m-n-1
    # Differentiate the Riemann-Liouville integral of order m-n
    g = lambda x: ctx.quad(lambda t: (x-t)**r * f(t), [x0, x])
    return ctx.diff(g, x, m) / ctx.gamma(m-n)

@defun
def diffun(ctx, f, n=1, **options):
    r"""
Given a function `f`, returns a function `g(x)` that evaluates the nth
derivative `f^{(n)}(x)`::

    >>> from mpmath import *
    >>> mp.dps = 15; mp.pretty = True
    >>> cos2 = diffun(sin)
    >>> sin2 = diffun(sin, 4)
    >>> cos(1.3), cos2(1.3)
    (0.267498828624587, 0.267498828624587)
    >>> sin(1.3), sin2(1.3)
    (0.963558185417193, 0.963558185417193)

The function `f` must support arbitrary precision evaluation.
See :func:`~mpmath.diff` for additional details and supported
keyword options.
    """
    if n == 0:
        return f
    def g(x):
        return ctx.diff(f, x, n, **options)
    return g

@defun
def taylor(ctx, f, x, n, **options):
    r"""
Produces a degree-`n` Taylor polynomial around the point `x` of the
given function `f`. The coefficients are returned as a list.

    >>> from mpmath import *
    >>> mp.dps = 15; mp.pretty = True
    >>> nprint(chop(taylor(sin, 0, 5)))
    [0.0, 1.0, 0.0, -0.166667, 0.0, 0.00833333]

The coefficients are computed using high-order numerical
differentiation. The function must be possible to evaluate
to arbitrary precision. See :func:`~mpmath.diff` for additional details
and supported keyword options.

Note that to evaluate the Taylor polynomial as an approximation
of `f`, e.g. with :func:`~mpmath.polyval`, the coefficients must be reversed,
and the point of the Taylor expansion must be subtracted from
the argument:

    >>> p = taylor(exp, 2.0, 10)
    >>> polyval(p[::-1], 2.5 - 2.0)
    12.1824939606092
    >>> exp(2.5)
    12.1824939607035

    """
    gen = enumerate(ctx.diffs(f, x, n, **options))
    if options.get("chop", True):
        return [ctx.chop(d)/ctx.factorial(i) for i, d in gen]
    else:
        return [d/ctx.factorial(i) for i, d in gen]

@defun
def pade(ctx, a, L, M):
    r"""
Computes a Pade approximation of degree `(L, M)` to a function.
Given at least `L+M+1` Taylor coefficients `a` approximating
a function `A(x)`, :func:`~mpmath.pade` returns coefficients of
polynomials `P, Q` satisfying

.. math ::

    P = \sum_{k=0}^L p_k x^k

    Q = \sum_{k=0}^M q_k x^k

    Q_0 = 1

    A(x) Q(x) = P(x) + O(x^{L+M+1})

`P(x)/Q(x)` can provide a good approximation to an analytic function
beyond the radius of convergence of its Taylor series (example
from G.A. Baker 'Essentials of Pade Approximants' Academic Press,
Ch.1A)::

    >>> from mpmath import *
    >>> mp.dps = 15; mp.pretty = True
    >>> one = mpf(1)
    >>> def f(x):
    ...     return sqrt((one + 2*x)/(one + x))
    ...
    >>> a = taylor(f, 0, 6)
    >>> p, q = pade(a, 3, 3)
    >>> x = 10
    >>> polyval(p[::-1], x)/polyval(q[::-1], x)
    1.38169105566806
    >>> f(x)
    1.38169855941551

    """
    # To determine L+1 coefficients of P and M coefficients of Q,
    # L+M+1 coefficients of A must be provided
    if len(a) < L+M+1:
        raise ValueError("L+M+1 Coefficients should be provided")

    if M == 0:
        if L == 0:
            return [ctx.one], [ctx.one]
        else:
            return a[:L+1], [ctx.one]

    # Solve the linear system for the denominator coefficients q[1..M]:
    #   a[L]*q[1]     + ... + a[L-M+1]*q[M] = -a[L+1]
    #   ...
    #   a[L+M-1]*q[1] + ... + a[L]*q[M]     = -a[L+M]
    A = ctx.matrix(M)
    for j in range(M):
        for i in range(min(M, L+j+1)):
            A[j, i] = a[L+j-i]
    v = -ctx.matrix(a[(L+1):(L+M+1)])
    x = ctx.lu_solve(A, v)
    q = [ctx.one] + list(x)
    # Numerator coefficients: p[i] = a[i] + sum(q[j]*a[i-j])
    p = [0]*(L+1)
    for i in range(L+1):
        s = a[i]
        for j in range(1, min(M, i) + 1):
            s += q[j]*a[i-j]
        p[i] = s
    return p, q