
Commit 747ee93

MAINT: doc enhancements and style fixes for optimize.newton.
Follow up of scipy gh-8357.
1 parent e1d50a7 commit 747ee93


scipy/optimize/zeros.py

Lines changed: 74 additions & 44 deletions
@@ -23,6 +23,7 @@
 
 class RootResults(object):
     """ Represents the root finding result.
+
     Attributes
     ----------
     root : float
@@ -35,6 +36,7 @@ class RootResults(object):
         True if the routine converged.
     flag : str
         Description of the cause of termination.
+
     """
     def __init__(self, root, iterations, function_calls, flag):
         self.root = root
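For readers skimming the diff, the attributes documented in this hunk are what a caller receives from the scalar solver when asking for diagnostics. A minimal sketch, assuming only the public `newton`/`RootResults` behaviour described in this docstring (the quadratic `g` is an illustrative stand-in, not part of the commit):

    from scipy import optimize

    def g(x):                 # illustrative function with a root at sqrt(2)
        return x**2 - 2

    root, r = optimize.newton(g, 1.0, full_output=True)
    print(root)                                         # estimated root location
    print(r.converged, r.iterations, r.function_calls)  # RootResults attributes above
    print(r.flag)                                       # cause of termination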
@@ -79,7 +81,7 @@ def _results_select(full_output, r):
 
 
 def newton(func, x0, fprime=None, args=(), tol=1.48e-8, maxiter=50,
-           fprime2=None, full_output=False, disp=True, converged=None):
+           fprime2=None, full_output=False, disp=True, converged=False):
     """
     Find a zero using the Newton-Raphson or secant method.
 
@@ -90,7 +92,7 @@ def newton(func, x0, fprime=None, args=(), tol=1.48e-8, maxiter=50,
     used.
 
     If `x0` is a sequence, then `newton` returns an array, and `func` must be
-    vectorized and return a sequence or array of the same shape as it's first
+    vectorized and return a sequence or array of the same shape as its first
     argument. If an optional argument, `converged`, is `True`, then the
     return is a named tuple `(root, converged, zero_der)` in which `root` is an
     array of the locations where `func` is zero, `converged` is an array, same
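A sketch of the array behaviour this paragraph documents, assuming the `converged` keyword exactly as it appears in this revision of the file (the cubic `f` and the values of `a` are illustrative only):

    import numpy as np
    from scipy import optimize

    f = lambda x, a: x**3 - a              # vectorized: returns an array shaped like x
    a = np.arange(1.0, 5.0)
    res = optimize.newton(f, np.full(4, 1.5), args=(a,), converged=True)
    print(res.root)                        # one root per element of a
    print(res.converged, res.zero_der)     # the boolean arrays described above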
@@ -102,15 +104,15 @@ def newton(func, x0, fprime=None, args=(), tol=1.48e-8, maxiter=50,
 
     Parameters
     ----------
-    func : function
+    func : callable
         The function whose zero is wanted. It must be a function of a
         single variable of the form f(x,a,b,c...), where a,b,c... are extra
         arguments that can be passed in the `args` parameter.
-    x0 : float, sequence, or array
+    x0 : float, sequence or ndarray
         An initial estimate of the zero that should be somewhere near the
         actual zero. If not scalar, then `func` must be vectorized and return
-        a sequence or array of the same shape as it's first argument.
-    fprime : function, optional
+        a sequence or array of the same shape as its first argument.
+    fprime : callable, optional
        The derivative of the function when available and convenient. If it
        is None (default), then the secant method is used.
     args : tuple, optional
@@ -119,37 +121,41 @@ def newton(func, x0, fprime=None, args=(), tol=1.48e-8, maxiter=50,
         The allowable error of the zero value.
     maxiter : int, optional
         Maximum number of iterations.
-    fprime2 : function, optional
+    fprime2 : callable, optional
         The second order derivative of the function when available and
         convenient. If it is None (default), then the normal Newton-Raphson
         or the secant method is used. If it is not None, then Halley's method
         is used.
     full_output : bool, optional
-        If `full_output` is False, the root is returned. If `full_output` is
-        True, the return value is ``(x, r)``, where `x` is the root, and `r` is
-        a RootResults object.
+        If `full_output` is False (default), the root is returned.
+        If True, the return value is ``(x, r)``, where ``x`` is the root, and
+        ``r`` is a `RootResults` object.
     disp : bool, optional
-        If True, display a RuntimeError if the algorithm didn't converge.
-    converged : boolean, optional
-        Only used if `x0` is a sequence. If `True` then two extras boolean
-        arrays of converged and zero derivatives are appended to the return.
+        If True, raise a RuntimeError if the algorithm didn't converge, with
+        the error message containing the number of iterations and current
+        function value. *Note: this has little to do with displaying, however
+        the `disp` keyword cannot be renamed for backwards compatibility.*
+    converged : bool, optional
+        Only used if `x0` is a sequence. If True, two extras boolean arrays of
+        converged and zero derivatives are appended to the return.
 
     Returns
     -------
-    root : float, sequence, or array
+    root : float, sequence, or ndarray
         Estimated location where function is zero.
-    r : RootResults (present if ``full_output = True``)
-        Object containing information about the convergence. In particular,
-        ``r.converged`` is True if the routine converged.
-    converged : boolean array, optional
+    r : RootResults
+        Present if ``full_output=True``. Object containing information about
+        the convergence. In particular, ``r.converged`` is True if the routine
+        converged.
+    converged : ndarray of bool, optional
         For vector functions, indicates which elements converged successfully.
-    zero_der : boolean array, optional
+    zero_der : ndarray of bool, optional
         For vector functions, indicates which elements had a zero derivative.
 
     See Also
     --------
     brentq, brenth, ridder, bisect
-    fsolve : find zeroes in n dimensions.
+    fsolve : find zeros in n dimensions.
 
     Notes
     -----
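The reworded `disp` entry deserves a concrete reading: with `disp=False` a failed scalar solve does not raise, and combining it with `full_output=True` lets the caller inspect the outcome instead. A sketch under that reading of the docstring (the root-free quadratic is only an illustration; with it the iteration is not expected to converge in five steps):

    from scipy import optimize

    f = lambda x: x**2 + 1                 # no real root
    root, r = optimize.newton(f, 1.0, maxiter=5, disp=False, full_output=True)
    print(r.converged)                     # expected False; no RuntimeError raised
    print(r.flag)                          # textual cause of termination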
@@ -168,22 +174,24 @@ def newton(func, x0, fprime=None, args=(), tol=1.48e-8, maxiter=50,
 
     When `newton` is used with arrays, it is best suited for the following
     types of problems:
-    * the initial guesses, `x0`, are all relatively the same distance from the
-      roots
-    * some or all of the extra arguments, `args`, are also arrays so that a
-      class of similar problems can be solved together
-    * the size of the initial guesses, `x0`, is larger than 100 elements
-    Otherwise, a naive loop may perform better than a vector.
+
+    * The initial guesses, `x0`, are all relatively the same distance from
+      the roots.
+    * Some or all of the extra arguments, `args`, are also arrays so that a
+      class of similar problems can be solved together.
+    * The size of the initial guesses, `x0`, is larger than O(100) elements.
+    Otherwise, a naive loop may perform as well or better than a vector.
 
     Examples
     --------
 
     >>> from scipy import optimize
+    >>> import matplotlib.pyplot as plt
 
     >>> def f(x):
     ...     return (x**3 - 1)  # only one real root at x = 1
 
-    ``fprime`` not provided, use secant method
+    ``fprime`` is not provided, use the secant method:
 
     >>> root = optimize.newton(f, 1.5)
     >>> root
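The rewritten bullet list above, including its "a naive loop may perform as well or better" caveat, is easy to probe empirically. A rough timing sketch under the assumptions of this revision (the problem, the sizes and the `number` of repeats are arbitrary; relative numbers depend on the machine):

    import timeit
    import numpy as np
    from scipy import optimize

    f = lambda x, a: x**3 - a
    fder = lambda x, a: 3 * x**2
    x0 = np.full(100, 1.5)
    a = np.arange(1.0, 101.0)

    t_vec = timeit.timeit(
        lambda: optimize.newton(f, x0, fprime=fder, args=(a,)), number=20)
    t_loop = timeit.timeit(
        lambda: [optimize.newton(f, xi, fprime=fder, args=(ai,))
                 for xi, ai in zip(x0, a)], number=20)
    print(t_vec, t_loop)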
@@ -192,31 +200,46 @@ class of similar problems can be solved together
     >>> root
     1.0000000000000016
 
-    Only ``fprime`` provided, use Newton Raphson method
+    Only ``fprime`` is provided, use the Newton-Raphson method:
 
     >>> root = optimize.newton(f, 1.5, fprime=lambda x: 3 * x**2)
     >>> root
     1.0
 
-    Both ``fprime2`` and ``fprime`` provided, use Halley's method
+    Both ``fprime2`` and ``fprime`` are provided, use Halley's method:
 
     >>> root = optimize.newton(f, 1.5, fprime=lambda x: 3 * x**2,
     ...                        fprime2=lambda x: 6 * x)
     >>> root
     1.0
 
-    A sequence of initial guesses and arguments is provided, use vectorized
-
-    >>> import numpy as np
+    When we want to find zeros for a set of related starting values and/or
+    function parameters, we can provide both of those as an array of inputs:
 
     >>> f = lambda x, a: x**3 - a
     >>> fder = lambda x, a: 3 * x**2
     >>> x = np.random.randn(100)
-    >>> a = range(-50, 50)
+    >>> a = np.arange(-50, 50)
     >>> vec_res = optimize.newton(f, x, fprime=fder, args=(a, ))
 
+    The above is the equivalent of solving for each value in ``(x, a)``
+    separately in a for-loop, just faster:
+
     >>> loop_res = [optimize.newton(f, x0, fprime=fder, args=(a0,))
     ...             for x0, a0 in zip(x, a)]
+    >>> np.allclose(vec_res, loop_res)
+    True
+
+    Plot the results found for all values of ``a``:
+
+    >>> analytical_result = np.sign(a) * np.abs(a)**(1/3)
+    >>> fig = plt.figure()
+    >>> ax = fig.add_subplot(111)
+    >>> ax.plot(a, analytical_result, 'o')
+    >>> ax.plot(a, vec_res, '.')
+    >>> ax.set_xlabel('$a$')
+    >>> ax.set_ylabel('$x$ where $f(x, a)=0$')
+    >>> plt.show()
 
     """
     if tol <= 0:
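One added example line is worth a gloss: `np.sign(a) * np.abs(a)**(1/3)` is used because raising a float array with negative entries to the power `1/3` yields `nan` for those entries, whereas the sign/abs form (or `np.cbrt`) gives the real cube root. A small check, not part of the commit:

    import numpy as np

    a = np.array([-8.0, 8.0])
    print(a**(1/3))                          # nan for the negative entry (RuntimeWarning)
    print(np.sign(a) * np.abs(a)**(1/3))     # real cube roots, approximately [-2., 2.]
    print(np.allclose(np.sign(a) * np.abs(a)**(1/3), np.cbrt(a)))   # True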
@@ -226,8 +249,8 @@ class of similar problems can be solved together
     if not np.isscalar(x0):
         return _array_newton(func, x0, fprime, args, tol, maxiter, fprime2,
                              converged)
-    # Multiply by 1.0 to convert to floating point. We don't use float(x0)
-    # so it still works if x0 is complex.
+
+    # Convert to float (don't use float(x0); this works also for complex x0)
     p0 = 1.0 * x0
     funcalls = 0
     if fprime is not None:
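The reworded comment compresses the original two lines; the point it makes is plain-Python behaviour and can be seen in isolation (illustrative values):

    x0 = 1 + 2j
    p0 = 1.0 * x0          # multiplication promotes to a floating type, complex preserved
    print(p0)              # (1+2j)
    print(1.0 * 3)         # 3.0 -- integer input becomes float

    try:
        float(x0)          # this is exactly what the code avoids
    except TypeError as exc:
        print(exc)         # can't convert complex to float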
@@ -283,6 +306,7 @@ class of similar problems can be solved together
             p1 = p
             q1 = func(p1, *args)
             funcalls += 1
+
     if disp:
         msg = "Failed to converge after %d iterations, value is %s" % (itr + 1, p)
         raise RuntimeError(msg)
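With `disp` left at its default of True, the path above raises using the message format shown in this hunk. A sketch of what a caller would see (the function and `maxiter` are illustrative; with them the secant iteration is not expected to converge):

    from scipy import optimize

    try:
        optimize.newton(lambda x: x**2 + 1, 1.0, maxiter=5)
    except RuntimeError as exc:
        print(exc)         # e.g. "Failed to converge after 5 iterations, value is ..."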
@@ -293,16 +317,19 @@ class of similar problems can be solved together
 def _array_newton(func, x0, fprime, args, tol, maxiter, fprime2,
                   converged=False):
     """
-    A vectorized version of Newton, Halley, and secant methods for arrays. Do
-    not use this method directly. This method is called from :func:`newton`
-    when ``np.isscalar(x0)`` is true. For docstring, see :func:`newton`.
+    A vectorized version of Newton, Halley, and secant methods for arrays.
+
+    Do not use this method directly. This method is called from `newton`
+    when ``np.isscalar(x0)`` is True. For docstring, see `newton`.
     """
     try:
         p = np.asarray(x0, dtype=float)
-    except TypeError:  # can't convert complex to float
+    except TypeError:
+        # can't convert complex to float
         p = np.asarray(x0)
-    failures = np.ones_like(p, dtype=bool)  # at start, nothing converged
-    nz_der = np.copy(failures)
+
+    failures = np.ones_like(p, dtype=bool)
+    nz_der = np.ones_like(failures)
     if fprime is not None:
         # Newton-Raphson method
         for iteration in range(maxiter):
@@ -355,9 +382,10 @@ def _array_newton(func, x0, fprime, args, tol, maxiter, fprime2,
             p1, p = p, p1
             q0 = q1
             q1 = np.asarray(func(p1, *args))
+
     zero_der = ~nz_der & failures  # don't include converged with zero-ders
     if zero_der.any():
-        # secant warnings
+        # Secant warnings
         if fprime is None:
             nonzero_dp = (p1 != p)
             # non-zero dp, but infinite newton step
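The bookkeeping in this hunk combines two boolean arrays: `failures` marks elements that never met the tolerance and `nz_der` marks elements whose derivative stayed non-zero, so `~nz_der & failures` flags zero-derivative elements that also failed to converge. A toy illustration with made-up masks, not taken from the commit:

    import numpy as np

    failures = np.array([True, False, True])    # element never reached tol
    nz_der = np.array([False, False, True])     # derivative stayed non-zero
    zero_der = ~nz_der & failures               # zero derivative AND not converged
    print(zero_der)                             # [ True False False]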
@@ -367,7 +395,7 @@ def _array_newton(func, x0, fprime, args, tol, maxiter, fprime2,
                     sum((p1[zero_der_nz_dp] - p[zero_der_nz_dp]) ** 2)
                 )
                 warnings.warn('RMS of {:g} reached'.format(rms), RuntimeWarning)
-        # netwon or halley warnings
+        # Newton or Halley warnings
         else:
             all_or_some = 'all' if zero_der.all() else 'some'
             msg = '{:s} derivatives were zero'.format(all_or_some)
@@ -380,9 +408,11 @@ def _array_newton(func, x0, fprime, args, tol, maxiter, fprime2,
         if failures.all():
             raise RuntimeError(msg)
         warnings.warn(msg, RuntimeWarning)
+
     if converged:
         result = namedtuple('result', ('root', 'converged', 'zero_der'))
         p = result(p, ~failures, zero_der)
+
     return p
 
 
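The namedtuple shown in this final hunk is how the vector path packages `(root, converged, zero_der)`; a toy illustration (values not from the commit):

    from collections import namedtuple
    import numpy as np

    result = namedtuple('result', ('root', 'converged', 'zero_der'))
    p = result(np.array([1.0, 2.0]), np.array([True, True]), np.array([False, False]))
    print(p._fields)        # ('root', 'converged', 'zero_der')
    print(p.root)           # attribute access mirrors the documented fields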