class RootResults(object):
""" Represents the root finding result.
+
Attributes
----------
root : float
@@ -35,6 +36,7 @@ class RootResults(object):
True if the routine converged.
flag : str
Description of the cause of termination.
+
"""
def __init__(self, root, iterations, function_calls, flag):
    self.root = root
@@ -79,7 +81,7 @@ def _results_select(full_output, r):
def newton(func, x0, fprime=None, args=(), tol=1.48e-8, maxiter=50,
- fprime2=None, full_output=False, disp=True, converged=None):
+ fprime2=None, full_output=False, disp=True, converged=False):
"""
Find a zero using the Newton-Raphson or secant method.
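A minimal sketch of the scalar call path documented in this docstring, assuming the public `scipy.optimize.newton` API and the `RootResults` attributes shown in the hunks above (the test function and starting point are illustrative only):

from scipy import optimize

def f(x):
    return x**3 - 1  # single real root at x = 1

# full_output=True makes the return (root, RootResults); the result object
# reports convergence, iteration count and number of function evaluations.
root, r = optimize.newton(f, 1.5, full_output=True)
print(root, r.converged, r.iterations, r.function_calls)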
@@ -90,7 +92,7 @@ def newton(func, x0, fprime=None, args=(), tol=1.48e-8, maxiter=50,
used.

If `x0` is a sequence, then `newton` returns an array, and `func` must be
- vectorized and return a sequence or array of the same shape as it's first
+ vectorized and return a sequence or array of the same shape as its first
argument. If an optional argument, `converged`, is `True`, then the
return is a named tuple `(root, converged, zero_der)` in which `root` is an
array of the locations where `func` is zero, `converged` is an array, same
@@ -102,15 +104,15 @@ def newton(func, x0, fprime=None, args=(), tol=1.48e-8, maxiter=50,
Parameters
----------
- func : function
+ func : callable
The function whose zero is wanted. It must be a function of a
single variable of the form f(x,a,b,c...), where a,b,c... are extra
arguments that can be passed in the `args` parameter.
- x0 : float, sequence, or array
+ x0 : float, sequence or ndarray
An initial estimate of the zero that should be somewhere near the
actual zero. If not scalar, then `func` must be vectorized and return
- a sequence or array of the same shape as it's first argument.
- fprime : function, optional
+ a sequence or array of the same shape as its first argument.
+ fprime : callable, optional
The derivative of the function when available and convenient. If it
is None (default), then the secant method is used.
args : tuple, optional
@@ -119,37 +121,41 @@ def newton(func, x0, fprime=None, args=(), tol=1.48e-8, maxiter=50,
The allowable error of the zero value.
maxiter : int, optional
Maximum number of iterations.
- fprime2 : function, optional
+ fprime2 : callable, optional
The second order derivative of the function when available and
convenient. If it is None (default), then the normal Newton-Raphson
or the secant method is used. If it is not None, then Halley's method
is used.
full_output : bool, optional
- If `full_output` is False, the root is returned. If `full_output` is
- True, the return value is ``(x, r)``, where `x` is the root, and `r` is
- a RootResults object.
+ If `full_output` is False (default), the root is returned.
+ If True, the return value is ``(x, r)``, where ``x`` is the root, and
+ ``r`` is a `RootResults` object.
disp : bool, optional
- If True, display a RuntimeError if the algorithm didn't converge.
- converged : boolean, optional
- Only used if `x0` is a sequence. If `True` then two extras boolean
- arrays of converged and zero derivatives are appended to the return.
+ If True, raise a RuntimeError if the algorithm didn't converge, with
+ the error message containing the number of iterations and current
+ function value. *Note: this has little to do with displaying, however
+ the `disp` keyword cannot be renamed for backwards compatibility.*
+ converged : bool, optional
+ Only used if `x0` is a sequence. If True, two extra boolean arrays of
+ converged and zero derivatives are appended to the return.

Returns
-------
- root : float, sequence, or array
+ root : float, sequence, or ndarray
Estimated location where function is zero.
- r : RootResults (present if ``full_output = True``)
- Object containing information about the convergence. In particular,
- ``r.converged`` is True if the routine converged.
- converged : boolean array, optional
+ r : RootResults
+ Present if ``full_output=True``. Object containing information about
+ the convergence. In particular, ``r.converged`` is True if the routine
+ converged.
+ converged : ndarray of bool, optional
For vector functions, indicates which elements converged successfully.
- zero_der : boolean array, optional
+ zero_der : ndarray of bool, optional
For vector functions, indicates which elements had a zero derivative.

See Also
--------
brentq, brenth, ridder, bisect
- fsolve : find zeroes in n dimensions.
+ fsolve : find zeros in n dimensions.

Notes
-----
@@ -168,22 +174,24 @@ def newton(func, x0, fprime=None, args=(), tol=1.48e-8, maxiter=50,
When `newton` is used with arrays, it is best suited for the following
types of problems:
- * the initial guesses, `x0`, are all relatively the same distance from the
- roots
- * some or all of the extra arguments, `args`, are also arrays so that a
- class of similar problems can be solved together
- * the size of the initial guesses, `x0`, is larger than 100 elements
- Otherwise, a naive loop may perform better than a vector.
+
+ * The initial guesses, `x0`, are all relatively the same distance from
+ the roots.
+ * Some or all of the extra arguments, `args`, are also arrays so that a
+ class of similar problems can be solved together.
+ * The size of the initial guesses, `x0`, is larger than O(100) elements.
+ Otherwise, a naive loop may perform as well or better than a vector.

Examples
--------

>>> from scipy import optimize
+ >>> import matplotlib.pyplot as plt

>>> def f(x):
...     return (x**3 - 1)  # only one real root at x = 1

- ``fprime`` not provided, use secant method
+ ``fprime`` is not provided, use the secant method:

>>> root = optimize.newton(f, 1.5)
>>> root
@@ -192,31 +200,46 @@ class of similar problems can be solved together
>>> root
1.0000000000000016

- Only ``fprime`` provided, use Newton Raphson method
+ Only ``fprime`` is provided, use the Newton-Raphson method:

>>> root = optimize.newton(f, 1.5, fprime=lambda x: 3 * x**2)
>>> root
1.0

- Both ``fprime2`` and ``fprime`` provided, use Halley's method
+ Both ``fprime2`` and ``fprime`` are provided, use Halley's method:

>>> root = optimize.newton(f, 1.5, fprime=lambda x: 3 * x**2,
...                        fprime2=lambda x: 6 * x)
>>> root
1.0

- A sequence of initial guesses and arguments is provided, use vectorized
-
- >>> import numpy as np
+ When we want to find zeros for a set of related starting values and/or
+ function parameters, we can provide both of those as an array of inputs:

>>> f = lambda x, a: x**3 - a
>>> fder = lambda x, a: 3 * x**2
>>> x = np.random.randn(100)
- >>> a = range(-50, 50)
+ >>> a = np.arange(-50, 50)
>>> vec_res = optimize.newton(f, x, fprime=fder, args=(a, ))

+ The above is the equivalent of solving for each value in ``(x, a)``
+ separately in a for-loop, just faster:
+
>>> loop_res = [optimize.newton(f, x0, fprime=fder, args=(a0,))
...             for x0, a0 in zip(x, a)]
+ >>> np.allclose(vec_res, loop_res)
+ True
+
+ Plot the results found for all values of ``a``:
+
+ >>> analytical_result = np.sign(a) * np.abs(a)**(1/3)
+ >>> fig = plt.figure()
+ >>> ax = fig.add_subplot(111)
+ >>> ax.plot(a, analytical_result, 'o')
+ >>> ax.plot(a, vec_res, '.')
+ >>> ax.set_xlabel('$a$')
+ >>> ax.set_ylabel('$x$ where $f(x, a)=0$')
+ >>> plt.show()

"""
if tol <= 0:
@@ -226,8 +249,8 @@ class of similar problems can be solved together
if not np.isscalar(x0):
    return _array_newton(func, x0, fprime, args, tol, maxiter, fprime2,
                         converged)
- # Multiply by 1.0 to convert to floating point. We don't use float(x0)
- # so it still works if x0 is complex.
+
+ # Convert to float (don't use float(x0); this works also for complex x0)
p0 = 1.0 * x0
funcalls = 0
if fprime is not None:
@@ -283,6 +306,7 @@ class of similar problems can be solved together
p1 = p
q1 = func(p1, *args)
funcalls += 1
+
if disp:
    msg = "Failed to converge after %d iterations, value is %s" % (itr + 1, p)
    raise RuntimeError(msg)
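The `disp` behaviour documented above can be exercised with a call that cannot converge; a minimal sketch, assuming the `scipy.optimize.newton` API (the function and `maxiter` value are made up for illustration, and the exact message text may vary):

from scipy import optimize

# x**2 + 1 has no real root, so the secant iteration cannot converge; with
# disp=True (the default) newton raises RuntimeError, and the message reports
# the iteration count and the current estimate.
try:
    optimize.newton(lambda x: x**2 + 1, x0=0.5, maxiter=10)
except RuntimeError as exc:
    print(exc)  # e.g. "Failed to converge after 10 iterations, value is ..."

With ``disp=False`` the error is not raised; see the `disp` description in the Parameters hunk above.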
@@ -293,16 +317,19 @@ class of similar problems can be solved together
def _array_newton(func, x0, fprime, args, tol, maxiter, fprime2,
                  converged=False):
"""
- A vectorized version of Newton, Halley, and secant methods for arrays. Do
- not use this method directly. This method is called from :func:`newton`
- when ``np.isscalar(x0)`` is true. For docstring, see :func:`newton`.
+ A vectorized version of Newton, Halley, and secant methods for arrays.
+
+ Do not use this method directly. This method is called from `newton`
+ when ``np.isscalar(x0)`` is False. For docstring, see `newton`.
"""
try:
    p = np.asarray(x0, dtype=float)
- except TypeError:  # can't convert complex to float
+ except TypeError:
+     # can't convert complex to float
    p = np.asarray(x0)
- failures = np.ones_like(p, dtype=bool)  # at start, nothing converged
- nz_der = np.copy(failures)
+
+ failures = np.ones_like(p, dtype=bool)
+ nz_der = np.ones_like(failures)
if fprime is not None:
    # Newton-Raphson method
    for iteration in range(maxiter):
@@ -355,9 +382,10 @@ def _array_newton(func, x0, fprime, args, tol, maxiter, fprime2,
p1, p = p, p1
q0 = q1
q1 = np.asarray(func(p1, *args))
+
zero_der = ~nz_der & failures  # don't include converged with zero-ders
if zero_der.any():
-     # secant warnings
+     # Secant warnings
    if fprime is None:
        nonzero_dp = (p1 != p)
        # non-zero dp, but infinite newton step
@@ -367,7 +395,7 @@ def _array_newton(func, x0, fprime, args, tol, maxiter, fprime2,
    sum((p1[zero_der_nz_dp] - p[zero_der_nz_dp]) ** 2)
)
warnings.warn('RMS of {:g} reached'.format(rms), RuntimeWarning)
- # netwon or halley warnings
+ # Newton or Halley warnings
else:
    all_or_some = 'all' if zero_der.all() else 'some'
    msg = '{:s} derivatives were zero'.format(all_or_some)
@@ -380,9 +408,11 @@ def _array_newton(func, x0, fprime, args, tol, maxiter, fprime2,
if failures.all():
    raise RuntimeError(msg)
warnings.warn(msg, RuntimeWarning)
+
if converged:
    result = namedtuple('result', ('root', 'converged', 'zero_der'))
    p = result(p, ~failures, zero_der)
+
return p
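To illustrate the vectorized path and the named-tuple return that the docstring above describes, a minimal sketch assuming the `converged=True` flag added in this diff (the cubic test function and the sample inputs are chosen only for illustration):

import numpy as np
from scipy import optimize

f = lambda x, a: x**3 - a
fder = lambda x, a: 3 * x**2

x0 = np.full(8, 1.5)     # one starting guess per problem
a = np.arange(1.0, 9.0)  # one parameter value per problem

# A non-scalar x0 dispatches to _array_newton; converged=True appends the
# boolean diagnostics, giving a named tuple (root, converged, zero_der).
res = optimize.newton(f, x0, fprime=fder, args=(a,), converged=True)
print(res.root)       # elementwise roots, approximately a**(1/3)
print(res.converged)  # which elements converged
print(res.zero_der)   # which elements hit a zero derivative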