# Chapter 6 : Open Methods¶

## Ex6.1: Pg: 143¶

In [2]:
from math import exp
#f(x) = exp(-x) - x#
#using simple fixed point iteration, Xi+1 = exp(-Xi)
x = 0##initial guess
y=[]
e=[]
y.append(0)
e.append(0)
for i in range(1,12):
if i == 1 :
y.append(x)
else:
y.append(exp(-y[(i-1)]))
e.append((y[(i)] - y[(i-1)]) * 100 / y[(i)])

print "x = :"
for x in y[1:]:
print x
print "\ne = :"
for e in e[1:]:
print e

x = :
0
1.0
0.367879441171
0.692200627555
0.500473500564
0.606243535086
0.545395785975
0.579612335503
0.560115461361
0.57114311508
0.564879347391

e = :
100.0
-171.828182846
46.8536394613
-38.3091465933
17.4467896812
-11.1566225254
5.90335081441
-3.48086697962
1.93080393126
-1.10886824205


## Ex6.2: Pg: 144¶

In [3]:
from math import exp
%matplotlib inline
from matplotlib.pyplot import plot,title,xlabel,ylabel,show
#y1 = x
#y2 = exp(-x)
x=[]
y1=[]
y2=[]
for i in range(0,6):
if i == 0:
x.append(0)
else:
x.append(x[(i-1)] + 0.2)

y1.append(x[(i)])
y2.append(exp(-x[(i)]))

print "x = ",x
print "y1 = ",y1
print "y2 = ",y2
plot(x,y1)
plot(x,y2)#
title("f(x) vs x")
xlabel("x")
y=("f(x)")
# from the graph, we get
x7 = 5.7#
print  "answer using two curve graphical method =  ",x7

x =  [0, 0.2, 0.4, 0.6000000000000001, 0.8, 1.0]
y1 =  [0, 0.2, 0.4, 0.6000000000000001, 0.8, 1.0]
y2 =  [1.0, 0.8187307530779818, 0.6703200460356393, 0.5488116360940264, 0.44932896411722156, 0.36787944117144233]
answer using two curve graphical method =   5.7


## Ex6.3: Pg: 149¶

In [4]:
from math import exp
#f(x) = exp(-x)-x
#f'(x) = -exp(-x)-1
x=[]
et=[]
for i in range(0,5):
if i == 0:
x.append(0)
else:
x.append(x[(i-1)] - (exp(-x[(i-1)])-x[(i-1)])/(-exp(-x[(i-1)])-1))
et.append((x[(i)] - x[(i-1)]) * 100 / x[(i)])
x[(i-1)] = x[(i)]

print "x =",x
print "et =",et

x = [0.5, 0.5663110031972182, 0.5671431650348622, 0.5671432904097811, 0.5671432904097811]
et = [100.0, 11.709290976662398, 0.14672870783743905, 2.2106391984397626e-05]


## Ex6.4: Pg: 150¶

In [5]:
from math import exp
#f(x) = exp(-x) - x
#f'(x) = -exp(-x) - 1
#f"(x) = exp(-x)
xr = 0.56714329#
#E(ti+1) = -f"(x)* E(ti) / 2 * f'(x)
Et0 = 0.56714329#
Et1 = -exp(-xr)* ((Et0)**2) / (2 * (-exp(-xr) - 1))#
print "Et1 = ",Et1,"which is close to the true error of 0.06714329"
Et1true = 0.06714329#
Et2 =  -exp(-xr)* ((Et1true)**2) / (2 * (-exp(-xr) - 1))#
print "Et2 = ",Et2,"which is close to the true error of 0.0008323"
Et2true = 0.0008323#
Et3 =  -exp(-xr)* ((Et2true)**2) / (2 * (-exp(-xr) - 1))#
print "Et3 = ",Et3,"which is close to the true error of 0.0008323"
Et4 =  -exp(-xr)* ((Et3)**2) / (2 * (-exp(-xr) - 1))#
print "Et4 = ",Et4,"which is close to the true error of 0.0008323"
print "Thus it illustratres that the error of newton raphson method for this case is proportional(by a factor of 0.18095) to the square of the error of the previous iteration"


Et1 =  0.0582022389721 which is close to the true error of 0.06714329
Et2 =  0.000815754223141 which is close to the true error of 0.0008323
Et3 =  1.253469828e-07 which is close to the true error of 0.0008323
Et4 =  2.84303276339e-15 which is close to the true error of 0.0008323
Thus it illustratres that the error of newton raphson method for this case is proportional(by a factor of 0.18095) to the square of the error of the previous iteration


## Ex6.5: Pg: 151¶

In [6]:
z = 0.5#
#f(x) = x**10 - 1
#f'(x) = 10*x**9
y=[]
for i in range(0,40):
if i==0:
y.append(z)
else:
y.append(y[(i-1)] - (y[(i-1)]**10 - 1)/(10*y[(i-1)]**9))

print "y ="
for yy in y:
print yy
print "Thus, after the first poor prediction, the technique is converging on to the true root of 1 but at a very slow rate"

y =
0.5
51.65
46.485
41.8365
37.65285
33.887565
30.4988085
27.44892765
24.704034885
22.2336313965
20.0102682569
18.0092414312
16.208317288
14.5874855592
13.1287370033
11.815863303
10.6342769727
9.57084927551
8.61376434811
7.75238791368
6.9771491233
6.27943421352
5.65149079876
5.08634173588
4.57770760618
4.11993695885
3.70794355537
3.33714995458
3.00343690726
2.70309824497
2.43280139954
2.18955475922
1.97068573981
1.7738402371
1.59703134797
1.43880793143
1.29871134273
1.17835471562
1.08334975351
1.02366466118
Thus, after the first poor prediction, the technique is converging on to the true root of 1 but at a very slow rate


## Ex6.6: Pg: 155¶

In [7]:
from math import exp
#f(x) = exp(-x)-x
x=[]
er=[]
for i in range(0,5):
if i==0:
x.append(0)
else:
if i==1:
x.append(1)
else:
x.append(x[(i-1)] - (exp(-x[(i-1)])-x[(i-1)])*(x[(i-2)] - x[(i-1)])/((exp(-x[(i-2)])-x[(i-2)])-(exp(-x[(i-1)])-x[(i-1)])))
er.append((0.56714329 - x[(i)]) * 100 / 0.56714329)

print "x ="
for xx in x:
print xx

print "\net ="
for xx in er:
print xx

x =
0
1
0.61269983678
0.563838389161
0.56717035842

et =
-8.03263435953
0.582727662867
-0.00477276558181


## Ex6.7: Pg: 156¶

In [8]:
from math import log
#f(x) = log(x)
print "secant method"
x=[]
for i in range(0,4):
if i==0:
x.append(0.5)
else:
if i==1:
x.append(5)
else:
x.append(x[(i-1)] - log(x[(i-1)]) * (x[(i-2)] - x[(i-1)])/(log(x[(i-2)]) - log(x[(i-1)])))

print "x ="
for xx in x:
print x
print "thus, secant method is divergent"
print "Now, False position method"
xl = 0.5#
xu = 5#
for i in range(0,3):
m = log(xl)#
n = log(xu)#
xr = xu - n*(xl - xu)/(m - n)#
print "xr = ",xr
w = log(xr)#
if m*w < 0:
xu = xr#
else:
xl = xr#

print "thus, false position method is convergent"

secant method
x =
[0.5, 5, 1.8546349804879152, -0.1043807923822424]
[0.5, 5, 1.8546349804879152, -0.1043807923822424]
[0.5, 5, 1.8546349804879152, -0.1043807923822424]
[0.5, 5, 1.8546349804879152, -0.1043807923822424]
thus, secant method is divergent
Now, False position method
xr =  1.85463498049
xr =  1.21630781847
xr =  1.05852096245
thus, false position method is convergent


## Ex6.8: Pg: 158¶

In [9]:
from math import exp
Del = 0.01#
z = 0.56714329
x1 = 1#
#f(x) = exp(-x) - x
x=[]
print "x1 = ",x1
for i in range(0,4):
if i == 0:
x.append(1)
else :
w = x[(i-1)]
m = exp(-x[(i-1)]) - x[(i-1)]
x[(i-1)] = x[(i-1)]*(1+Del)#
n = exp(-x[(i-1)]) - x[(i-1)]#
x.append(w - (x[(i-1)]- w) * m/(n-m))
em = (z - x[(i)])*100/z#
print "x = ",x[(i)]
print "error = ",em,"%"


x1 =  1
x =  0.537262665537
error =  5.26861993966 %
x =  0.567009685365
error =  0.0235574743537 %
x =  0.567143424147
error =  -2.3653190891e-05 %


## Ex6.9: Pg: 161¶

In [10]:
from __future__ import division
#f(x) = x**3 - 5*x**2 + 7*x -3
#f'(x) = 3*x**2 - 10*x + 7
print "standard Newton Raphson method"
x=[]
et=[]
for i in range(0,7):
if i == 0:
x.append(0)
else:
x.append(x[(i-1)] - ((x[(i-1)])**3 - 5*(x[(i-1)])**2 + 7*x[(i-1)] -3)/(3*(x[(i-1)])**2 - 10*(x[(i-1)]) + 7))
et.append((1 - x[(i)]) * 100 / 1)
print "x = ",x[i]
print "error = ",et[(i-1)],"%"
x[(i-1)] = x[(i)]

print "Modified Newton Raphson method"
#f"(x) = 6*x - 10
x=[]
et=[]
for i in range(0,4):
if i == 0:
x.append(0)
else:
x.append(x[(i-1) ]- ((x[(i-1)])**3 - 5*(x[(i-1)])**2 + 7*x[(i-1)] -3)*((3*(x[(i-1)])**2 - 10*(x[(i-1)]) + 7))/((3*(x[(i-1)])**2 - 10*(x[(i-1)]) + 7)**2 - ((x[(i-1)])**3 - 5*(x[(i-1)])**2 + 7*x[(i-1)] -3) * (6*x[(i-1)] - 10)))
et.append((1 - x[(i)]) * 100 / 1)
print "x = ",x[i]
print "error = ",et[i-1],'%'
x[(i-1) ]= x[(i)]


standard Newton Raphson method
x =  0.428571428571
error =  57.1428571429 %
x =  0.685714285714
error =  31.4285714286 %
x =  0.832865400495
error =  16.7134599505 %
x =  0.913329893257
error =  8.66701067434 %
x =  0.955783292966
error =  4.42167070343 %
x =  0.977655101273
error =  2.23448987271 %
Modified Newton Raphson method
x =  1.10526315789
error =  -10.5263157895 %
x =  1.0030816641
error =  -0.30816640986 %
x =  1.00000238149
error =  -0.000238149381548 %


## Ex6.10: Pg: 165¶

In [11]:
from __future__ import division
from math import sqrt
#u(x,y) = x**2 + x*y - 10
#v(x,y) = y + 3*x*y**2 -57
x=[]
y=[]
for i in range(0,4):
if i == 0:
x.append(1.5)
y.append(3.5)
else:
x.append(sqrt(10 - (x[(i-1)])*(y[(i-1)])))
y.append(sqrt((57 - y[(i-1)])/(3*x[(i)])))
print "x =",x[(i)]
print "y =",y[i]

print "Thus the approaching to the true value 0f x = 2 and y = 3"

x = 2.17944947177
y = 2.86050598812
x = 1.94053387891
y = 3.04955067322
x = 2.02045628588
y = 2.98340474674
Thus the approaching to the true value 0f x = 2 and y = 3


## Ex6.11:Pg 168¶

In [12]:
from __future__ import division
from numpy import mat
from numpy.linalg import det
def u(x,y):
z=x**2+x*y-10
return z
def v(x,y):
z=y+3*x*y**2-57
return z
x=1.5#
y=3.5#
e=[100,100]#
while  e[0]>0.0001 and e[1]>0.0001:
J=mat([[2*x+y, x],[3*y**2, 1+6*x*y]])
deter=det(J)#
u1=u(x,y)#
v1=v(x,y)#
x=x-((u1*J[1,1]-v1*J[0,1])/deter)#
y=y-((v1*J[0,0]-u1*J[1,0])/deter)#
e[(0)]=abs(2-x)#
e[(1)]=abs(3-y)#

bracket=[x ,y]#
print "bracket:",bracket

bracket: [1.9999999838762603, 2.9999994133889132]