%matplotlib inline
import matplotlib.pyplot as plt
import numpy as np
# Demonstrate `break`: count upward until x exceeds 100, then leave the loop.
x = 0
while True:  # idiomatic infinite loop (was `while(1==1)`); `break` is the only exit
    x += 1
    if x > 100:
        break
print(x)  # first value greater than 100, i.e. 101
# Demonstrate `continue`: when mysum becomes even, the extra increment is skipped.
mysum = 0
for i in range(5):
    print(mysum)
    mysum += 1
    if mysum % 2 == 0:  # mysum is even? jump straight to the next iteration
        continue
    mysum += 2  # executed only when mysum is odd
def f(x, mu):
    """Cubic map f(x) = mu + x**3: variable x, offset parameter mu."""
    return x ** 3 + mu
def cobweb(mu, f, x0, nint):
    """Iterate x -> f(x, mu) and collect the vertices of a cobweb plot.

    Parameters
    ----------
    mu : parameter passed through to f
    f : the map being iterated, called as f(x, mu)
    x0 : initial condition
    nint : number of iterations

    Returns
    -------
    xvec, yvec : two float numpy arrays suitable for plotting; each
        iteration contributes the point (x, f(x)) followed by the point
        (f(x), f(x)) on the line y = x.
    """
    xs = []
    ys = []
    x = x0
    for _ in range(nint):
        y = f(x, mu)
        xs.extend((x, y))  # (x, f(x)) then (f(x), f(x)) on y = x
        ys.extend((y, y))
        x = y  # feed the output back in as the next input
    # Accumulate in Python lists and convert once: np.append inside the
    # loop copies the whole array each iteration (accidentally O(n^2)).
    # dtype=float keeps the original float64 result even for integer maps.
    return np.asarray(xs, dtype=float), np.asarray(ys, dtype=float)
# Draw the cobweb diagram for the cubic map f with mu = 0.43,
# starting from x0 = -0.8 and iterating 10 times.
MU = 0.43
cw_x, cw_y = cobweb(MU, f, -0.8, 10)
print(cw_x)
print(cw_y)
curve_x = np.linspace(-1, 1.3, 101)
curve_y = f(curve_x, MU)  # the function evaluated with mu = 0.43
plt.plot(curve_x, curve_y, 'k-')  # the function itself
plt.plot(curve_x, curve_x, 'b-')  # the identity line y = x
plt.plot(cw_x, cw_y, 'r-')        # the cobweb trajectory
plt.plot(cw_x[0], cw_y[0], 'ro')  # mark the initial condition
def g(x, mu):
    """Sine map g(x) = mu * sin(pi * x), used to show a period-2 orbit."""
    return mu * np.sin(np.pi * x)
# Cobweb for the sine map at mu = 0.81: the orbit settles onto period 2.
mu = 0.81
xvec, yvec = cobweb(mu, g, 0.3, 30)
xv = np.linspace(0, 1.0, 101)
yv = g(xv, mu)
plt.plot(xv, yv, 'k-')            # the map g
plt.plot(xv, xv, 'b-')            # the line y = x
plt.plot(xvec, yvec, 'r-')        # the cobweb trajectory
plt.plot(xvec[0], yvec[0], 'ro')  # mark the initial condition
Showing a period-2 orbit.
How many digits are important?
# At which power of ten does 1 + 10**-i become indistinguishable from 1.0?
for i in range(1, 21):
    print(i, 1 + 10 ** -i)
A double precision float devotes a certain number of bits to the significant digits (the mantissa) and a certain number of bits to the exponent. The bit format is defined by the IEEE 754 standard.
The relative accuracy of double precision numbers is $\sim 10^{-16}$ due to the number of bits given to the mantissa (not the exponent).
# Adding a tiny number to 1.0 discards most of the tiny number's digits.
y = 1.0
x = 1e-13
z1 = x + y
print(z1)
x = 1.005e-13
z2 = x + y
print(z2)
# In exact arithmetic z2 - z1 would be 5e-16; floating point only gets close.
print(z2 - z1)
To improve precision, add all the smallest quantities together first, then add the larger ones.
Double precision floating point gives an error of about $10^{-16}$ in the digits because only about 16 decimal digits are stored in double precision format.
That is to say a number like 1 is actually $1 \pm 10^{-16}$.
A number like $10^{-14}$ is actually $ = 10^{-14} (1 \pm 10^{-16}) =10^{-14} \pm 10^{-30}$
Suppose we add $1 + \sqrt{2} \times 10^{-14}$? This is actually $$ 1 \pm 10^{-16} + \sqrt{2} \times 10^{-14} ( 1 \pm 10^{-16} ) \sim 1 + \sqrt{2} \times 10^{-14}\pm 10^{-16} $$ Let $z$ be this number and now let us subtract 1 from it. $$z - 1 \sim \sqrt{2} \times 10^{-14} \pm 10^{-16}$$
The error as a fraction is now $$\frac{{\rm err} (z-1)}{z-1} \sim \frac{10^{-16}}{10^{-14}} \sim 10^{-2} $$ so we have an error of 1\%, which is pretty big.
You may need more precision than double precision floating point.
Your code will run slower if you use higher precision. This is because more computations are necessary. But also because chips dedicated to accelerating floating point operations are common, and if you use higher precision the computations will not be done on these accelerators.
# Arbitrary-precision arithmetic with the standard-library decimal module.
from decimal import *
print("digits of precision ", getcontext().prec)
getcontext().prec = 28  # precision is configurable; 28 digits is the default
x = Decimal(1.0)
y = Decimal(2.0)
print(x, y)
y.sqrt()  # square root of 2 at the current precision (displayed by the notebook)
z = x + Decimal(1) / Decimal(1000)
print(z)
# Build 1e-14 exactly from a (sign, digits, exponent) tuple — no float rounding
small_14 = Decimal((0, (1,), -14))
small_14
Decimal.from_float(0.1)  # shows the exact binary value actually stored for 0.1
Not all arithmetic or NumPy operations work on Decimal types.