Python Code#
The Python code used in this book is given here for reference.
Importing libraries#
The following code imports the libraries that are used throughout this book.
import numpy as np
import sympy as sp
import matplotlib.pyplot as plt
plt.rcParams['text.usetex'] = True # allows use of LaTeX in labels
NumPy (pronounced ‘num-pie’) for performing numerical calculations
SymPy (pronounced ‘sim-pie’) for performing symbolic calculations
matplotlib for plotting solutions
Initial value problem solver#
The solveIVP() function is used to solve an initial value problem of the form \(\vec{y}' = f(t, \vec{y})\), \(t \in [t_{\min}, t_{\max}]\) and \(\vec{y}_0 = \vec{\alpha}\) using a single step method. The function returns arrays containing the values of \(\vec{t}\) and \(\vec{y}\).
def solveIVP(f, tspan, y0, h, solver):
    # Initialise t and y arrays
    t = np.arange(tspan[0], tspan[1] + h, h)
    y = np.zeros((len(t), len(y0)))
    t[0] = tspan[0]
    y[0,:] = y0

    # Loop through steps and calculate single step solver solution
    for n in range(len(t) - 1):
        y[n+1,:] = solver(f, t[n], y[n,:], h)

    return t, y
Explicit Methods#
The Euler method#
def euler(f, t, y, h):
    return y + h * f(t, y)
Heun’s method#
def heun(f, t, y, h):
    k1 = f(t, y)
    k2 = f(t + h, y + h * k1)
    return y + h / 2 * (k1 + k2)
The RK4 method#
def rk4(f, t, y, h):
    k1 = f(t, y)
    k2 = f(t + 0.5 * h, y + 0.5 * h * k1)
    k3 = f(t + 0.5 * h, y + 0.5 * h * k2)
    k4 = f(t + h, y + h * k3)
    return y + h / 6 * (k1 + 2 * k2 + 2 * k3 + k4)
Adaptive step size control#
The solveIVP_SSC() function is used to solve an initial value problem of the form \(\vec{y}' = f(t, \vec{y})\), \(t \in [t_{\min}, t_{\max}]\) and \(\vec{y}_0 = \vec{\alpha}\) using a single step method with adaptive step size control. The function returns arrays containing the values of \(\vec{t}\) and \(\vec{y}\).
def solveIVP_SSC(f, tspan, y0, h, solver, tol=1e-4):
    # Define t and y arrays
    t = np.zeros(10000)
    y = np.zeros((10000, len(y0)))
    t[0] = tspan[0]
    y[0,:] = y0

    # Solver loop
    n = 0
    while t[n] < tspan[-1]:
        # The solver returns the higher and lower order solutions, the order p
        # and the number of stages (the number of stages is not needed here)
        yp1, yp, order, _ = solver(f, t[n], y[n,:], h)
        delta = np.max(np.abs(yp1 - yp))

        # Determine whether step was successful or not
        if delta < tol:
            y[n+1,:] = yp1
            t[n+1] = t[n] + h
            n += 1

        # Calculate new value of h (making sure not to exceed tmax)
        h *= max(0.5, min(2, 0.9 * (tol / delta) ** (1 / (order + 1))))
        h = min(h, tspan[-1] - t[n])

    return t[:n+1], y[:n+1,:]
Fehlberg’s RKF4(5) method#
def rkf45(f, t, y, h):
    k1 = f(t, y)
    k2 = f(t + 1/4 * h, y + 1/4 * h * k1)
    k3 = f(t + 3/8 * h, y + h * (3/32 * k1 + 9/32 * k2))
    k4 = f(t + 12/13 * h, y + h * (1932/2197 * k1 - 7200/2197 * k2 + 7296/2197 * k3))
    k5 = f(t + h, y + h * (439/216 * k1 - 8 * k2 + 3680/513 * k3 - 845/4104 * k4))
    k6 = f(t + 1/2 * h, y + h * (-8/27 * k1 + 2 * k2 - 3544/2565 * k3 + 1859/4104 * k4 - 11/40 * k5))
    y5 = y + h * (16/135 * k1 + 6656/12825 * k3 + 28561/56430 * k4 - 9/50 * k5 + 2/55 * k6)
    y4 = y + h * (25/216 * k1 + 1408/2565 * k3 + 2197/4104 * k4 - 1/5 * k5)
    p, s = 4, 6
    return y5, y4, p, s
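As a usage sketch, the adaptive solver and the RKF4(5) method can be combined as follows (the IVP here is the \(y' = ty\), \(y(0) = 1\) example used in the next section).

# Example: solve y' = ty, y(0) = 1 on [0, 1] with adaptive step size control
def f(t, y):
    return t * y

t, y = solveIVP_SSC(f, [0, 1], [1], 0.2, rkf45, tol=1e-6)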
Defining and solving an IVP#
The following code defines the IVP \(y' = ty\), \(t \in [0, 1]\), \(y(0) = 1\) and uses the solveIVP() and euler() functions with a step length of \(h = 0.2\) to compute the solution.
def f(t, y):
    return t * y


# Define IVP parameters
tspan = [0, 1]  # boundaries of the t domain
y0 = [1]        # solution at the lower boundary
h = 0.2         # step length

# Calculate the solution to the IVP
t, y = solveIVP(f, tspan, y0, h, euler)
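For this IVP the exact solution is \(y = \mathrm{e}^{t^2/2}\), which can be computed for comparison with the numerical solution (the names t_exact and y_exact are illustrative).

# Exact solution y = exp(t^2 / 2) for comparison
t_exact = np.linspace(tspan[0], tspan[1], 100)
y_exact = np.exp(t_exact ** 2 / 2)

Adding plt.plot(t_exact, y_exact, "r-", label="Exact") to the plotting commands below includes it in the figure.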
Plotting the solution#
The following code uses matplotlib functions to plot the solution.
fig, ax = plt.subplots(figsize=(8, 6))
plt.plot(t, y, "bo-", label="Euler")
plt.xlabel("$t$", fontsize=14)
plt.ylabel("$y$", fontsize=14)
plt.legend(fontsize=12)
plt.show()
Implicit Runge-Kutta Methods#
Third-order Radau IA IRK method#
def radauIA(f, t, y, h):
    # Calculate stage values using fixed point iteration
    Y1, Y2 = np.ones(len(y)), np.ones(len(y))
    Y1old, Y2old = np.ones(len(y)), np.ones(len(y))
    for k in range(10):
        Y1 = y + h * (1/4 * f(t, Y1) - 1/4 * f(t + 2/3 * h, Y2))
        Y2 = y + h * (1/4 * f(t, Y1) + 5/12 * f(t + 2/3 * h, Y2))
        if max(np.amax(abs(Y1 - Y1old)), np.amax(abs(Y2 - Y2old))) < 1e-4:
            break
        Y1old, Y2old = Y1, Y2

    return y + h / 4 * (f(t, Y1) + 3 * f(t + 2/3 * h, Y2))
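Since radauIA() has the same signature as the explicit solvers, it can be passed to solveIVP() in the same way; a minimal usage sketch using the \(y' = ty\), \(y(0) = 1\) example from above:

# Example: solve y' = ty, y(0) = 1 using the Radau IA method
t, y = solveIVP(f, [0, 1], [1], 0.2, radauIA)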
Using SymPy to solve order conditions#
The following code uses SymPy commands to solve the order conditions \(b_1 + b_2 = 1\) and \(b_2 c_2 = \frac{1}{2}\) along with the row sum condition \(a_{21} = c_2\), where \(c_2 = 1\).
import sympy as sp
# Declare symbolic variables
a21, b1, b2, c2 = sp.symbols("a21, b1, b2, c2")
# Define known coefficients
c2 = 1
# Define order conditions
eq1 = b1 + b2 - 1
eq2 = b2 * c2 - sp.Rational(1,2)
# Define row sum conditions
eq3 = a21 - c2
# Solve order conditions
sp.solve((eq1, eq2, eq3), manual=True)
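With \(c_2 = 1\), SymPy returns \(a_{21} = 1\) and \(b_1 = b_2 = \frac{1}{2}\), which are the coefficients of Heun's method given above.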
Stability#
Plotting stability regions#
The following code plots the region of absolute stability for the Euler method.
# Generate z values
xmin, xmax, ymin, ymax = -3, 1, -1.5, 1.5
X, Y = np.meshgrid(np.linspace(xmin, xmax, 200),np.linspace(ymin, ymax, 200))
Z = X + Y * 1j
# Define stability function
R = 1 + Z
# Plot stability region
fig = plt.figure()
contour = plt.contourf(X, Y, abs(R), levels=[0, 1], colors="#99ccff")
plt.contour(X, Y, abs(R), colors= "k", levels=[0, 1])
plt.axhline(0, color="k", linewidth=1)
plt.axvline(0, color="k", linewidth=1)
plt.axis("equal")
plt.axis([xmin, xmax, ymin, ymax])
plt.xlabel(r"$\mathrm{Re}(z)$", fontsize=12)
plt.ylabel(r"$\mathrm{Im}(z)$", fontsize=12)
plt.show()
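The same code plots the region of absolute stability for any one-step method by changing the stability function; for example, replacing R = 1 + Z with R = 1 + Z + Z ** 2 / 2 plots the region for Heun's method (widening the axis limits if necessary).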
Stability function for explicit methods#
The following code calculates the coefficients of the stability function for an explicit Runge-Kutta method defined by a Butcher tableau, entered below as the arrays A and b (here the RK4 method).
# Define ERK method
A = sp.Matrix([[0, 0, 0, 0],
               [sp.Rational(1,2), 0, 0, 0],
               [0, sp.Rational(1,2), 0, 0],
               [0, 0, 1, 0]])
b = sp.Matrix([sp.Rational(1,6), sp.Rational(1,3), sp.Rational(1,3), sp.Rational(1,6)])
e = sp.ones(4, 1)

# Determine coefficients of the stability function
for k in range(4):
    display(b.T * A ** k * e)
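Since \(R(z) = 1 + \sum_{k=1}^{s} \vec{b}^\mathrm{T} A^{k-1} \vec{e}\, z^k\) for an explicit method, the four outputs \(1, \frac{1}{2}, \frac{1}{6}\) and \(\frac{1}{24}\) give \(R(z) = 1 + z + \frac{1}{2}z^2 + \frac{1}{6}z^3 + \frac{1}{24}z^4\) for this method.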
Stability function for implicit methods#
The following code calculates the stability function \(R(z) = \dfrac{\det(I - zA + z\vec{e}\vec{b}^\mathrm{T})}{\det(I - zA)}\) for an implicit Runge-Kutta method defined by a Butcher tableau, entered below via the arrays A and ebT (here the third-order Radau IIA method).
# Define numerator and denominator functions
def P(z):
    return (I - z * A + z * ebT).det()


def Q(z):
    return (I - z * A).det()


# Define IRK method
A = sp.Matrix([[sp.Rational(5,12), -sp.Rational(1,12)],
               [sp.Rational(3,4), sp.Rational(1,4)]])
ebT = sp.Matrix([[sp.Rational(3,4), sp.Rational(1,4)],
                 [sp.Rational(3,4), sp.Rational(1,4)]])   # e * b.T
I = sp.eye(2)

# Calculate R(z)
z, y = sp.symbols('z, y')
R = P(z) / Q(z)
print("R(z) = ")
display(R)
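For this method the output is \(R(z) = \dfrac{1 + \frac{1}{3}z}{1 - \frac{2}{3}z + \frac{1}{6}z^2}\).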
Checking if an implicit method is A-stable#
The following code checks the following two conditions for A-stability
The roots of \(Q(z)\) have positive real parts
\(E(y) = Q(iy)Q(-iy) - P(iy)P(-iy) \geq 0\)
where the stability function for the method is \(R(z) = \dfrac{P(z)}{Q(z)}\).
# Check that the roots of Q(z) have positive real parts
roots = sp.solve(Q(z), z)
print("Roots of Q(z)")
display(roots)

# Check E(y) >= 0
E = Q(1j * y) * Q(-1j * y) - P(1j * y) * P(-1j * y)
print("E(y) = ")
display(sp.simplify(sp.nsimplify(E)))
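For the Radau IIA method above, the roots of \(Q(z)\) are \(2 \pm \sqrt{2}i\), which have positive real parts, and \(E(y) = \frac{1}{36}y^4 \geq 0\), so the method is A-stable.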
Matrix Decomposition Methods#
LU decomposition#
The following code defines the function lu() which calculates the LU decomposition of a square matrix \(A\) and returns the lower and upper triangular matrices \(L\) and \(U\) such that \(A = LU\).
def lu(A):
    n = A.shape[0]
    L, U = np.eye(n), np.zeros((n, n))
    for j in range(n):
        for i in range(n):
            sum_ = 0
            if i <= j:
                for k in range(i):
                    sum_ += L[i,k] * U[k,j]
                U[i,j] = A[i,j] - sum_
            else:
                for k in range(j):
                    sum_ += L[i,k] * U[k,j]
                L[i,j] = (A[i,j] - sum_) / U[j,j]

    return L, U
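A minimal usage sketch (the matrix is an illustrative example):

# Example: LU decomposition of a 3x3 matrix
A = np.array([[2.0, 1.0, 1.0],
              [4.0, 3.0, 3.0],
              [8.0, 7.0, 9.0]])
L, U = lu(A)
print(np.allclose(L @ U, A))   # True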
Forward and back substitution#
The following code defines the functions forward_substitution() and back_substitution() which perform forward and back substitution.
def forward_substitution(L, b):
    n = L.shape[0]
    x = np.zeros(n)
    for i in range(n):
        sum_ = 0
        for j in range(i):
            sum_ += L[i,j] * x[j]
        x[i] = (b[i] - sum_) / L[i,i]

    return x


def back_substitution(U, b):
    n = U.shape[0]
    x = np.zeros(n)
    x[-1] = b[-1] / U[-1,-1]
    for i in range(n - 2, -1, -1):
        sum_ = 0
        for j in range(i + 1, n):
            sum_ += U[i,j] * x[j]
        x[i] = (b[i] - sum_) / U[i,i]

    return x
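Combining lu(), forward_substitution() and back_substitution() solves a linear system \(A\vec{x} = \vec{b}\); a minimal sketch with an illustrative system:

# Example: solve Ax = b via LU decomposition and substitution
A = np.array([[2.0, 1.0, 1.0],
              [4.0, 3.0, 3.0],
              [8.0, 7.0, 9.0]])
b = np.array([4.0, 10.0, 24.0])
L, U = lu(A)
z = forward_substitution(L, b)   # solve Lz = b for z
x = back_substitution(U, z)      # solve Ux = z for x
print(x)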
Partial pivoting#
The following code defines the function partial_pivot() which performs partial pivoting on a matrix \(A\), swapping its rows in place, and returns the corresponding permutation matrix \(P\).
def partial_pivot(A):
    n = A.shape[0]
    P = np.eye(n)
    for j in range(n):
        maxpivot, maxpivotrow = abs(A[j,j]), j
        for i in range(j + 1, n):
            if abs(A[i,j]) > maxpivot:
                maxpivot, maxpivotrow = abs(A[i,j]), i
        A[[j,maxpivotrow]] = A[[maxpivotrow,j]]
        P[[j,maxpivotrow]] = P[[maxpivotrow,j]]

    return P
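A usage sketch combining partial_pivot() with lu() (the matrix is an illustrative example; since A is permuted in place, a copy is kept for the check):

# Example: partial pivoting followed by LU decomposition, so that LU = PA
A = np.array([[1.0, 2.0, 3.0],
              [4.0, 5.0, 6.0],
              [7.0, 8.0, 10.0]])
A0 = A.copy()
P = partial_pivot(A)                # permutes the rows of A in place
L, U = lu(A)
print(np.allclose(L @ U, P @ A0))   # True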
Cholesky decomposition#
The following code defines the function cholesky() which performs Cholesky decomposition on a matrix \(A\) and outputs the lower triangular matrix \(L\) such that \(A = LL^\mathrm{T}\).
def cholesky(A):
    n = A.shape[0]
    L = np.zeros((n, n))
    for j in range(n):
        for i in range(j, n):
            for k in range(j):
                L[i,j] += L[i,k] * L[j,k]
            if i == j:
                L[i,j] = np.sqrt(A[i,j] - L[i,j])
            else:
                L[i,j] = (A[i,j] - L[i,j]) / L[j,j]

    return L
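A minimal usage sketch with an illustrative symmetric positive definite matrix:

# Example: Cholesky decomposition of a symmetric positive definite matrix
A = np.array([[4.0, 2.0, 2.0],
              [2.0, 3.0, 1.0],
              [2.0, 1.0, 3.0]])
L = cholesky(A)
print(np.allclose(L @ L.T, A))   # True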
QR decomposition using the Gram-Schmidt process#
The following code defines the function qr_gramschmidt() which performs QR decomposition using the Gram-Schmidt process on a matrix \(A\) and outputs the orthogonal matrix \(Q\) and upper triangular matrix \(R\) such that \(A = QR\).
def qr_gramschmidt(A):
    n = A.shape[1]
    Q, R = np.zeros(A.shape), np.zeros((n, n))
    for j in range(n):
        sum_ = 0
        for i in range(j):
            R[i,j] = np.dot(Q[:,i], A[:,j])
            sum_ += R[i,j] * Q[:,i]
        u = A[:,j] - sum_
        R[j,j] = np.linalg.norm(u)
        Q[:,j] = u / R[j,j]

    return Q, R
QR decomposition using Householder transformations#
The following code defines the function qr_householder() which performs QR decomposition using Householder transformations on a matrix \(A\) and outputs the orthogonal matrix \(Q\) and upper triangular matrix \(R\) such that \(A = QR\).
def qr_householder(A):
    m, n = A.shape
    I = np.eye(m)
    Q, R = np.eye(m), np.copy(A)
    for j in range(n):
        u = R[:,[j]]
        u[:j] = 0
        u = u + np.sign(R[j,j]) * np.linalg.norm(u) * I[:,[j]]
        v = u / np.linalg.norm(u)
        H = I - 2 * np.dot(v, v.T)
        R = np.dot(H, R)
        Q = np.dot(Q, H)

    return Q, R
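A minimal usage sketch with an illustrative \(3 \times 2\) matrix (qr_gramschmidt() is called in the same way):

# Example: QR decomposition of a 3x2 matrix
A = np.array([[1.0, 2.0],
              [3.0, 4.0],
              [5.0, 6.0]])
Q, R = qr_householder(A)
print(np.allclose(Q @ R, A))   # True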
Calculating eigenvalues of a matrix using the QR algorithm#
The following code defines the function eigvals() which calculates the eigenvalues of a square matrix \(A\) using the QR algorithm.
def eigvals(A, tol=1e-6):
    for k in range(20):
        Q, R = qr_householder(A)
        A, Aprev = np.matmul(R, Q), A
        if max(abs(np.diagonal(A - Aprev))) < tol:
            break

    return np.diagonal(A)
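A minimal usage sketch with an illustrative symmetric matrix, compared against NumPy's built-in function:

# Example: eigenvalues of a symmetric matrix
A = np.array([[2.0, 1.0],
              [1.0, 2.0]])
print(eigvals(A))             # approximately 3 and 1
print(np.linalg.eigvals(A))   # for comparison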
Indirect methods#
The following methods calculate the solution to the system of linear equations \(A \vec{x} = \vec{b}\), ceasing iterations when the largest absolute value of the residual is less than tol.
The Jacobi method#
def jacobi(A, b, tol=1e-6):
    n = len(b)
    x, xnew = np.zeros(n), np.zeros(n)
    for k in range(100):
        for i in range(n):
            sum_ = 0
            for j in range(n):
                if i != j:
                    sum_ += A[i,j] * x[j]
            xnew[i] = (b[i] - sum_) / A[i,i]
        x = np.copy(xnew)
        r = b - np.dot(A, x)
        if max(abs(r)) < tol:
            break

    return x
The Gauss-Seidel method#
def gauss_seidel(A, b, tol=1e-6):
    n = len(b)
    x = np.zeros(n)
    for k in range(100):
        for i in range(n):
            sum_ = 0
            for j in range(n):
                if i != j:
                    sum_ += A[i,j] * x[j]
            x[i] = (b[i] - sum_) / A[i,i]
        r = b - np.dot(A, x)
        if max(abs(r)) < tol:
            break

    return x
The SOR method#
def sor(A, b, omega, tol=1e-6):
    n = len(b)
    x = np.zeros(n)
    for k in range(100):
        for i in range(n):
            sum_ = 0
            for j in range(n):
                if i != j:
                    sum_ += A[i,j] * x[j]
            x[i] = (1 - omega) * x[i] + omega * (b[i] - sum_) / A[i,i]
        r = b - np.dot(A, x)
        if max(abs(r)) < tol:
            break

    return x
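A usage sketch applying the three methods to an illustrative diagonally dominant system whose exact solution is \(\vec{x} = (1, 1, 1)\):

# Example: solve a diagonally dominant system with each indirect method
A = np.array([[4.0, -1.0, 0.0],
              [-1.0, 4.0, -1.0],
              [0.0, -1.0, 4.0]])
b = np.array([3.0, 2.0, 3.0])
print(jacobi(A, b))              # approximately [1, 1, 1]
print(gauss_seidel(A, b))
print(sor(A, b, omega=1.1))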