# Implementation of numerical analysis algorithms in Python
import numpy as np
import scipy as sp
from scipy import misc
import sympy as sym
import math

# Helper functions
# https://en.wikipedia.org/wiki/Norm_(mathematics)
def calculate_error(diff, norm=0):
    if norm == 0:  # Infinity norm
        return np.max(np.abs(diff))
    elif norm == 1:
        return np.sum(np.abs(diff))
    else:
        return np.sum(np.abs(diff) ** norm) ** (1 / norm)
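
# Usage sketch (illustrative, not part of the original gist): the residual
# vector below is an arbitrary assumption.
#   r = np.array([3.0, -4.0])
#   calculate_error(r)           # infinity norm -> 4.0
#   calculate_error(r, norm=1)   # 1-norm -> 7.0
#   calculate_error(r, norm=2)   # 2-norm -> 5.0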

# Root-finding algorithms

# https://en.wikipedia.org/wiki/Bisection_method
def bisection_method(f, a, b, epsilon_0=1e-4, epsilon_1=1e-4, max_iteration=1000):
    # set initial values
    y1, y2 = f(a), f(b)
    iteration = 0

    # initial evaluation
    if abs(y1) < epsilon_0:
        return a, abs(y1), iteration
    elif abs(y2) < epsilon_0:
        return b, abs(y2), iteration
    elif (y1 > 0) == (y2 > 0):
        raise ValueError("Invalid input: f(a) * f(b) > 0")

    # iteration process
    while iteration < max_iteration:
        x0 = (a + b) / 2
        y0 = f(x0)
        iteration += 1
        if abs(y0) < epsilon_0 or abs(b - a) <= epsilon_1:
            return x0, abs(y0), iteration
        if (y0 > 0) != (y1 > 0):
            b, y2 = x0, y0
        else:
            a, y1 = x0, y0
    else:
        if abs(y1) <= abs(y2):
            return a, abs(y1), iteration
        else:
            return b, abs(y2), iteration
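
# Usage sketch (illustrative, not part of the original gist): the test function
# and bracketing interval are arbitrary assumptions.
#   root, residual, iterations = bisection_method(lambda x: x ** 2 - 2, 0, 2)
#   # root is approximately sqrt(2) ~ 1.4142, with residual < 1e-4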

# https://en.wikipedia.org/wiki/Newton%27s_method
def newton_method(f, x0, epsilon_0=1e-4, epsilon_1=1e-4, max_iteration=1000, dx=1e-6, sympy_x='x'):
    if callable(f):
        # set initial values
        y0 = f(x0)
        iteration = 0

        # initial evaluation
        if abs(y0) < epsilon_0:
            return x0, abs(y0), iteration

        # iteration process with a numerical derivative
        while iteration < max_iteration:
            xn = x0 - y0 / sp.misc.derivative(f, x0, dx, n=1)
            yn = f(xn)
            iteration += 1
            if abs(yn) < epsilon_0 or abs(xn - x0) <= epsilon_1:
                return xn, abs(yn), iteration
            x0, y0 = xn, yn
        else:
            return x0, abs(y0), iteration
    elif isinstance(f, sym.Basic):
        # set initial values
        y0 = f.evalf(subs={sympy_x: x0})
        iteration = 0

        # initial evaluation
        if abs(y0) < epsilon_0:
            return x0, abs(y0), iteration

        # iteration process with a symbolic derivative
        x = sym.symbols(sympy_x)
        diffF = sym.diff(f, x)
        while iteration < max_iteration:
            xn = x0 - y0 / diffF.evalf(subs={sympy_x: x0})
            yn = f.evalf(subs={sympy_x: xn})
            iteration += 1
            if abs(yn) < epsilon_0 or abs(xn - x0) <= epsilon_1:
                return xn, abs(yn), iteration
            x0, y0 = xn, yn
        else:
            return x0, abs(y0), iteration
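
# Usage sketch (illustrative, not part of the original gist): the test function
# and starting point are arbitrary assumptions. A SymPy expression in the
# symbol named by `sympy_x` can be passed instead of a callable.
#   root, residual, iterations = newton_method(lambda x: x ** 3 - x - 2, 1.5)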

# https://en.wikipedia.org/wiki/Secant_method
def secant_method(f, x0, x1, epsilon_0=1e-4, epsilon_1=1e-4, max_iteration=1000):
    # set initial values
    y0, y1 = f(x0), f(x1)
    iteration = 0

    # initial evaluation
    if abs(y0) < epsilon_0:
        return x0, abs(y0), iteration
    elif abs(y1) < epsilon_0:
        return x1, abs(y1), iteration

    # iteration process
    while iteration < max_iteration:
        xn = x1 - y1 / (y1 - y0) * (x1 - x0)
        yn = f(xn)
        iteration += 1
        if abs(yn) < epsilon_0 or abs(xn - x1) <= epsilon_1:
            return xn, abs(yn), iteration
        x0, y0, x1, y1 = x1, y1, xn, yn
    else:
        return x1, abs(y1), iteration
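
# Usage sketch (illustrative, not part of the original gist): the two starting
# points are arbitrary assumptions.
#   root, residual, iterations = secant_method(lambda x: x ** 2 - 2, 1.0, 2.0)
#   # root is approximately sqrt(2) ~ 1.4142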

# Direct solutions of systems of linear equations

# https://en.wikipedia.org/wiki/Triangular_matrix#Forward_substitution
def forward_substitution(A, b):
    if not (np.size(A, 0) == np.size(A, 1) == np.size(b, 0)):
        raise ValueError("The sizes of the input matrices do not match")
    x = np.zeros_like(b)
    for i in range(np.size(x, 0)):
        x[i] = (b[i] - A[i, :i].dot(x[:i])) / A[i, i]
    return x

def backward_substitution(A, b):
    if not (np.size(A, 0) == np.size(A, 1) == np.size(b, 0)):
        raise ValueError("The sizes of the input matrices do not match")
    x = np.zeros_like(b)
    for i in reversed(range(np.size(x, 0))):
        x[i] = (b[i] - A[i, i+1:].dot(x[i+1:])) / A[i, i]
    return x
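
# Usage sketch (illustrative, not part of the original gist): the lower
# triangular system below is an arbitrary assumption.
#   L_example = np.array([[2.0, 0.0], [1.0, 3.0]])
#   forward_substitution(L_example, np.array([4.0, 8.0]))   # -> [2.0, 2.0]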

# https://en.wikipedia.org/wiki/Gaussian_elimination
def gauss_elimination(A, b):
    A = A.astype(float)
    b = b.astype(float)
    if not (np.size(A, 0) == np.size(A, 1) == np.size(b, 0)):
        raise ValueError("The sizes of the input matrices do not match")
    # eliminate the entries below the diagonal, column by column
    for i in range(np.size(b, 0) - 1):
        for j in range(i+1, np.size(b, 0)):
            scale = -A[j, i] / A[i, i]
            A[j] += scale * A[i]
            b[j] += scale * b[i]
    return backward_substitution(A, b)
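
# Usage sketch (illustrative, not part of the original gist): the system below
# is an arbitrary assumption.
#   A_example = np.array([[2.0, 1.0], [1.0, 3.0]])
#   b_example = np.array([3.0, 5.0])
#   gauss_elimination(A_example, b_example)   # -> approximately [0.8, 1.4]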

# http://web.mit.edu/10.001/Web/Course_Notes/GaussElimPivoting.html
def gauss_elimination_with_partial_pivoting(A, b):
    A = A.astype(float)
    b = b.astype(float)
    if not (np.size(A, 0) == np.size(A, 1) == np.size(b, 0)):
        raise ValueError("The sizes of the input matrices do not match")
    for i in range(np.size(b, 0) - 1):
        # swap the current row with the row holding the largest pivot candidate
        index = np.abs(A[i:, i]).argmax() + i
        if index != i:
            A[[i, index]] = A[[index, i]]
            b[[i, index]] = b[[index, i]]
        for j in range(i+1, np.size(b, 0)):
            scale = -A[j, i] / A[i, i]
            A[j] += scale * A[i]
            b[j] += scale * b[i]
    return backward_substitution(A, b)
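
# Usage sketch (illustrative, not part of the original gist): an assumed system
# whose leading pivot is zero, so plain elimination would divide by zero.
#   A_example = np.array([[0.0, 1.0], [1.0, 1.0]])
#   b_example = np.array([1.0, 3.0])
#   gauss_elimination_with_partial_pivoting(A_example, b_example)   # -> [2.0, 1.0]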

# LU decomposition

# https://en.wikipedia.org/wiki/LU_decomposition#Doolittle_algorithm
def doolittle_algorithm(A):
    if not np.size(A, 0) == np.size(A, 1):
        raise ValueError("The input matrix is not square")
    A = A.astype(float)
    L = np.zeros_like(A)
    U = np.zeros_like(A)
    size = np.size(A, 0)

    U[0] = A[0].copy()
    L[1:, 0] = A[1:, 0] / U[0, 0]
    L[0, 0] = 1
    for i in range(1, size):
        # column i-1 of L must be filled in before row i of U, which uses L[i, i-1]
        L[i:, i-1] = (A[i:, i-1] - L[i:, :i-1].dot(U[:i-1, i-1])) / U[i-1, i-1]
        U[i, i:] = A[i, i:] - L[i, :i].dot(U[:i, i:])
        L[i, i] = 1
    return L, U

# https://en.wikipedia.org/wiki/LU_decomposition#Crout_and_LUP_algorithms
def crout_algorithm(A):
    U_T, L_T = doolittle_algorithm(A.T)
    return L_T.T, U_T.T
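
# Usage sketch (illustrative, not part of the original gist): the matrix below
# is an arbitrary assumption; L.dot(U) reproduces it.
#   A_example = np.array([[4.0, 3.0], [6.0, 3.0]])
#   L, U = doolittle_algorithm(A_example)   # L = [[1, 0], [1.5, 1]], U = [[4, 3], [0, -1.5]]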

# https://en.wikipedia.org/wiki/Tridiagonal_matrix_algorithm
def lu_decomposition_for_tridiagonal_matrix(A):
    if not np.size(A, 0) == np.size(A, 1):
        raise ValueError("The input matrix is not square")
    A = A.astype(float)
    L = np.zeros_like(A)
    U = np.zeros_like(A)
    size = np.size(A, 0)

    L[0, 0] = A[0, 0]
    U[0, 0] = 1
    for i in range(1, size):
        U[i-1, i] = A[i-1, i] / L[i-1, i-1]
        U[i, i] = 1
        L[i, i-1] = A[i, i-1]
        L[i, i] = A[i, i] - A[i, i-1] * U[i-1, i]
    return L, U
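
# Usage sketch (illustrative, not part of the original gist): a small
# tridiagonal matrix chosen arbitrarily.
#   T_example = np.array([[2.0, 1.0, 0.0], [1.0, 2.0, 1.0], [0.0, 1.0, 2.0]])
#   L, U = lu_decomposition_for_tridiagonal_matrix(T_example)   # L.dot(U) reproduces T_example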

# https://en.wikipedia.org/wiki/Cholesky_decomposition
def cholesky_decomposition(A):
    if not np.size(A, 0) == np.size(A, 1):
        raise ValueError("The input matrix is not square")
    A = A.astype(float)
    L = np.zeros_like(A)
    size = np.size(A, 0)

    L[0, 0] = math.sqrt(A[0, 0])
    for i in range(1, size):
        L[i:, i-1] = (A[i:, i-1] - L[i:, :i-1].dot(L[i-1, :i-1])) / L[i-1, i-1]
        L[i, i] = math.sqrt(A[i, i] - L[i, :i].dot(L[i, :i]))
    return L
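
# Usage sketch (illustrative, not part of the original gist): a symmetric
# positive-definite matrix chosen arbitrarily.
#   S_example = np.array([[4.0, 2.0], [2.0, 3.0]])
#   L = cholesky_decomposition(S_example)   # L = [[2, 0], [1, sqrt(2)]], L.dot(L.T) reproduces S_example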

# https://en.wikipedia.org/wiki/Cholesky_decomposition#LDL_decomposition
def ldl_decomposition(A):
    if not np.size(A, 0) == np.size(A, 1):
        raise ValueError("The input matrix is not square")
    A = A.astype(float)
    L = np.zeros_like(A)
    D = np.zeros_like(A)
    diagD = D.diagonal().copy()
    size = np.size(A, 0)

    L[0, 0] = 1
    diagD[0] = A[0, 0]
    for i in range(1, size):
        L[i:, i-1] = (A[i:, i-1] - (L[i:, :i-1] * diagD[:i-1]).dot(L[i-1, :i-1])) / diagD[i-1]
        diagD[i] = A[i, i] - (L[i, :i] * diagD[:i]).dot(L[i, :i])
        L[i, i] = 1
    for i in range(size):
        D[i, i] = diagD[i]
    return L, D
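
# Usage sketch (illustrative, not part of the original gist): reuses the
# symmetric matrix assumed in the Cholesky example.
#   L, D = ldl_decomposition(np.array([[4.0, 2.0], [2.0, 3.0]]))
#   # L = [[1, 0], [0.5, 1]], D = diag(4, 2); L.dot(D).dot(L.T) reproduces the input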

# Iterative solutions of systems of linear equations

# https://en.wikipedia.org/wiki/Jacobi_method
def jacobi_iterative_method(A, b, x0=None, max_iterations=1000, termination_error=1e-4, error_norm=0, display=False):
    x = x0 if x0 is not None else np.zeros_like(b)  # set initial estimate
    x = x.astype(float)  # cast the array to float type
    if not (np.size(A, 0) == np.size(A, 1) == np.size(b, 0) == np.size(x, 0)) or np.size(x, 1) != 1:
        raise ValueError("The sizes of the input matrices do not match")

    current_error = calculate_error(A.dot(x) - b, error_norm)
    iteration = 0
    if current_error <= termination_error:
        return x, current_error, iteration

    # core iteration
    while iteration < max_iterations:
        iteration += 1
        x_new = np.zeros_like(x)  # Jacobi uses only values from the previous iterate x
        for i in range(np.size(x_new, 0)):
            x_new[i] = (b[i] - A[i, :i].dot(x[:i]) - A[i, i+1:].dot(x[i+1:])) / A[i, i]
        x = x_new
        current_error = calculate_error(A.dot(x) - b, error_norm)
        if current_error < termination_error:
            break

    if display:
        print("Jacobi iteration:")
        print(f"A =\n{A}\nb =\n{b}\n")
        print("Solution:")
        print(f"x =\n{x}\n")
        print(f"current_error = {current_error}\niteration = {iteration}\n")
    return x, current_error, iteration
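
# Usage sketch (illustrative, not part of the original gist): a small
# diagonally dominant system, with b given as a column vector as the size
# check above expects.
#   A_example = np.array([[4.0, 1.0], [1.0, 3.0]])
#   b_example = np.array([[6.0], [7.0]])
#   x, err, its = jacobi_iterative_method(A_example, b_example)
#   # x converges to approximately [[1.0], [2.0]]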

# https://en.wikipedia.org/wiki/Gauss-Seidel_method
def gauss_seidel_iterative_method(A, b, x0=None, max_iterations=1000, termination_error=1e-4, error_norm=0, display=False):
    x = x0 if x0 is not None else np.zeros_like(b)  # set initial estimate
    x = x.astype(float)  # cast the array to float type
    if not (np.size(A, 0) == np.size(A, 1) == np.size(b, 0) == np.size(x, 0)) or np.size(x, 1) != 1:
        raise ValueError("The sizes of the input matrices do not match")

    current_error = calculate_error(A.dot(x) - b, error_norm)
    iteration = 0
    if current_error <= termination_error:
        return x, current_error, iteration

    # core iteration
    while iteration < max_iterations:
        iteration += 1
        x_new = np.zeros_like(x)
        x_new[0] = (b[0] - A[0, 1:].dot(x[1:])) / A[0, 0]
        for i in range(1, np.size(x_new, 0)):
            # already-updated entries x_new[:i] are used for the lower triangle
            x_new[i] = (b[i] - A[i, :i].dot(x_new[:i]) - A[i, i+1:].dot(x[i+1:])) / A[i, i]
        x = x_new
        current_error = calculate_error(A.dot(x) - b, error_norm)
        if current_error < termination_error:
            break

    if display:
        print("Gauss-Seidel iteration:")
        print(f"A =\n{A}\nb =\n{b}\n")
        print("Solution:")
        print(f"x =\n{x}\n")
        print(f"current_error = {current_error}\niteration = {iteration}\n")
    return x, current_error, iteration
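
# Usage sketch (illustrative, not part of the original gist): the same assumed
# system as the Jacobi example; Gauss-Seidel typically needs fewer iterations.
#   x, err, its = gauss_seidel_iterative_method(np.array([[4.0, 1.0], [1.0, 3.0]]),
#                                               np.array([[6.0], [7.0]]))
#   # x converges to approximately [[1.0], [2.0]]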

# https://en.wikipedia.org/wiki/Successive_over-relaxation
def successive_over_relaxation_method(A, b, omega=1.5, x0=None, max_iterations=1000, termination_error=1e-4, error_norm=0, display=False):
    x = x0 if x0 is not None else np.zeros_like(b)  # set initial estimate
    x = x.astype(float)  # cast the array to float type
    if not (np.size(A, 0) == np.size(A, 1) == np.size(b, 0) == np.size(x, 0)) or np.size(x, 1) != 1:
        raise ValueError("The sizes of the input matrices do not match")

    current_error = calculate_error(A.dot(x) - b, error_norm)
    iteration = 0
    if current_error <= termination_error:
        return x, current_error, iteration

    # core iteration
    while iteration < max_iterations:
        iteration += 1
        x_new = np.zeros_like(x)
        x_new[0] = x[0] * (1 - omega) + (b[0] - A[0, 1:].dot(x[1:])) / A[0, 0] * omega
        for i in range(1, np.size(x_new, 0)):
            # relaxation: blend the previous value with the Gauss-Seidel update
            x_new[i] = x[i] * (1 - omega) + (b[i] - A[i, :i].dot(x_new[:i]) - A[i, i+1:].dot(x[i+1:])) / A[i, i] * omega
        x = x_new
        current_error = calculate_error(A.dot(x) - b, error_norm)
        if current_error < termination_error:
            break

    if display:
        print("Successive over-relaxation iteration:")
        print(f"A =\n{A}\nb =\n{b}\n")
        print("Solution:")
        print(f"x =\n{x}\n")
        print(f"current_error = {current_error}\niteration = {iteration}\n")
    return x, current_error, iteration
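
# Usage sketch (illustrative, not part of the original gist): the same assumed
# system again; omega = 1.0 reduces SOR to Gauss-Seidel, and 1 < omega < 2 can
# accelerate convergence for symmetric positive-definite systems.
#   x, err, its = successive_over_relaxation_method(np.array([[4.0, 1.0], [1.0, 3.0]]),
#                                                   np.array([[6.0], [7.0]]), omega=1.1)
#   # x converges to approximately [[1.0], [2.0]]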