A gradient descent implementation for one-dimensional problems.
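The update step below implements the textbook one-dimensional descent iteration

    x_{n+1} = x_n - step_size * f'(x_n)

using exact rational arithmetic, and the run halts once two consecutive iterates differ by less than 10**-5.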
import time

from virtual_machine import VirtualMachine, approx_rational


def make_update_step(gradient, step_size):
    """Return an update step that moves the guess against the gradient."""
    def update_step(state):
        g = state['g']
        state['prev_g'] = g
        state['g'] = g - step_size * gradient(g)
        state['g'] = approx_rational(state['g'])  # optional: keep the iterate a simple rational
        print("{} (={})".format(state['g'], float(state['g'])))
        time.sleep(.01)
        return state
    return update_step


def halting_condition(state):
    """Stop once two consecutive iterates are closer than PREC."""
    PREC = 10**-5
    delta = state['g'] - state['prev_g']
    return abs(delta) < PREC


if __name__ == "__main__":
    from fractions import Fraction

    def df(x):
        """
        Derivative of the objective function:
        f(x) := x**4 - 3 * x**3 + 2
        f'(x) = 4 * x**3 - 9 * x**2 = 4 * x**2 * (x - 2.25)
        f''(x) = 12 * x**2 - 18 * x = 12 * x * (x - 1.5)
        https://en.wikipedia.org/wiki/Gradient_descent#Python
        https://www.wolframalpha.com/input/?i=Plot%5Bx%5E4+-+3+*+x%5E3+%2B+2,+%7Bx,+-2,+4%7D%5D
        """
        return 4 * x**3 - 9 * x**2

    step_size = Fraction(1, 100)  # exact 1/100; using rationals throughout is optional
    update_step = make_update_step(df, step_size)

    vm_gd = VirtualMachine(update_step, halting_condition)

    state = dict(prev_g=0, g=6)  # initial guesses
    vm_gd.run(state)
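The script imports VirtualMachine and approx_rational from the author's virtual_machine module, which is not included in this gist. Purely as a sketch of what the code above expects, a minimal stand-in could look like the following; the constructor signature and run loop are inferred from the usage above, and the max_denominator cap in approx_rational is an assumption, not the author's code.

    from fractions import Fraction


    def approx_rational(x, max_denominator=10**6):
        # Keep the iterate an exact but simple fraction, so the
        # rational arithmetic stays cheap over many steps.
        return Fraction(x).limit_denominator(max_denominator)


    class VirtualMachine:
        def __init__(self, update_step, halting_condition):
            self.update_step = update_step
            self.halting_condition = halting_condition

        def run(self, state):
            # Apply the update step until the halting condition holds.
            while not self.halting_condition(state):
                state = self.update_step(state)
            return state

With such a stand-in (or the real module), starting from g = 6 with a step size of 1/100 the printed iterates should approach the local minimum of f at x = 9/4 = 2.25, matching the Wikipedia example linked in the docstring.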