
Commit

codezoned#57: added optimization techniques code
joinal committed Oct 18, 2018
1 parent 2a68a5a commit d799c2d
Showing 13 changed files with 4,642 additions and 0 deletions.
1,057 changes: 1,057 additions & 0 deletions KKT_optimization/.ipynb_checkpoints/infill-dphi-focus-checkpoint.ipynb


1,133 changes: 1,133 additions & 0 deletions KKT_optimization/.ipynb_checkpoints/infill-drho-focus-checkpoint.ipynb


1,057 changes: 1,057 additions & 0 deletions KKT_optimization/infill-dphi-focus.ipynb


1,133 changes: 1,133 additions & 0 deletions KKT_optimization/infill-drho-focus.ipynb


Binary file added Non-Linear Optimization [Python]/docs/error.png
5 changes: 5 additions & 0 deletions Non-Linear Optimization [Python]/docs/results
@@ -0,0 +1,5 @@
Method         Iterations   Result
CG             11           (0.9995053, 0.9990086)
Newton         14           (1.0000000, 1.0000000)
Quasi Newton   21           (1.0000000, 1.0000000)
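
For context, and not part of the commit itself: these figures are consistent with driving the solvers below on the Rosenbrock function from src/rosenbrock.py. A minimal reproduction sketch, assuming the src/ directory is on the Python path:

from numpy import array
from conjugate_gradient import conjugate_gradient

# Rosenbrock function and analytic gradient, as in src/rosenbrock.py
def f(x):  return 100*(x[1] - x[0]**2)**2 + (1 - x[0])**2
def fd(x): return array([400*x[0]**3 - 400*x[0]*x[1] + 2*x[0] - 2,
                         200*x[1] - 200*x[0]**2])

iterations = []
def count(i, direction, alpha, x):
    iterations.append(i)                  # record each iteration index

x_min = conjugate_gradient(f, fd, array([0.0, 0.0]), 100, 1e-5, count)
print(iterations[-1], x_min)              # expected: about 11, near (0.9995, 0.9990)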

41 changes: 41 additions & 0 deletions Non-Linear Optimization [Python]/src/conjugate_gradient.py
@@ -0,0 +1,41 @@
'''
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
from numpy import *
from line_search import find_step_length

def conjugate_gradient(f, fd, x, max_iterations, precision, callback):
    direction = -fd(x)                    # start with the steepest-descent direction
    gradient_next = matrix(fd(x)).T

    for i in range(1, max_iterations):
        # Wolfe line search; c2 = 0.1 is the usual curvature constant for CG
        alpha = find_step_length(f, fd, x, 1.0, direction, c2=0.1)
        x_prev = x
        x = x + alpha*direction

        callback(i, direction, alpha, x)

        gradient = gradient_next
        gradient_next = matrix(fd(x)).T

        if linalg.norm(x - x_prev) < precision:
            break

        # Fletcher-Reeves ratio: beta = (g_{k+1}^T g_{k+1}) / (g_k^T g_k)
        BFR = (gradient_next.T * gradient_next) / (gradient.T * gradient)
        BFR = squeeze(asarray(BFR))

        direction = -squeeze(asarray(gradient_next)) + BFR*direction
    return x
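
For reference, `BFR` is the standard Fletcher-Reeves coefficient, and the direction update is the usual conjugate-gradient recursion:

\beta_{k+1}^{FR} = \frac{\nabla f_{k+1}^\top \nabla f_{k+1}}{\nabla f_k^\top \nabla f_k},
\qquad
p_{k+1} = -\nabla f_{k+1} + \beta_{k+1}^{FR}\, p_k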
53 changes: 53 additions & 0 deletions Non-Linear Optimization [Python]/src/line_search.py
@@ -0,0 +1,53 @@
'''
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
import numpy

def find_step_length(f, fd, x, alpha, direction, c2):
    # Restrict f to the search ray: g(alpha) = f(x + alpha*p), gd = g'(alpha)
    g = lambda alpha: f(x + alpha*direction)
    gd = lambda alpha: numpy.dot(fd(x + alpha*direction), direction)
    return interpolation(g, gd, alpha, c2)

def wolf1(f, fd, alpha):
    # Sufficient-decrease (Armijo) condition: f(alpha) <= f(0) + c1*alpha*f'(0)
    c1 = 1e-4
    return f(alpha) <= f(0) + c1*alpha*fd(0)

def wolf_strong(f, fd, alpha, c2):
    # Strong curvature condition: |f'(alpha)| <= c2*|f'(0)| (f'(0) < 0 for descent)
    return abs(fd(alpha)) <= -c2*fd(0)

def simple_backtracking(f, fd, alpha, c2):
    # Classic backtracking on the sufficient-decrease condition only
    # (c2 is unused here; it is kept for signature compatibility)
    rate = 0.5
    while not wolf1(f, fd, alpha):
        alpha = rate*alpha
    return alpha

def interpolation(f, fd, alpha, c2):
    lo = 0.0
    hi = 1.0

    for i in range(0, 20):
        if wolf1(f, fd, alpha):
            if wolf_strong(f, fd, alpha, c2):
                return alpha

        half = (lo + hi)/2.0
        # Minimizer of the quadratic fitted through f(lo), fd(lo) and f(hi)
        alpha = -(fd(lo)*hi*hi) / (2*(f(hi) - f(lo) - fd(lo)*hi))

        if alpha < lo or alpha > hi:  # quadratic interpolation failed; bisect instead
            alpha = half
        if fd(alpha) > 0:
            hi = alpha
        else:
            lo = alpha
    return alpha
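
For reference, `wolf1` and `wolf_strong` together encode the strong Wolfe conditions on \varphi(\alpha) = f(x + \alpha p):

\varphi(\alpha) \le \varphi(0) + c_1 \alpha \varphi'(0),
\qquad
|\varphi'(\alpha)| \le c_2\, |\varphi'(0)|

with c_1 = 10^{-4}; since \varphi'(0) < 0 for a descent direction, the code's -c2*fd(0) equals c_2 |\varphi'(0)|.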
32 changes: 32 additions & 0 deletions Non-Linear Optimization [Python]/src/newton.py
@@ -0,0 +1,32 @@
'''
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
from numpy import *
from line_search import find_step_length

def newton(f, fd, fdd, x, max_iterations, precision, callback):
    for i in range(1, max_iterations):
        gradient = fd(x)
        hessian = fdd(x)

        # Newton direction: solve H p = -g rather than forming the inverse
        direction = -linalg.solve(hessian, gradient)
        alpha = find_step_length(f, fd, x, 1.0, direction, c2=0.9)
        x_prev = x
        x = x + alpha*direction

        callback(i, direction, alpha, x)

        if linalg.norm(x - x_prev) < precision:
            break
    return x
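
A minimal, self-contained usage sketch (the quadratic objective below is an illustrative stand-in, not part of the commit):

from numpy import array
from newton import newton

# f(x) = x1^2 + 10*x2^2: gradient and (constant) Hessian in closed form
f   = lambda x: x[0]**2 + 10*x[1]**2
fd  = lambda x: array([2*x[0], 20*x[1]])
fdd = lambda x: array([[2.0, 0.0], [0.0, 20.0]])

x_min = newton(f, fd, fdd, array([3.0, 4.0]), 50, 1e-8,
               lambda i, d, a, x: print(i, x))
# the full Newton step (alpha = 1.0) reaches the minimizer (0, 0) in one move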
43 changes: 43 additions & 0 deletions Non-Linear Optimization [Python]/src/quasi_newton.py
@@ -0,0 +1,43 @@
'''
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
from numpy import *
from line_search import find_step_length

def quasi_newton(f, fd, x, max_iterations, precision, callback):
    I = identity(x.size)
    H = I                                      # initial inverse-Hessian approximation

    for i in range(1, max_iterations):
        gradient = fd(x)
        direction = -H * matrix(gradient).T    # quasi-Newton direction p = -H g
        direction = squeeze(asarray(direction))

        alpha = find_step_length(f, fd, x, 1.0, direction, c2=0.9)
        x_prev = x
        x = x + alpha*direction

        callback(i, direction, alpha, x)

        if linalg.norm(x - x_prev) < precision:
            break

        # BFGS update of the inverse-Hessian approximation
        s = matrix(x - x_prev).T               # step
        y = matrix(fd(x) - fd(x_prev)).T       # change in gradient
        rho = float(1 / (y.T*s))
        H = (I - rho*s*y.T)*H*(I - rho*y*s.T) + rho*s*s.T
    return x
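
For reference, the `H` update is the standard BFGS recursion for the inverse-Hessian approximation: with s_k = x_{k+1} - x_k, y_k = \nabla f_{k+1} - \nabla f_k and \rho_k = 1/(y_k^\top s_k),

H_{k+1} = (I - \rho_k s_k y_k^\top)\, H_k\, (I - \rho_k y_k s_k^\top) + \rho_k s_k s_k^\top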
62 changes: 62 additions & 0 deletions Non-Linear Optimization [Python]/src/rosenbrock.py
@@ -0,0 +1,62 @@
'''
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
import math
from numpy import *
from newton import *
from quasi_newton import *
from steepest_descent import *
from conjugate_gradient import *

if __name__ == '__main__':
    # Rosenbrock function f(x) = 100*(x2 - x1^2)^2 + (1 - x1)^2 and its
    # analytic first and second derivatives
    def f(x): return 100 * math.pow(x[1] - math.pow(x[0], 2), 2) + math.pow(1 - x[0], 2)
    def df_dx1(x): return 400*math.pow(x[0], 3) - 400*x[0]*x[1] + 2*x[0] - 2
    def df_dx2(x): return 200*x[1] - 200*math.pow(x[0], 2)
    def fd(x): return array([df_dx1(x), df_dx2(x)])

    def df_dx1_dx1(x): return 1200*math.pow(x[0], 2) - 400*x[1] + 2
    def df_dx1_dx2(x): return -400*x[0]

    def fdd(x):
        return array([
            [df_dx1_dx1(x), df_dx1_dx2(x)],
            [df_dx1_dx2(x), 200]])    # d2f/dx2^2 is the constant 200

    def print_error(i, direction, alpha, x):
        # distance from the known optimum at (1, 1)
        opt = f(array([1, 1]))
        print("%d, %.20f" % (i, f(x) - opt))

    def print_gradient(i, direction, alpha, x):
        print("%d, %.20f" % (i, linalg.norm(fd(x))))

    def print_all(i, direction, alpha, x):
        print("iteration %d: \t direction: %s \t alpha: %.7f \t x: %s"
              % (i, ["%.7f" % _ for _ in direction], alpha, ["%.7f" % _ for _ in x]))

    x = array([0.0, 0.0])
    precision = 1e-5
    max_iterations = 100
    callback = print_all

    print("steepest descent:")
    steepest_descent(f, fd, x, max_iterations, precision, callback)

    print("\nnewton:")
    newton(f, fd, fdd, x, max_iterations, precision, callback)

    print("\nquasi newton:")
    quasi_newton(f, fd, x, max_iterations, precision, callback)

    print("\nconjugate gradient:")
    conjugate_gradient(f, fd, x, max_iterations, precision, callback)
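
Running the driver directly (e.g. python rosenbrock.py from the src/ directory) prints a per-iteration trace of direction, step length, and iterate for all four methods via the print_all callback.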

26 changes: 26 additions & 0 deletions Non-Linear Optimization [Python]/src/steepest_descent.py
@@ -0,0 +1,26 @@
'''
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
from numpy import *
from line_search import find_step_length

def steepest_descent(f, fd, x, max_iterations, precision, callback):
    for i in range(0, max_iterations):
        direction = -fd(x)                 # step along the negative gradient
        alpha = find_step_length(f, fd, x, 1.0, direction, c2=0.9)
        x = x + alpha*direction
        callback(i, direction, alpha, x)
        if linalg.norm(direction) < precision:   # stop once the gradient is small
            break
    return x
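
For reference, the iteration here is simply x_{k+1} = x_k + \alpha_k p_k with p_k = -\nabla f(x_k) and \alpha_k from the Wolfe line search above; the loop stops once \|\nabla f(x_k)\| drops below precision.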
