-
Notifications
You must be signed in to change notification settings - Fork 1
/
Copy pathextreme.m
77 lines (68 loc) · 1.93 KB
/
extreme.m
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
function [Problem] = extreme(A, b, sigmaType, hiddenDim, lambda, use_sign)
% EXTREME  Regularized MSE problem built on an extreme-learning hidden layer.
%
%   Problem = extreme(A, b, sigmaType, hiddenDim, lambda, use_sign)
%
% Inputs:
%   A         - m x n data matrix, one sample per row.
%   b         - targets, one row per sample (may have several columns).
%   sigmaType - hidden activation: "sigmoid", "relu", anything else = identity.
%   hiddenDim - number of hidden units h.
%   lambda    - L2 (ridge) penalty weight on the output weights x.
%   use_sign  - if true, output() thresholds the linear readout with sign().
%
% The random hidden layer (W1, bias) is drawn once here and then fixed;
% only the output weights x are optimized (extreme learning machine).
d = length(b);
m = size(A, 1);
n = size(A, 2);
h = hiddenDim;
Problem.name = 'extreme';
Problem.m = m;
Problem.n = n;
Problem.samples = d;
Problem.h = h;
Problem.Q = A;
Problem.b = b;
Problem.l = lambda;
Problem.use_sign = use_sign;
% Random input-to-hidden weights and bias; never trained afterwards.
Problem.W1 = rand(n,h);
Problem.bias = rand(1,1);
% Hidden-layer activations: the design matrix of the linear output layer.
Problem.A = sigma(Problem.Q*Problem.W1, sigmaType);
Problem.W2 = rand(size(Problem.A,2),size(Problem.b,2));
Problem.output = @output;
function y = output(x)
% Linear readout of the hidden activations (optionally sign-thresholded).
y = Problem.A*x+Problem.bias;
if Problem.use_sign
y = sign(y);
end
end
Problem.cost = @cost;
function f = cost(x)
% Mean squared error plus L2 penalty lambda*||x||_2^2.
% Fixed: previously immse(...) + lambda*norm(x). immse requires the
% Image Processing Toolbox, and norm(x) (2-norm) is inconsistent with
% grad/grad2, which assume the squared-norm penalty x'*x (see the
% original commented-out line).
y = output(x);
f = mean((Problem.b(:) - y(:)).^2) + Problem.l*sum(x(:).^2);
end
Problem.grad = @grad;
function g = grad(x)
% Gradient of cost: (2/N)*A'*(y - b) + 2*lambda*x, with N = numel(b).
% https://math.stackexchange.com/a/1962938
% Fixed: was normalized by n (feature count) instead of the number of
% residual entries, and SUBTRACTED the regularization term where the
% gradient of +lambda*||x||^2 is +2*lambda*x.
% Output renamed d -> g: nested functions share the parent workspace,
% so assigning d here clobbered the parent's sample count.
y = output(x);
residual = (2 / numel(Problem.b)) * (y - Problem.b);
g = Problem.A'*residual + 2*Problem.l*x;
% TODO: gradient with sign (sign() is non-differentiable; the chain
% rule above ignores it when use_sign is true).
end
Problem.grad2 = @grad2;
function H = grad2()
% Hessian of cost: (2/N)*A'*A + 2*lambda*I (constant in x), where A is
% the hidden-activation matrix Problem.A.
% Fixed: was 2*A'*A on the RAW inputs; the quadratic term acts on the
% hidden activations, and the squared-norm penalty adds 2*lambda*I.
% Output renamed h -> H: h already holds hiddenDim in the shared
% parent workspace.
H = (2/numel(Problem.b))*(Problem.A'*Problem.A) + 2*Problem.l*eye(size(Problem.A,2));
end
Problem.sigma = @sigma;
function o = sigma(X, type)
% Elementwise hidden-layer activation selected by name.
if type == "sigmoid"
o = sigmoid(X);
elseif type == "relu"
o = relu(X);
else
o = linear(X);
end
end
end
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
function y = sigmoid(a)
% Elementwise logistic function: maps each entry of a into (0, 1).
decayed = exp(-a);
y = 1.0 ./ (1 + decayed);
end
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
function y = relu(a)
% Rectified linear unit: clamps negative entries to zero, elementwise.
y = max(a, 0);
end
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
function y = linear(x)
% Identity activation: passes its input through unchanged.
y = x;
end