diff --git a/combine_classes.py b/combine_classes.py
index 7bb154a..1f3392c 100644
--- a/combine_classes.py
+++ b/combine_classes.py
@@ -1,71 +1,76 @@
-# Make 'classes' file
-# TODO: Reassign file save destinations, comment, and remove extra lines of code
+# Run Order: 2nd, 1 out of 1
+# Make 'classes' file in results folder
 
-import numpy
+import numpy as np
 
+########
+#### Look at confused pairs for all datasets except EMNIST
+########
-paired_test = np.load('./results/20200509/confused_pairs_all.npy')
-print(paired_test.shape)
+paired_test = np.load('./results/confused_pairs_all.npy')
 
-ds = 0
+# Use the mean of each paired class set over all trials
 pt_mean = np.mean(paired_test, axis=1)
 
+# Initialize dataset set
 ds_set = ['mnist', 'fmnist', 'kmnist', 'svhn', 'usps', 'cifar10']
 
+# Initialize classes variable for record keeping
 classes = []
 
-paired_test
+# For each dataset
 for i, ds in enumerate(ds_set):
+    # Select the paired class means for the selected dataset
    focus = pt_mean[i]
 
+    # Select the pair of classes that has the lowest score
    a = np.min(focus[np.nonzero(focus)])
-    b = np.sort(focus[np.nonzero(focus)])
-    e = b[len(b)-2]
    c = np.where(focus == a)
-    d = np.where(focus == e)
-    classes.append([c[0][0], c[1][0], ds, a, d[0][0], d[1][0], e])
+    # Record keeping
+    classes.append([c[0][0], c[1][0], ds])
 
 classes = np.array(classes, dtype=object)
 classes_orig = classes
-print(classes)
-# np.save('./results/20200511/classes.npy', classes[:,:3], allow_pickle=True)
-# np.load('./results/20200511/classes.npy', allow_pickle=True)
 
+########
+#### Look at confused pairs for only EMNIST
+########
+
-paired_test = np.load('./results/20200509/confused_pairs_emnist_upper.npy')
-paired_test.shape
+paired_test = np.load('./results/confused_pairs_emnist_upper.npy')
 pt_mean = np.mean(paired_test, axis=1)
 
+# Initialize dataset set
 ds_set = ['emnist']
+
+# Initialize classes variable for record keeping
 classes = []
 
+# For each dataset (only EMNIST)
 for i, ds in enumerate(ds_set):
+    # Select the paired class means for the selected dataset
    focus = pt_mean[i]
 
+    # Select the pair of classes that has the lowest score
    a = np.min(focus[np.nonzero(focus)])
-    print(a)
-    b = np.sort(focus[np.nonzero(focus)])
-    e = b[len(b)-2]
    c = np.where(focus == a)
-    d = np.where(focus == e)
-
-    classes.append([c[0][0]+10, c[1][0]+10, ds, a, d[0][0]+10, d[1][0]+10, e ])
+
+    # Record keeping; the +10 offset maps matrix indices back to EMNIST labels
+    classes.append([c[0][0]+10, c[1][0]+10, ds])
 
 classes = np.array(classes, dtype=object)
-print(classes)
-# intermed = np.concatenate((class_orig, classes), 0)
-# np.save('./results/20200511/classes_emnist_perf.npy', intermed, allow_pickle=True)
-# np.save('./results/20200511/classes.npy', classes, allow_pickle=True)
 
+########
+#### Organize final class pairs into an array for further use
+########
 
-# classes_orig = np.load('./results/20200511/classes.npy', allow_pickle=True)
-print(classes_orig.shape, classes[:,:].shape)
-classes_final = np.concatenate((classes_orig[:,:3],classes[:,:3]),0)
+classes_final = np.concatenate((classes_orig, classes), 0)
+# Move the EMNIST row from the end to index 3, giving the final order
+# mnist, fmnist, kmnist, emnist, svhn, usps, cifar10
 a = classes_final[3:6].copy()
 b = classes_final[6].copy()
 classes_final[3] = b
 classes_final[4:] = a
 print(classes_final)
-# # np.save('./results/20200511/classes.npy', classes_final, allow_pickle=True)
\ No newline at end of file
+np.save('./results/classes.npy', classes_final, allow_pickle=True)
\ No newline at end of file
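Reviewer note: the pair-selection step in combine_classes.py is easy to misread, so here is a minimal self-contained sketch of the same logic on a made-up 4-class score matrix (the values are invented for illustration). Only the upper triangle is populated by the pairing loops, so `np.nonzero` masks out the untested zero entries before taking the min, and `np.where` recovers the (class1, class2) indices.

```python
import numpy as np

# Toy 4-class pairwise-accuracy matrix; only j > i is filled.
focus = np.array([
    [0.0, 0.98, 0.91, 0.99],
    [0.0, 0.0,  0.97, 0.96],
    [0.0, 0.0,  0.0,  0.88],   # classes 2 and 3 are most confused
    [0.0, 0.0,  0.0,  0.0],
])

a = np.min(focus[np.nonzero(focus)])   # lowest LDA accuracy = most confused
c = np.where(focus == a)               # tuple of (row indices, col indices)
print(c[0][0], c[1][0], a)             # -> 2 3 0.88
```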
diff --git a/confused_pairs.py b/confused_pairs.py
new file mode 100644
index 0000000..28ae1a4
--- /dev/null
+++ b/confused_pairs.py
@@ -0,0 +1,65 @@
+# Run Order: 1st, 1 out of 2
+# Determine most confused pairs of classes in all datasets except for EMNIST
+
+from custompackage.load_data import *
+from custompackage.load_architecture import *
+from custompackage.traintestloop import *
+
+import torch
+from torch.utils.data import DataLoader
+import numpy as np
+import math
+import torchvision
+from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
+import scipy
+import os
+import glob
+import pandas as pd
+import pickle
+
+if not os.path.exists('results'):
+    os.makedirs('results')
+
+# Initialize parameters for dataset loading
+bs = 256
+weighting = 'paired'
+trials = 10
+ds_set = ['mnist', 'fmnist', 'kmnist', 'svhn', 'usps', 'cifar10']
+
+# Initialize for record keeping
+paired_test = np.zeros((len(ds_set), trials, 10, 10))
+for m in range(trials):
+    # For each 10-class dataset
+    for k, ds in enumerate(ds_set):
+        # Go through each class
+        for i in range(10):
+            t1 = i
+            # and pair it with every other class
+            for j in range(i+1, 10):
+                t2 = j
+
+                # Load the binary classification dataloaders
+                trainloaders, validloaders, testloader = dataset_weighted_split_all(bs, t1, t2, weighting, trials, ds)
+
+                # Assign entirety of the datasets within each dataloader to a variable
+                X_train = trainloaders[0].dataset.tensors[0]
+                y_train = trainloaders[0].dataset.tensors[1]
+                X_test = testloader.dataset.tensors[0]
+                y_test = testloader.dataset.tensors[1]
+
+                # Initialize LDA
+                lda = LinearDiscriminantAnalysis()
+
+                # Fit to images, labels
+                lda.fit(X_train, y_train)
+
+                # Score accuracy on the held-out test set
+                score_test = lda.score(X_test, y_test)
+
+# print(ds, m, i, j, score_test)
+
+                # Record keeping
+                paired_test[k, m, i, j] = score_test
+
+    # Checkpoint results after every trial
+    np.save('./results/confused_pairs_all.npy', paired_test)
\ No newline at end of file
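For readers without `custompackage`, the technique above reduces to fitting a binary LDA per class pair and recording test accuracy. A runnable sketch on sklearn's bundled digits dataset, used here purely as a stand-in for the repo's loaders:

```python
import numpy as np
from sklearn.datasets import load_digits
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
from sklearn.model_selection import train_test_split

X, y = load_digits(return_X_y=True)
scores = np.zeros((10, 10))
for i in range(10):
    for j in range(i + 1, 10):
        # Restrict to the two classes under test
        mask = (y == i) | (y == j)
        X_tr, X_te, y_tr, y_te = train_test_split(
            X[mask], y[mask], test_size=0.3, random_state=0)
        lda = LinearDiscriminantAnalysis().fit(X_tr, y_tr)
        scores[i, j] = lda.score(X_te, y_te)   # binary test accuracy

# Same extraction step as combine_classes.py
a = np.min(scores[np.nonzero(scores)])
i_min, j_min = [v[0] for v in np.where(scores == a)]
print(f"most confused pair: {i_min} vs {j_min} (acc {a:.3f})")
```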
diff --git a/confused_pairs_emnist.py b/confused_pairs_emnist.py
new file mode 100644
index 0000000..734c9a7
--- /dev/null
+++ b/confused_pairs_emnist.py
@@ -0,0 +1,68 @@
+# Run Order: 1st, 2 out of 2
+# Determine most confused pairs of classes in the EMNIST dataset only, specifically the uppercase letters
+
+from custompackage.load_data import *
+from custompackage.load_architecture import *
+from custompackage.traintestloop import *
+
+import torch
+from torch.utils.data import DataLoader
+import numpy as np
+import math
+import torchvision
+from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
+import scipy
+import os
+import glob
+import pandas as pd
+import pickle
+
+if not os.path.exists('results'):
+    os.makedirs('results')
+
+# Testing uppercase letters from EMNIST only
+
+# Initialize parameters for dataset loading
+bs = 256
+weighting = 'paired'
+trials = 10
+ds_set = ['emnist']
+
+# Initialize for record keeping
+paired_test = np.zeros((len(ds_set), trials, 26, 26))
+for m in range(trials):
+    # For the 26-class uppercase subset of EMNIST
+    for k, ds in enumerate(ds_set):
+        # Go through each class (EMNIST labels 10-35 are the uppercase letters)
+        for i in range(10, 36):
+            t1 = i
+            # and pair it with every other class
+            for j in range(i+1, 36):
+                t2 = j
+
+                # Load the binary classification dataloaders
+                trainloaders, validloaders, testloader = dataset_weighted_split_all(bs, t1, t2, weighting, trials, ds)
+
+                # Assign entirety of the datasets within each dataloader to a variable
+                X_train = trainloaders[0].dataset.tensors[0]
+                y_train = trainloaders[0].dataset.tensors[1]
+                X_test = testloader.dataset.tensors[0]
+                y_test = testloader.dataset.tensors[1]
+
+                # Initialize LDA
+                lda = LinearDiscriminantAnalysis()
+
+                # Fit to images, labels
+                lda.fit(X_train, y_train)
+
+                # Score accuracy on the held-out test set
+                score_test = lda.score(X_test, y_test)
+
+# print(ds, m, i, j, score_test)
+
+                # Record keeping; shift indices down by 10 to fit the 26x26 array
+                paired_test[k, m, i-10, j-10] = score_test
+
+    # Checkpoint results after every trial
+    np.save('./results/confused_pairs_emnist_upper.npy', paired_test)
\ No newline at end of file
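The ±10 shifts do double duty here: classes 10-35 select the uppercase letters, and i-10/j-10 compress them into the 26x26 result array. Assuming the loaders follow EMNIST's byclass/bymerge labeling (0-9 digits, 10-35 uppercase A-Z) — an assumption, since the loader code is not in this diff — the bookkeeping looks like:

```python
def matrix_index_to_label(idx):
    """Map a 0..25 result-matrix index back to an EMNIST label 10..35."""
    return idx + 10

def label_to_letter(label):
    """Map an EMNIST uppercase label 10..35 to 'A'..'Z'."""
    return chr(ord('A') + label - 10)

# e.g. a most-confused entry found at the hypothetical matrix cell (8, 14):
i, j = 8, 14
print(matrix_index_to_label(i), label_to_letter(matrix_index_to_label(i)))  # 18 I
print(matrix_index_to_label(j), label_to_letter(matrix_index_to_label(j)))  # 24 O
```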
diff --git a/fcnn.py b/fcnn.py
new file mode 100644
index 0000000..e00c5ab
--- /dev/null
+++ b/fcnn.py
@@ -0,0 +1,77 @@
+# Run Order: 3rd, 1 out of 2
+### Train and test fcnn model
+### Saves test loss and test accuracy
+### all classes script, early stopping implemented
+
+from custompackage.load_data import *
+from custompackage.load_architecture import *
+from custompackage.traintestloop import *
+
+import torch
+from torch.utils.data import DataLoader
+import torch.optim as optim
+import torch.nn as nn
+import numpy as np
+import math
+import torchvision
+from torchvision import transforms
+from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
+from sklearn.metrics import classification_report, confusion_matrix
+from sklearn.svm import SVC
+from sklearn.linear_model import LogisticRegression
+from sklearn.naive_bayes import GaussianNB
+import matplotlib
+import matplotlib.pyplot as plt
+from torch.optim.optimizer import required
+from torch.utils.data.dataset import random_split
+import scipy
+import os
+import glob
+import pandas as pd
+import pickle
+from pytorchtools import EarlyStopping
+
+# Test space for networks
+# Select Class Set
+class_set = 0
+
+# Initialize settings
+bs = 256
+weighting = 'paired'
+trials = 10
+epochs = 2000
+trees_set = [1,2,4,8,16,32]
+
+# Load class-dataset list
+classes = np.load('./results/classes.npy', allow_pickle=True)
+
+# Initialize final test loss and accuracy variables
+loss = np.zeros((len(classes), trials, len(trees_set)))
+acc = np.zeros((len(classes), trials, len(trees_set)))
+
+# For each dataset enumerated from classes list
+for j, (t1, t2, ds) in enumerate(classes):
+    print(t1, t2, ds)
+    # Load data loaders
+    trainloaders, validloaders, testloader = dataset_weighted_split_all(bs, t1, t2, weighting, trials, ds, permute=False)
+    # Initialize input size for model initialization purposes
+    input_size = trainloaders[0].dataset.tensors[0][0].shape[0]
+    # For each trial
+    for i in range(trials):
+        # For every k-tree defined by trees_set
+        for k, trees in enumerate(trees_set):
+            print(j, i, k)
+            # Initialize the fcnn model, such that hidden layer is twice the number of trees
+            model = simple_fcnn(input_size, 2*trees, 1).cuda()
+            # Train and test fcnn, assigning loss and acc values
+            loss_curve, acc_curve, loss[j,i,k], acc[j,i,k], model_t = train_test_fc(model, trainloaders[i],
+                                                                                    validloaders[i], testloader, epochs=epochs)
+
+    # Save accuracy and loss arrays
+    np.save('./results/fcnn_acc_'+str(class_set)+'.npy', acc)
+    np.save('./results/fcnn_loss_'+str(class_set)+'.npy', loss)
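`simple_fcnn` is defined in `custompackage.load_architecture`, which is not part of this diff. A plausible minimal reading of the call `simple_fcnn(input_size, 2*trees, 1)` — hypothetical, not the repo's actual definition — is a single hidden layer whose width (2*trees) is chosen to match the parameter budget of a k-tree with `trees` subtrees:

```python
import torch.nn as nn

class SimpleFCNN(nn.Module):
    """Hypothetical sketch of simple_fcnn(input_size, hidden, out)."""
    def __init__(self, input_size, hidden, out=1):
        super().__init__()
        self.net = nn.Sequential(
            nn.Linear(input_size, hidden),  # hidden = 2 * trees in fcnn.py
            nn.ReLU(),
            nn.Linear(hidden, out),
            nn.Sigmoid(),                   # binary classification output
        )

    def forward(self, x):
        return self.net(x)
```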
diff --git a/ktree_benchmarking.ipynb b/ktree_benchmarking.ipynb
index f259dcd..f30ce80 100644
--- a/ktree_benchmarking.ipynb
+++ b/ktree_benchmarking.ipynb
@@ -11,29 +11,15 @@
   },
   {
    "cell_type": "code",
-   "execution_count": 7,
+   "execution_count": 2,
    "metadata": {},
-   "outputs": [
-    {
-     "ename": "ModuleNotFoundError",
-     "evalue": "No module named 'custompackage.neuron_capacity'",
-     "output_type": "error",
-     "traceback": [
-      "\u001b[1;31m---------------------------------------------------------------------------\u001b[0m",
-      "\u001b[1;31mModuleNotFoundError\u001b[0m Traceback (most recent call last)",
-      "\u001b[1;32m\u001b[0m in \u001b[0;36m\u001b[1;34m\u001b[0m\n\u001b[0;32m 2\u001b[0m \u001b[1;32mfrom\u001b[0m \u001b[0mcustompackage\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mload_architecture\u001b[0m \u001b[1;32mimport\u001b[0m \u001b[1;33m*\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 3\u001b[0m \u001b[1;32mfrom\u001b[0m \u001b[0mcustompackage\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mtraintestloop\u001b[0m \u001b[1;32mimport\u001b[0m \u001b[1;33m*\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[1;32m----> 4\u001b[1;33m \u001b[1;32mfrom\u001b[0m \u001b[0mcustompackage\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mneuron_capacity\u001b[0m \u001b[1;32mimport\u001b[0m \u001b[1;33m*\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0m\u001b[0;32m 5\u001b[0m \u001b[1;32mfrom\u001b[0m \u001b[0mcustompackage\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0msin_ineq\u001b[0m \u001b[1;32mimport\u001b[0m \u001b[1;33m*\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 6\u001b[0m \u001b[1;33m\u001b[0m\u001b[0m\n",
-      "\u001b[1;31mModuleNotFoundError\u001b[0m: No module named 'custompackage.neuron_capacity'"
-     ]
-    }
-   ],
+   "outputs": [],
    "source": [
     "\n",
     "\n",
     "from custompackage.load_data import *\n",
     "from custompackage.load_architecture import *\n",
     "from custompackage.traintestloop import *\n",
-    "from custompackage.neuron_capacity import *\n",
-    "from custompackage.sin_ineq import *\n",
     "\n",
     "import torch\n",
     "from torch.utils.data import DataLoader\n",
@@ -1956,6 +1942,18 @@
     "plt.imshow(model.w0_3.weight.data.cpu()[:16,:16])"
    ]
   },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": []
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": []
+  },
   {
    "cell_type": "code",
    "execution_count": null,
diff --git a/ktree_orig.py b/ktree_orig.py
new file mode 100644
index 0000000..bb10280
--- /dev/null
+++ b/ktree_orig.py
@@ -0,0 +1,86 @@
+# Run Order: 4th, 1 out of 3
+### Train and test k-tree model
+### Saves test loss and test accuracy
+### Uses original image input order
+### all classes script, early stopping implemented
+
+from custompackage.load_data import *
+from custompackage.load_architecture import *
+from custompackage.traintestloop import *
+
+import torch
+from torch.utils.data import DataLoader
+import torch.optim as optim
+import torch.nn as nn
+import numpy as np
+import math
+import torchvision
+from torchvision import transforms
+from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
+from sklearn.metrics import classification_report, confusion_matrix
+from sklearn.svm import SVC
+from sklearn.linear_model import LogisticRegression
+from sklearn.naive_bayes import GaussianNB
+import matplotlib
+import matplotlib.pyplot as plt
+from torch.optim.optimizer import required
+from torch.utils.data.dataset import random_split
+import scipy
+import os
+import glob
+import pandas as pd
+import pickle
+from pytorchtools import EarlyStopping
+
+# Test space for networks
+# Select Class Set
+class_set = 0
+
+# Initialize settings
+bs = 256
+weighting = 'paired'
+trials = 10
+epochs = 2000
+trees_set = [1,2,4,8,16,32]
+
+# Load class-dataset list
+classes = np.load('./results/classes.npy', allow_pickle=True)
+
+# Optionally restrict to a subset of class pairs:
+# if class_set == 0:
+#     classes = classes[0:2]  # mnist fmnist
+# elif class_set == 1:
+#     classes = classes[2:4]  # kmnist emnist
+# elif class_set == 2:
+#     classes = classes[4:6]  # svhn usps
+# else:
+#     classes = classes[6].reshape(1,-1)
+
+# Initialize final test loss and accuracy variables
+loss = np.zeros((len(classes), trials, len(trees_set)))
+acc = np.zeros((len(classes), trials, len(trees_set)))
+
+# For each dataset enumerated from classes list
+for j, (t1, t2, ds) in enumerate(classes):
+    print(t1, t2, ds)
+    # Load data loaders
+    trainloaders, validloaders, testloader = dataset_weighted_split_all(bs, t1, t2, weighting, trials, ds, permute=False)
+    # Initialize input size for model initialization purposes
+    input_size = trainloaders[0].dataset.tensors[0][0].shape[0]
+    # For each trial
+    for i in range(trials):
+        # For every k-tree defined by trees_set
+        for k, trees in enumerate(trees_set):
+            print(j, i, k)
+            # Initialize the ktree model
+            model = ktree_gen(ds=ds, Repeats=trees, Padded=True).cuda()
+
+            # Train and test ktree, assigning loss and acc values
+            loss_curve, acc_curve, loss[j,i,k], acc[j,i,k], model_t = train_test_ktree(model, trainloaders[i],
+                                                                                       validloaders[i], testloader, epochs = epochs, randorder=False)
+    # Save accuracy and loss arrays
+    np.save('./results/ktree_acc_orig_'+str(class_set)+'.npy', acc)
+    np.save('./results/ktree_loss_orig_'+str(class_set)+'.npy', loss)
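The early stopping advertised in these headers happens inside `train_test_ktree` / `train_test_fc`, which this diff does not show. A runnable sketch of the usual pytorchtools pattern, assuming the standard `EarlyStopping` API from that package (patience-based, checkpoints on improvement), with toy data standing in for the real loaders:

```python
import torch
import torch.nn as nn
from pytorchtools import EarlyStopping  # same package the scripts import

# Toy data/model just to make the pattern concrete
X = torch.randn(64, 10)
y = torch.rand(64, 1)
model = nn.Sequential(nn.Linear(10, 1), nn.Sigmoid())
opt = torch.optim.Adam(model.parameters())
loss_fn = nn.BCELoss()

early_stopping = EarlyStopping(patience=20)

for epoch in range(200):
    opt.zero_grad()
    loss = loss_fn(model(X), y)
    loss.backward()
    opt.step()

    val_loss = loss.item()           # stand-in for a real validation pass
    early_stopping(val_loss, model)  # saves a checkpoint when loss improves
    if early_stopping.early_stop:
        break
```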
diff --git a/ktree_perm.py b/ktree_perm.py
new file mode 100644
index 0000000..4290666
--- /dev/null
+++ b/ktree_perm.py
@@ -0,0 +1,86 @@
+# Run Order: 5th, 1 out of 2
+### Train and test k-tree model
+### Saves test loss and test accuracy
+### Uses a fixed pixel-permuted image input order (permute=True)
+### all classes script, early stopping implemented
+
+from custompackage.load_data import *
+from custompackage.load_architecture import *
+from custompackage.traintestloop import *
+
+import torch
+from torch.utils.data import DataLoader
+import torch.optim as optim
+import torch.nn as nn
+import numpy as np
+import math
+import torchvision
+from torchvision import transforms
+from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
+from sklearn.metrics import classification_report, confusion_matrix
+from sklearn.svm import SVC
+from sklearn.linear_model import LogisticRegression
+from sklearn.naive_bayes import GaussianNB
+import matplotlib
+import matplotlib.pyplot as plt
+from torch.optim.optimizer import required
+from torch.utils.data.dataset import random_split
+import scipy
+import os
+import glob
+import pandas as pd
+import pickle
+from pytorchtools import EarlyStopping
+
+# Test space for networks
+# Select Class Set
+class_set = 0
+
+# Initialize settings
+bs = 256
+weighting = 'paired'
+trials = 10
+epochs = 2000
+trees_set = [1,2,4,8,16,32]
+
+# Load class-dataset list
+classes = np.load('./results/classes.npy', allow_pickle=True)
+
+# Optionally restrict to a subset of class pairs:
+# if class_set == 0:
+#     classes = classes[0:2]  # mnist fmnist
+# elif class_set == 1:
+#     classes = classes[2:4]  # kmnist emnist
+# elif class_set == 2:
+#     classes = classes[4:6]  # svhn usps
+# else:
+#     classes = classes[6].reshape(1,-1)
+
+# Initialize final test loss and accuracy variables
+loss = np.zeros((len(classes), trials, len(trees_set)))
+acc = np.zeros((len(classes), trials, len(trees_set)))
+
+# For each dataset enumerated from classes list
+for j, (t1, t2, ds) in enumerate(classes):
+    print(t1, t2, ds)
+    # Load data loaders, with a fixed pixel permutation applied
+    trainloaders, validloaders, testloader = dataset_weighted_split_all(bs, t1, t2, weighting, trials, ds, permute=True)
+    # Initialize input size for model initialization purposes
+    input_size = trainloaders[0].dataset.tensors[0][0].shape[0]
+    # For each trial
+    for i in range(trials):
+        # For every k-tree defined by trees_set
+        for k, trees in enumerate(trees_set):
+            print(j, i, k)
+            # Initialize the ktree model
+            model = ktree_gen(ds=ds, Repeats=trees, Padded=True).cuda()
+
+            # Train and test ktree, assigning loss and acc values
+            loss_curve, acc_curve, loss[j,i,k], acc[j,i,k], model_t = train_test_ktree(model, trainloaders[i],
+                                                                                       validloaders[i], testloader, epochs = epochs, randorder=False)
+    # Save accuracy and loss arrays
+    np.save('./results/ktree_acc_perm_'+str(class_set)+'.npy', acc)
+    np.save('./results/ktree_loss_perm_'+str(class_set)+'.npy', loss)
diff --git a/ktree_rand.py b/ktree_rand.py
new file mode 100644
index 0000000..0805114
--- /dev/null
+++ b/ktree_rand.py
@@ -0,0 +1,85 @@
+# Run Order: 5th, 2 out of 2
+### Train and test k-tree model
+### Saves test loss and test accuracy
+### Uses a randomly shuffled input order (randorder=True)
+### all classes script, early stopping implemented
+
+from custompackage.load_data import *
+from custompackage.load_architecture import *
+from custompackage.traintestloop import *
+
+import torch
+from torch.utils.data import DataLoader
+import torch.optim as optim
+import torch.nn as nn
+import numpy as np
+import math
+import torchvision
+from torchvision import transforms
+from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
+from sklearn.metrics import classification_report, confusion_matrix
+from sklearn.svm import SVC
+from sklearn.linear_model import LogisticRegression
+from sklearn.naive_bayes import GaussianNB
+import matplotlib
+import matplotlib.pyplot as plt
+from torch.optim.optimizer import required
+from torch.utils.data.dataset import random_split
+import scipy
+import os
+import glob
+import pandas as pd
+import pickle
+from pytorchtools import EarlyStopping
+
+# Test space for networks
+# Select Class Set
+class_set = 0
+
+# Initialize settings
+bs = 256
+weighting = 'paired'
+trials = 10
+epochs = 2000
+trees_set = [1,2,4,8,16,32]
+
+# Load class-dataset list
+classes = np.load('./results/classes.npy', allow_pickle=True)
+
+# Optionally restrict to a subset of class pairs:
+# if class_set == 0:
+#     classes = classes[0:2]  # mnist fmnist
+# elif class_set == 1:
+#     classes = classes[2:4]  # kmnist emnist
+# elif class_set == 2:
+#     classes = classes[4:6]  # svhn usps
+# else:
+#     classes = classes[6].reshape(1,-1)
+
+# Initialize final test loss and accuracy variables
+loss = np.zeros((len(classes), trials, len(trees_set)))
+acc = np.zeros((len(classes), trials, len(trees_set)))
+
+# For each dataset enumerated from classes list
+for j, (t1, t2, ds) in enumerate(classes):
+    print(t1, t2, ds)
+    # Load data loaders
+    trainloaders, validloaders, testloader = dataset_weighted_split_all(bs, t1, t2, weighting, trials, ds, permute=False)
+    # Initialize input size for model initialization purposes
+    input_size = trainloaders[0].dataset.tensors[0][0].shape[0]
+    # For each trial
+    for i in range(trials):
+        # For every k-tree defined by trees_set
+        for k, trees in enumerate(trees_set):
+            print(j, i, k)
+            # Initialize the ktree model
+            model = ktree_gen(ds=ds, Repeats=trees, Padded=True).cuda()
+            # Train and test ktree, assigning loss and acc values (random ordered input)
+            loss_curve, acc_curve, loss[j,i,k], acc[j,i,k], model_t, _ = train_test_ktree(model, trainloaders[i],
+                                                                                          validloaders[i], testloader, epochs = epochs, randorder=True)
+    # Save accuracy and loss arrays
+    np.save('./results/ktree_acc_rand_'+str(class_set)+'.npy', acc)
+    np.save('./results/ktree_loss_rand_'+str(class_set)+'.npy', loss)
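The `permute` and `randorder` flags belong to `custompackage` and are not defined in this diff. A hedged reading consistent with the three scripts: `permute=True` applies one fixed pixel permutation when the dataset is built (ktree_perm.py), while `randorder=True` draws a fresh input ordering inside training (ktree_rand.py). Illustrated with `torch.randperm` under those assumptions:

```python
import torch

X = torch.randn(256, 784)                  # a batch of flattened images

# 'perm' condition (assumed): one permutation decided once, reused everywhere
fixed_perm = torch.randperm(X.shape[1])
X_perm = X[:, fixed_perm]

# 'rand' condition (assumed): a new ordering re-drawn for each trial/model
trial_order = torch.randperm(X.shape[1])
X_rand = X[:, trial_order]
```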
diff --git a/lda.py b/lda.py
new file mode 100644
index 0000000..aef2619
--- /dev/null
+++ b/lda.py
@@ -0,0 +1,73 @@
+# Run Order: 3rd, 2 out of 2
+### Train and test lda model
+### Saves test accuracy
+### all classes script
+
+from custompackage.load_data import *
+from custompackage.load_architecture import *
+from custompackage.traintestloop import *
+
+import torch
+from torch.utils.data import DataLoader
+import torch.optim as optim
+import torch.nn as nn
+import numpy as np
+import math
+import torchvision
+from torchvision import transforms
+from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
+from sklearn.metrics import classification_report, confusion_matrix
+from sklearn.svm import SVC
+from sklearn.linear_model import LogisticRegression
+from sklearn.naive_bayes import GaussianNB
+import matplotlib
+import matplotlib.pyplot as plt
+from torch.optim.optimizer import required
+from torch.utils.data.dataset import random_split
+import scipy
+import os
+import glob
+import pandas as pd
+import pickle
+
+# Initialize settings
+bs = 256
+weighting = 'paired'
+trials = 10
+
+# Load class-dataset list
+classes = np.load('./results/classes.npy', allow_pickle=True)
+
+# Initialize test accuracy variable
+score_test = np.zeros((len(classes), trials))
+
+for j, (t1, t2, ds) in enumerate(classes):
+    print(t1, t2, ds)
+    print('lda')
+    # Get correctly labeled and paired class datasets
+    trainloaders, validloaders, testloader = dataset_weighted_split_all(bs, t1, t2, weighting, trials, ds)
+    for i in range(trials):
+        print(j, i)
+        # Reassign datasets
+        X_train = trainloaders[i].dataset.tensors[0]
+        y_train = trainloaders[i].dataset.tensors[1]
+        X_test = testloader.dataset.tensors[0]
+        y_test = testloader.dataset.tensors[1]
+
+        # Initialize LDA
+        lda = LinearDiscriminantAnalysis()
+
+        # Fit to images, labels
+        lda.fit(X_train, y_train)
+
+        # Score accuracy on the held-out test set
+        score_test[j,i] = lda.score(X_test, y_test)
+        print(score_test[j,i])
+
+    # Save accuracy array
+    np.save('./results/lda_score_test.npy', score_test)
diff --git a/results/classes.npy b/results/classes.npy
new file mode 100644
index 0000000..bee1f75
Binary files /dev/null and b/results/classes.npy differ
diff --git a/results/confused_pairs_all.npy b/results/confused_pairs_all.npy
new file mode 100644
index 0000000..79a4d47
Binary files /dev/null and b/results/confused_pairs_all.npy differ
diff --git a/results/confused_pairs_emnist_upper.npy b/results/confused_pairs_emnist_upper.npy
new file mode 100644
index 0000000..b41a939
Binary files /dev/null and b/results/confused_pairs_emnist_upper.npy differ
diff --git a/results/fc_acc.npy b/results/fc_acc.npy
new file mode 100644
index 0000000..6098538
Binary files /dev/null and b/results/fc_acc.npy differ
diff --git a/results/fc_loss.npy b/results/fc_loss.npy
new file mode 100644
index 0000000..0e08405
Binary files /dev/null and b/results/fc_loss.npy differ
diff --git a/results/fcnn_acc_0.npy b/results/fcnn_acc_0.npy
new file mode 100644
index 0000000..816f537
Binary files /dev/null and b/results/fcnn_acc_0.npy differ
diff --git a/results/fcnn_loss_0.npy b/results/fcnn_loss_0.npy
new file mode 100644
index 0000000..0a1a44d
Binary files /dev/null and b/results/fcnn_loss_0.npy differ
diff --git a/results/ktree_acc_orig_0.npy b/results/ktree_acc_orig_0.npy
new file mode 100644
index 0000000..68bbd97
Binary files /dev/null and b/results/ktree_acc_orig_0.npy differ
diff --git a/results/ktree_acc_orig_2.npy b/results/ktree_acc_orig_2.npy
new file mode 100644
index 0000000..d0c0bec
Binary files /dev/null and b/results/ktree_acc_orig_2.npy differ
diff --git a/results/ktree_acc_orig_3.npy b/results/ktree_acc_orig_3.npy
new file mode 100644
index 0000000..9aed148
Binary files /dev/null and b/results/ktree_acc_orig_3.npy differ
diff --git a/results/ktree_acc_perm_0.npy b/results/ktree_acc_perm_0.npy
new file mode 100644
index 0000000..a971b2d
Binary files /dev/null and b/results/ktree_acc_perm_0.npy differ
diff --git a/results/ktree_acc_rand_0.npy b/results/ktree_acc_rand_0.npy
new file mode 100644
index 0000000..0c02d17
Binary files /dev/null and b/results/ktree_acc_rand_0.npy differ
diff --git a/results/ktree_loss_orig_0.npy b/results/ktree_loss_orig_0.npy
new file mode 100644
index 0000000..b7e7c85
Binary files /dev/null and b/results/ktree_loss_orig_0.npy differ
diff --git a/results/ktree_loss_orig_2.npy b/results/ktree_loss_orig_2.npy
new file mode 100644
index 0000000..be45512
Binary files /dev/null and b/results/ktree_loss_orig_2.npy differ
diff --git a/results/ktree_loss_orig_3.npy b/results/ktree_loss_orig_3.npy
new file mode 100644
index 0000000..efcf716
Binary files /dev/null and b/results/ktree_loss_orig_3.npy differ
diff --git a/results/ktree_loss_perm_0.npy b/results/ktree_loss_perm_0.npy
new file mode 100644
index 0000000..4d9a54b
Binary files /dev/null and b/results/ktree_loss_perm_0.npy differ
diff --git a/results/ktree_loss_rand_0.npy b/results/ktree_loss_rand_0.npy
new file mode 100644
index 0000000..9c62f7c
Binary files /dev/null and b/results/ktree_loss_rand_0.npy differ
diff --git a/results/lda_score_test.npy b/results/lda_score_test.npy
new file mode 100644
index 0000000..eec3dad
Binary files /dev/null and b/results/lda_score_test.npy differ
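Once the pipeline has run, every result checked in above is a plain .npy array. A short sketch for summarizing them, with shapes as documented in the scripts: fcnn/ktree accuracy arrays are (n_class_pairs, trials, len(trees_set)) and lda_score_test is (n_class_pairs, trials).

```python
import numpy as np

trees_set = [1, 2, 4, 8, 16, 32]
acc = np.load('./results/ktree_acc_orig_0.npy')    # (pairs, trials, trees)
lda = np.load('./results/lda_score_test.npy')      # (pairs, trials)

mean_acc = acc.mean(axis=1)                        # average over trials
for k, trees in enumerate(trees_set):
    print(f"{trees:2d} trees: {mean_acc[:, k].mean():.3f}")
print(f"LDA baseline: {lda.mean():.3f}")
```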