diff --git a/README.md b/README.md
index 6f695f3..0e26fd1 100644
--- a/README.md
+++ b/README.md
@@ -108,9 +108,14 @@ To set up TensorBox and evaluate the model, follow the instructions below:
     cd tensorbox
     python evaluate.py --weights data/save.ckpt-18000 --test_boxes data/testing_set.json
-
 The evaluated image results are saved under `data/images_testing_set_18000`.
+
+To evaluate a custom image uploaded from the desktop, move the upload-evaluation script into the TensorBox directory:
+
+    mv web_upload_eval.py ./tensorbox
+
+Then open `localhost:8000/draw/modelTest` in the browser.
 
 To use a custom testing dataset, add the image files to `data/images` and replace `testing_set.json` with the custom test set. Each entry should have an `image_path` field that points into `images/` and a `rects` field containing the ground-truth bounding-box coordinates.
 
 Below is a solution for a potential error that might come up during TensorBox installation. The commands should be executed inside the `virtualenv`:
diff --git a/src/custom_scripts/convertToRGB.py b/src/custom_scripts/convertToRGB.py
new file mode 100644
index 0000000..71cfa3b
--- /dev/null
+++ b/src/custom_scripts/convertToRGB.py
@@ -0,0 +1,23 @@
+from PIL import Image
+import os
+import sys
+
+
+def main(argv):
+    # expects a single argument: the path of the image to convert
+    if len(argv) < 1:
+        print 'usage: python convertToRGB.py <image_path>'
+        sys.exit(2)
+    else:
+        try:
+            file_path, file_name = os.path.split(argv[0])
+            im = Image.open(argv[0])
+            rgb_im = im.convert('RGB')
+            # save the RGB copy next to the original as <name>.jpeg
+            rgb_im.save(os.path.join(file_path, os.path.splitext(file_name)[0] + '.jpeg'))
+        except IOError:
+            print "Please enter a valid image"
+            sys.exit(2)
+
+if __name__ == "__main__":
+    main(sys.argv[1:])
\ No newline at end of file
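The README hunk above leaves the exact layout of a custom `testing_set.json` implicit, and `convertToRGB.py` normalizes inputs to RGB JPEGs. Below is a minimal sketch of preparing one image and writing a one-entry test set; the file name and box coordinates are invented, and the `x1`/`y1`/`x2`/`y2` rect keys are an assumption that should be checked against an existing entry in `data/testing_set.json`:

    # Sketch: convert one image to an RGB JPEG (as convertToRGB.py does) and
    # register it in a custom test set. Paths, names, and coordinates are
    # hypothetical; verify the rect key names against data/testing_set.json.
    import json
    import os
    from PIL import Image

    src = 'skull_scan.png'                      # hypothetical input image
    if not os.path.isdir('data/images'):
        os.makedirs('data/images')
    dst = os.path.join('data', 'images', os.path.splitext(os.path.basename(src))[0] + '.jpeg')
    Image.open(src).convert('RGB').save(dst)    # same conversion as convertToRGB.py

    entry = {
        'image_path': 'images/skull_scan.jpeg',                  # as referenced from the data/ folder
        'rects': [{'x1': 40, 'y1': 55, 'x2': 180, 'y2': 210}],   # ground-truth box
    }
    with open('data/testing_set.json', 'w') as f:
        json.dump([entry], f, indent=2)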
diff --git a/src/webapp/TrainImage_Annotate/Annotate/__init__.pyc b/src/webapp/TrainImage_Annotate/Annotate/__init__.pyc
index c1e2aa9..a73606f 100644
Binary files a/src/webapp/TrainImage_Annotate/Annotate/__init__.pyc and b/src/webapp/TrainImage_Annotate/Annotate/__init__.pyc differ
diff --git a/src/webapp/TrainImage_Annotate/Annotate/admin.pyc b/src/webapp/TrainImage_Annotate/Annotate/admin.pyc
index 85f251c..6c4d492 100644
Binary files a/src/webapp/TrainImage_Annotate/Annotate/admin.pyc and b/src/webapp/TrainImage_Annotate/Annotate/admin.pyc differ
diff --git a/src/webapp/TrainImage_Annotate/Annotate/migrations/__init__.pyc b/src/webapp/TrainImage_Annotate/Annotate/migrations/__init__.pyc
index 44a0bce..e5b2991 100644
Binary files a/src/webapp/TrainImage_Annotate/Annotate/migrations/__init__.pyc and b/src/webapp/TrainImage_Annotate/Annotate/migrations/__init__.pyc differ
diff --git a/src/webapp/TrainImage_Annotate/Annotate/templates/download.html b/src/webapp/TrainImage_Annotate/Annotate/templates/download.html
index 07f0a49..511a5d2 100644
--- a/src/webapp/TrainImage_Annotate/Annotate/templates/download.html
+++ b/src/webapp/TrainImage_Annotate/Annotate/templates/download.html
@@ -9,17 +9,114 @@
[The markup in this hunk was lost when the diff was captured; only stray text survived. The change replaces the old placeholder template (a bare "Hello" page) with a "Skull Detection Tool" page: a file-upload control (a "cloud_upload" icon beside a "your image" browse area) that submits the chosen image as the `pic` field handled by the new `imgEval` view below, and a results section that shows the annotated output when the template is rendered with `evalBool` set to True.]
diff --git a/src/webapp/TrainImage_Annotate/Annotate/urls.py b/src/webapp/TrainImage_Annotate/Annotate/urls.py
index a3c4c82..dd3f640 100644
--- a/src/webapp/TrainImage_Annotate/Annotate/urls.py
+++ b/src/webapp/TrainImage_Annotate/Annotate/urls.py
@@ -10,4 +10,6 @@
     url(r'^download/$', views.downloadImgs, name='downloadAnnotImages'),
     url(r'^downloadNMS/$', views.downloadImgsNMS, name='downloadAnnotImages'),
     url(r'^downloadall/$', views.downloadAll, name='downloadAllImages'),
+    url(r'^modelTest/$', views.modelTest, name='modelTest'),
+    url(r'^imageEval/$', views.imgEval, name='evalImg'),
 ]
diff --git a/src/webapp/TrainImage_Annotate/Annotate/urls.pyc b/src/webapp/TrainImage_Annotate/Annotate/urls.pyc
index aad7120..f68bf1d 100644
Binary files a/src/webapp/TrainImage_Annotate/Annotate/urls.pyc and b/src/webapp/TrainImage_Annotate/Annotate/urls.pyc differ
diff --git a/src/webapp/TrainImage_Annotate/Annotate/views.py b/src/webapp/TrainImage_Annotate/Annotate/views.py
index 1eec7b1..3b4206b 100644
--- a/src/webapp/TrainImage_Annotate/Annotate/views.py
+++ b/src/webapp/TrainImage_Annotate/Annotate/views.py
@@ -4,6 +4,7 @@
 import re
 import csv
 import os
+import sys
 import zipfile
 import StringIO
 from PIL import Image, ImageDraw
@@ -11,6 +12,15 @@
 import fnmatch
 import math
 
+'''
+If the directory that holds web_upload_eval.py exists, add it to the
+module search path and import all of the functions from that file.
+'''
+tensorbox_path = os.path.abspath("../../../tensorbox/")
+if os.path.exists(tensorbox_path):
+    sys.path.append(tensorbox_path)
+    from web_upload_eval import *
+
 def prepareArrayNMS(filename):
 
     # prepares the array that is to be passed to the
@@ -228,6 +238,51 @@ def non_max_suppression_slow(boxes, overlapThresh, areas=None):
     return boxes[pick].astype("int"), pick
 
+@csrf_exempt
+def modelTest(request):
+    # GET: just render the upload page; nothing has been evaluated yet
+    return render(request, 'download.html', {'evalBool': False})
+
+@csrf_exempt
+def imgEval(request):
+    '''
+    If the request is a POST, save the uploaded image and run it through
+    the ML model; otherwise fall back to the plain upload page.
+    '''
+    if request.method == 'POST':
+        handle_uploaded_file(request.FILES['pic'], str(request.FILES['pic']))
+        check_main()
+        return render(request, 'download.html', {'evalBool': True})
+    return render(request, 'download.html', {'evalBool': False})
+
+def handle_uploaded_file(file, filename):
+    '''
+    The image to be evaluated is always saved as 'image_test.jpeg' so the
+    evaluation script can find and process it under a fixed name.
+    '''
+    # create the media_1 folder if it does not exist
+    if not os.path.exists('Annotate/static/media_1/'):
+        os.mkdir('Annotate/static/media_1/')
+    # save the uploaded image to the media_1 folder
+    with open('Annotate/static/media_1/image_test.jpeg', 'wb+') as destination:
+        for chunk in file.chunks():
+            destination.write(chunk)
+
+    # the path from which the model will take the input image
+    if not os.path.exists('uploadFils/'):
+        os.mkdir('uploadFils/')
+    with open('uploadFils/image_test.jpeg', 'wb+') as destination:
+        for chunk in file.chunks():
+            destination.write(chunk)
+
 def viewAnnotImgs(request):
 
     # lists all the training images without Non-Maximum Suppression
diff --git a/src/webapp/TrainImage_Annotate/Annotate/views.pyc b/src/webapp/TrainImage_Annotate/Annotate/views.pyc
index 47ec5de..a253519 100644
Binary files a/src/webapp/TrainImage_Annotate/Annotate/views.pyc and b/src/webapp/TrainImage_Annotate/Annotate/views.pyc differ
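The new URL patterns and views can be exercised without the browser form. A minimal sketch using Django's test client, run from `python manage.py shell` inside `src/webapp/TrainImage_Annotate`; the `/draw/` prefix is assumed from the README's `localhost:8000/draw/modelTest`, and the image path is hypothetical:

    # Assumes the project's Django settings are already loaded (e.g. inside
    # `python manage.py shell`) and that the Annotate URLs are mounted under /draw/.
    from django.test import Client

    client = Client()

    # GET the upload page; modelTest renders download.html with evalBool=False
    print(client.get('/draw/modelTest/').status_code)

    # POST an image to imgEval; 'pic' is the field name read from request.FILES.
    # Note this kicks off the full TensorFlow evaluation, so it is slow.
    with open('test_skull.jpeg', 'rb') as fp:      # hypothetical local image
        response = client.post('/draw/imageEval/', {'pic': fp})
    print(response.status_code)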
diff --git a/src/webapp/TrainImage_Annotate/TrainImage_Annotate/__init__.pyc b/src/webapp/TrainImage_Annotate/TrainImage_Annotate/__init__.pyc
index 3740a97..92d7b81 100644
Binary files a/src/webapp/TrainImage_Annotate/TrainImage_Annotate/__init__.pyc and b/src/webapp/TrainImage_Annotate/TrainImage_Annotate/__init__.pyc differ
diff --git a/src/webapp/TrainImage_Annotate/TrainImage_Annotate/settings.pyc b/src/webapp/TrainImage_Annotate/TrainImage_Annotate/settings.pyc
index b33eae6..21b1d58 100644
Binary files a/src/webapp/TrainImage_Annotate/TrainImage_Annotate/settings.pyc and b/src/webapp/TrainImage_Annotate/TrainImage_Annotate/settings.pyc differ
diff --git a/src/webapp/TrainImage_Annotate/TrainImage_Annotate/urls.pyc b/src/webapp/TrainImage_Annotate/TrainImage_Annotate/urls.pyc
index 1647805..8bcdb94 100644
Binary files a/src/webapp/TrainImage_Annotate/TrainImage_Annotate/urls.pyc and b/src/webapp/TrainImage_Annotate/TrainImage_Annotate/urls.pyc differ
diff --git a/src/webapp/TrainImage_Annotate/TrainImage_Annotate/wsgi.pyc b/src/webapp/TrainImage_Annotate/TrainImage_Annotate/wsgi.pyc
index 674679f..56d62ba 100644
Binary files a/src/webapp/TrainImage_Annotate/TrainImage_Annotate/wsgi.pyc and b/src/webapp/TrainImage_Annotate/TrainImage_Annotate/wsgi.pyc differ
diff --git a/web_upload_eval.py b/web_upload_eval.py
new file mode 100644
index 0000000..93b1e5e
--- /dev/null
+++ b/web_upload_eval.py
@@ -0,0 +1,98 @@
+import tensorflow as tf
+import os
+import json
+import subprocess
+from scipy.misc import imread, imresize
+from scipy import misc
+from train import build_forward
+from utils.annolist import AnnotationLib as al
+from utils.train_utils import add_rectangles, rescale_boxes
+
+import cv2
+import argparse
+
+abs_path_model = os.path.abspath('../../../tensorbox/data')
+
+def get_image_dir():
+    # derive the output directory name from the checkpoint iteration and test set name
+    weights_iteration = int(os.path.abspath(abs_path_model+'/save.ckpt-18000').split('-')[-1])
+    expname = '_'
+    image_dir = '%s/images_%s_%d%s' % (os.path.dirname(abs_path_model+'/save.ckpt-18000'), os.path.basename(abs_path_model+'/testing_set_present.json')[:-5], weights_iteration, expname)
+    return image_dir
+
+def get_results(H):
+    tf.reset_default_graph()
+    x_in = tf.placeholder(tf.float32, name='x_in', shape=[H['image_height'], H['image_width'], 3])
+    if H['use_rezoom']:
+        pred_boxes, pred_logits, pred_confidences, pred_confs_deltas, pred_boxes_deltas = build_forward(H, tf.expand_dims(x_in, 0), 'test', reuse=None)
+        grid_area = H['grid_height'] * H['grid_width']
+        pred_confidences = tf.reshape(tf.nn.softmax(tf.reshape(pred_confs_deltas, [grid_area * H['rnn_len'], 2])), [grid_area, H['rnn_len'], 2])
+        if H['reregress']:
+            pred_boxes = pred_boxes + pred_boxes_deltas
+    else:
+        pred_boxes, pred_logits, pred_confidences = build_forward(H, tf.expand_dims(x_in, 0), 'test', reuse=None)
+    saver = tf.train.Saver()
+    with tf.Session() as sess:
+        sess.run(tf.global_variables_initializer())
+        saver.restore(sess, abs_path_model+'/save.ckpt-18000')
+
+        pred_annolist = al.AnnoList()
+
+        true_annolist = al.parse(abs_path_model+'/testing_set_present.json')
+        data_dir = os.path.dirname(abs_path_model+'/testing_set_present.json')
+        image_dir = get_image_dir()
+        subprocess.call('mkdir -p %s' % image_dir, shell=True)
+        for i in range(len(true_annolist)):
+            true_anno = true_annolist[i]
+            # the uploaded image is always written to this fixed path by the web app
+            orig_img = imread('../../../src/webapp/TrainImage_Annotate/uploadFils/image_test.jpeg')
+            img = imresize(orig_img, (H["image_height"], H["image_width"]), interp='cubic')
+            feed = {x_in: img}
+            (np_pred_boxes, np_pred_confidences) = sess.run([pred_boxes, pred_confidences], feed_dict=feed)
+            pred_anno = al.Annotation()
+            pred_anno.imageName = true_anno.imageName
+            new_img, rects = add_rectangles(H, [img], np_pred_confidences, np_pred_boxes,
+                                            use_stitching=True, rnn_len=H['rnn_len'], min_conf=0.2, tau=0.25, show_suppressed=False)
+
+            pred_anno.rects = rects
+            pred_anno.imagePath = os.path.abspath(data_dir)
+            pred_anno = rescale_boxes((H["image_height"], H["image_width"]), pred_anno, orig_img.shape[0], orig_img.shape[1])
+            pred_annolist.append(pred_anno)
+
+            # save the annotated result where the Django app serves it from
+            imname = '../../../src/webapp/TrainImage_Annotate/Annotate/static/media_1/imageTest.jpeg'
+            misc.imsave(imname, new_img)
+            if i % 25 == 0:
+                print(i)
+    return pred_annolist, true_annolist
+
+def check_main():
+
+    os.environ['CUDA_VISIBLE_DEVICES'] = str(0)
+    hypes_file = abs_path_model+'/hypes.json'
+
+    with open(hypes_file, 'r') as f:
+        H = json.load(f)
+    expname = '_'
+    pred_boxes = '%s.%s%s' % (abs_path_model+'/save.ckpt-18000', expname, os.path.basename(abs_path_model+'/testing_set_present.json'))
+    true_boxes = '%s.gt_%s%s' % (abs_path_model+'/save.ckpt-18000', expname, os.path.basename(abs_path_model+'/testing_set_present.json'))
+
+    pred_annolist, true_annolist = get_results(H)
+    pred_annolist.save(pred_boxes)
+    true_annolist.save(true_boxes)
+
+    try:
+        rpc_cmd = './utils/annolist/doRPC.py --minOverlap %f %s %s' % (0.5, true_boxes, pred_boxes)
+        print('$ %s' % rpc_cmd)
+        rpc_output = subprocess.check_output(rpc_cmd, shell=True)
+        print(rpc_output)
+        txt_file = [line for line in rpc_output.split('\n') if line.strip()][-1]
+        output_png = '%s/results.png' % get_image_dir()
+        plot_cmd = './utils/annolist/plotSimple.py %s --output %s' % (txt_file, output_png)
+        print('$ %s' % plot_cmd)
+        plot_output = subprocess.check_output(plot_cmd, shell=True)
+        print('output results at: %s' % plot_output)
+    except Exception as e:
+        print(e)
+
+if __name__ == '__main__':
+    check_main()
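`web_upload_eval.py` imports `argparse` (and `cv2`) without using them, and the checkpoint, hypes, and upload locations are hard-coded relative paths. A hedged sketch of how the unused `argparse` import could be put to work; the flag names and defaults are invented, and `check_main()`/`get_results()` would need to accept these values instead of reading the module-level constants:

    # Illustrative only: flag names and defaults are not part of the current script.
    import argparse

    def parse_eval_args():
        parser = argparse.ArgumentParser(description='Evaluate one uploaded image with TensorBox')
        parser.add_argument('--weights', default='../../../tensorbox/data/save.ckpt-18000',
                            help='checkpoint to restore')
        parser.add_argument('--hypes', default='../../../tensorbox/data/hypes.json',
                            help='hyperparameters json used during training')
        parser.add_argument('--image', default='uploadFils/image_test.jpeg',
                            help='image written by the Django upload view')
        return parser.parse_args()

    if __name__ == '__main__':
        args = parse_eval_args()
        print('%s %s %s' % (args.weights, args.hypes, args.image))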