FixBug: Tensor is not an element of this graph. #11

Open · wants to merge 3 commits into master
65 changes: 65 additions & 0 deletions requirements.txt
@@ -0,0 +1,65 @@
absl-py==0.4.1
astor==0.7.1
beautifulsoup4==4.6.3
bokeh==0.13.0
certifi==2016.2.28
chardet==3.0.4
click==6.7
cloudpickle==0.5.3
cycler==0.10.0
cytoolz==0.9.0.1
dask==0.18.0
decorator==4.3.0
distributed==1.22.0
Flask==1.0.2
gast==0.2.0
grpcio==1.12.1
h5py==2.8.0
heapdict==1.0.0
idna==2.7
imageio==2.3.0
imutils==0.4.6
itsdangerous==0.24
Jinja2==2.10
Keras==2.2.4
Keras-Applications==1.0.6
Keras-Preprocessing==1.0.5
kiwisolver==1.0.1
locket==0.2.0
lxml==3.8.0
Markdown==2.6.11
MarkupSafe==1.0
matplotlib==2.2.3
mkl-fft==1.0.0
mkl-random==1.0.1
msgpack-python==0.5.6
networkx==2.1
numpy==1.14.5
olefile==0.45.1
opencv-python==3.4.3.18
packaging==17.1
pandas==0.23.1
partd==0.3.8
Pillow==5.2.0
protobuf==3.6.0
psutil==5.4.6
pyparsing==2.2.0
python-dateutil==2.7.3
pytz==2018.4
PyWavelets==0.5.2
PyYAML==3.12
qiniu==7.2.2
requests==2.19.1
scikit-image==0.13.1
scipy==1.1.0
six==1.11.0
sortedcontainers==2.0.4
tblib==1.3.2
tensorboard==1.10.0
tensorflow==1.10.0
termcolor==1.1.0
toolz==0.9.0
tornado==5.0.2
urllib3==1.23
Werkzeug==0.14.1
zict==0.1.3
13 changes: 11 additions & 2 deletions server.py
@@ -11,6 +11,10 @@
import base64
import pickle

import tensorflow as tf
global graph,model
graph = tf.get_default_graph()

app = Flask(__name__)

def load_model(bin_dir):
@@ -95,8 +99,13 @@ def parseImage(imgData):
x /= 255

# Predict from model
out = model.predict(x)

with graph.as_default():
out = model.predict(x)
# print('np.argmax(out, axis=1)[1]:'+np.argmax(out, axis=1)[1])
# print('np.argmax(out, axis=1)[0]:'+int(np.argmax(out, axis=1)[0])
print("output:"+str(out[:]))
print(str(np.argmax(out, axis=1)[:]))
print(str(mapping))
# Generate response
response = {'prediction': chr(mapping[(int(np.argmax(out, axis=1)[0]))]),
'confidence': str(max(out[0]) * 100)[:6]}
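
For context, a minimal self-contained sketch of the pattern this change applies (the toy Dense model below is illustrative and stands in for the repo's load_model('bin/model.h5'); TF 1.x / Keras 2.x API). Keras registers a model's tensors on the default graph of the thread that builds or loads it, while Flask dispatches each request on a worker thread whose default graph is a different, empty one, so calling model.predict there raises "Tensor is not an element of this graph." Capturing the graph once at load time and re-entering it around predict avoids the error:

import numpy as np
import tensorflow as tf
from keras.layers import Dense
from keras.models import Sequential

graph = tf.get_default_graph()   # the graph that will own the model's tensors

model = Sequential([Dense(2, input_shape=(4,), activation='softmax')])

def predict(x):
    # Re-enter the load-time graph regardless of which thread calls this,
    # e.g. a Flask request handler.
    with graph.as_default():
        return model.predict(x)

print(predict(np.zeros((1, 4))))   # safe to call from any thread
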
Binary file added simple/emnist-letters.mat
Binary file not shown.
16 changes: 8 additions & 8 deletions templates/index.html
@@ -2,21 +2,21 @@
<html>
<head>
<meta charset="UTF-8">
<title>Handwritten Digit Recognition using Convolutional Neural Networks</title>
<title>@rcooper // github @coopss </title>
<title>使用卷积神经网络( Convolutional Neural Networks )实现的手写字母识别</title>
<title> </title>
<link rel="stylesheet" href="{{url_for('static', filename='style.css')}}">
</head>

<body>
<h1>Alphanumeric Character Recognition using a 2-D Convolutional Neural Network</h1>
<h1> 使用卷积神经网络(2-D Convolutional Neural Networks )实现的手写字母识别 </h1>

<div class="centered">
<canvas id="canvas" width="280" height="280"></canvas>
</div>

<div class="centered">
<input type="button" class="predict_button" value="Predict" style="">
<input type="button" id="clear_button" value="Clear" style="">
<input type="button" class="predict_button" value="预测" style="">
<input type="button" id="clear_button" value="清空" style="">
</div>

<div class="centered">
@@ -29,7 +29,7 @@ <h1 id="confidence"></h1>
<script src="{{url_for('static',filename='index.js')}}"></script>
<script type="text/javascript">
$(".predict_button").click(function(){
$('#result').text(' Predicting...');
$('#result').text(' Predicting...');
var $SCRIPT_ROOT = {{request.script_root|tojson|safe}};
var canvasObj = document.getElementById("canvas");
var img = canvasObj.toDataURL('image/png');
@@ -38,8 +38,8 @@ <h1 id="confidence"></h1>
url: $SCRIPT_ROOT + "/predict/",
data: img,
success: function(data){
$('#result').text('Predicted Output: ' + data.prediction);
$('#confidence').text('Confidence: ' + data.confidence + '%');
$('#result').text('Predicted Output: ' + data.prediction);
$('#confidence').text('Confidence: ' + data.confidence + '%');
}
});
});
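
For reference, the front end above posts the canvas as a base64 PNG data URL to /predict/ and reads back JSON with prediction and confidence fields, matching the response dict built in server.py. A quick way to exercise the route outside the browser (a sketch: it assumes the Flask dev server is running on its default localhost:5000 and substitutes a blank 280x280 image for the canvas snapshot; Pillow and requests are both in requirements.txt):

import base64
import io

import requests
from PIL import Image

# Stand-in for canvas.toDataURL('image/png'): a blank 280x280 grayscale PNG.
buf = io.BytesIO()
Image.new('L', (280, 280), 0).save(buf, format='PNG')
data_url = 'data:image/png;base64,' + base64.b64encode(buf.getvalue()).decode()

resp = requests.post('http://localhost:5000/predict/', data=data_url)
print(resp.json())   # e.g. {'prediction': ..., 'confidence': ...}
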
88 changes: 74 additions & 14 deletions training.py
@@ -1,5 +1,10 @@
# Mute tensorflow debugging information console
import os
from collections import defaultdict

import numpy
from numpy.random import random

os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'

from keras.layers import Conv2D, MaxPooling2D, Convolution2D, Dropout, Dense, Flatten, LSTM
@@ -11,6 +16,7 @@
import keras
import numpy as np


def load_data(mat_file_path, width=28, height=28, max_=None, verbose=True):
''' Load data in from .mat file as specified by the paper.

@@ -29,6 +35,7 @@ def load_data(mat_file_path, width=28, height=28, max_=None, verbose=True):
- ((training_images, training_labels), (testing_images, testing_labels), mapping)

'''

# Local functions
def rotate(img):
# Used to rotate images (for some reason they are transposed on read-in)
@@ -50,13 +57,18 @@ def display(img, threshold=0.5):
# Load convoluted list structure form loadmat
mat = loadmat(mat_file_path)

# Only take the first 6 letters as training classes
training_types = 6

# Load char mapping
mapping = {kv[0]:kv[1:][0] for kv in mat['dataset'][0][0][2]}
pickle.dump(mapping, open('bin/mapping.p', 'wb' ))
mapping = {kv[0]: kv[1:][0] for kv in mat['dataset'][0][0][2][0:training_types]}
pickle.dump(mapping, open('bin/mapping.p', 'wb'))

# Load training data
if max_ == None:
# max_ = 6
max_ = len(mat['dataset'][0][0][0][0][0][0])

training_images = mat['dataset'][0][0][0][0][0][0][:max_].reshape(max_, height, width, 1)
training_labels = mat['dataset'][0][0][0][0][0][1][:max_]

@@ -65,20 +77,23 @@ def display(img, threshold=0.5):
max_ = len(mat['dataset'][0][0][1][0][0][0])
else:
max_ = int(max_ / 6)
max_ = int((max_ / 26) * training_types)
testing_images = mat['dataset'][0][0][1][0][0][0][:max_].reshape(max_, height, width, 1)
testing_labels = mat['dataset'][0][0][1][0][0][1][:max_]

# Reshape training data to be valid
if verbose == True: _len = len(training_images)
for i in range(len(training_images)):
if verbose == True: print('%d/%d (%.2lf%%)' % (i + 1, _len, ((i + 1)/_len) * 100), end='\r')
if verbose == True: print('%d/%d (%.2lf%%)' % (i + 1, _len, ((i + 1) / _len) * 100),
end='\r')
training_images[i] = rotate(training_images[i])
if verbose == True: print('')

# Reshape testing data to be valid
if verbose == True: _len = len(testing_images)
for i in range(len(testing_images)):
if verbose == True: print('%d/%d (%.2lf%%)' % (i + 1, _len, ((i + 1)/_len) * 100), end='\r')
if verbose == True: print('%d/%d (%.2lf%%)' % (i + 1, _len, ((i + 1) / _len) * 100),
end='\r')
testing_images[i] = rotate(testing_images[i])
if verbose == True: print('')

@@ -90,9 +105,47 @@ def display(img, threshold=0.5):
training_images /= 255
testing_images /= 255

# Generate training samples containing only the letters A-F
new_training_images, new_training_labels = \
generate_new_train_data(training_images,
training_labels,
training_types)

nb_classes = len(mapping)
return (
(new_training_images, new_training_labels), (testing_images, testing_labels), mapping,
nb_classes)


def generate_new_train_data(training_images, training_labels, training_types):
trains = defaultdict(list)
for index, image in enumerate(training_images):
image_value = tuple(image)
index_key = int(training_labels[index])
if image_value and 0 < index_key <= training_types:
trains[index_key].append(image_value)
if index_key > 26:
print(trains[index])
# sorted(trains.keys())
print(len(trains))
new_training_images = []
new_training_labels = []
for k, values in trains.items():
for value in values:
new_training_images.append(value)
new_training_labels.append([k])
new_training_labels = np.array(new_training_labels, dtype=np.uint8)
new_training_images = np.array(new_training_images, dtype=np.float32)
shuffle_in_unison_scary(new_training_images, new_training_labels)
return new_training_images, new_training_labels


def shuffle_in_unison_scary(a, b):
rng_state = numpy.random.get_state()
numpy.random.shuffle(a)
numpy.random.set_state(rng_state)
numpy.random.shuffle(b)

return ((training_images, training_labels), (testing_images, testing_labels), mapping, nb_classes)
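
As an aside, generate_new_train_data plus shuffle_in_unison_scary can be expressed more compactly with vectorized NumPy; the following equivalent sketch (illustrative, not part of this PR) keeps only the first training_types classes and applies one shared permutation so every image keeps its label:

import numpy as np

def filter_and_shuffle(images, labels, training_types=6, seed=0):
    # Keep samples whose 1-indexed label falls within the first
    # training_types letters (A-F when training_types == 6).
    mask = (labels[:, 0] >= 1) & (labels[:, 0] <= training_types)
    images, labels = images[mask], labels[mask]
    # One permutation applied to both arrays shuffles them in unison.
    perm = np.random.RandomState(seed).permutation(len(images))
    return images[perm], labels[perm]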

def build_net(training_data, width=28, height=28, verbose=False):
''' Build and train neural network. Also offloads the net in .yaml and the
@@ -112,9 +165,9 @@ def build_net(training_data, width=28, height=28, verbose=False):
input_shape = (height, width, 1)

# Hyperparameters
nb_filters = 32 # number of convolutional filters to use
pool_size = (2, 2) # size of pooling area for max pooling
kernel_size = (3, 3) # convolution kernel size
nb_filters = 32 # number of convolutional filters to use
pool_size = (2, 2) # size of pooling area for max pooling
kernel_size = (3, 3) # convolution kernel size

model = Sequential()
model.add(Convolution2D(nb_filters,
@@ -141,16 +194,20 @@ def build_net(training_data, width=28, height=28, verbose=False):
if verbose == True: print(model.summary())
return model


def train(model, training_data, callback=True, batch_size=256, epochs=10):
(x_train, y_train), (x_test, y_test), mapping, nb_classes = training_data

print(y_train)
print(y_test)
print(nb_classes)
# convert class vectors to binary class matrices
y_train = np_utils.to_categorical(y_train, nb_classes)
y_test = np_utils.to_categorical(y_test, nb_classes)
y_train = np_utils.to_categorical(y_train - 1, nb_classes)
y_test = np_utils.to_categorical(y_test - 1, nb_classes)

if callback == True:
# Callback for analysis in TensorBoard
tbCallBack = keras.callbacks.TensorBoard(log_dir='./Graph', histogram_freq=0, write_graph=True, write_images=True)
tbCallBack = keras.callbacks.TensorBoard(log_dir='./Graph', histogram_freq=0,
write_graph=True, write_images=True)

model.fit(x_train, y_train,
batch_size=batch_size,
@@ -169,20 +226,23 @@ def train(model, training_data, callback=True, batch_size=256, epochs=10):
yaml_file.write(model_yaml)
save_model(model, 'bin/model.h5')
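
One detail worth noting in train above: EMNIST letter labels are 1-indexed ('A' is 1), while np_utils.to_categorical expects 0-based class indices, hence the y_train - 1 and y_test - 1 shift before one-hot encoding. A tiny illustration with assumed values (not from the PR):

import numpy as np
from keras.utils import np_utils

labels = np.array([[1], [3], [6]], dtype=np.uint8)   # 'A', 'C', 'F' (1-indexed)
one_hot = np_utils.to_categorical(labels - 1, 6)     # shift to 0-based classes
print(one_hot[0])   # [1. 0. 0. 0. 0. 0.] -> 'A' maps to class 0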


if __name__ == '__main__':
parser = argparse.ArgumentParser(usage='A training program for classifying the EMNIST dataset')
parser.add_argument('-f', '--file', type=str, help='Path .mat file data', required=True)
parser.add_argument('--width', type=int, default=28, help='Width of the images')
parser.add_argument('--height', type=int, default=28, help='Height of the images')
parser.add_argument('--max', type=int, default=None, help='Max amount of data to use')
parser.add_argument('--epochs', type=int, default=10, help='Number of epochs to train on')
parser.add_argument('--verbose', action='store_true', default=False, help='Enables verbose printing')
parser.add_argument('--verbose', action='store_true', default=False,
help='Enables verbose printing')
args = parser.parse_args()

bin_dir = os.path.dirname(os.path.realpath(__file__)) + '/bin'
if not os.path.exists(bin_dir):
os.makedirs(bin_dir)

training_data = load_data(args.file, width=args.width, height=args.height, max_=args.max, verbose=args.verbose)
training_data = load_data(args.file, width=args.width, height=args.height, max_=args.max,
verbose=args.verbose)
model = build_net(training_data, width=args.width, height=args.height, verbose=args.verbose)
train(model, training_data, epochs=args.epochs)