The value of knowledge lies not in possession, but in sharing.


A Quick Keras Tutorial

Most of the projects I have worked on recently are built with Keras and TensorFlow together; to be precise, Keras serves as the front-end API while TensorFlow acts as the backend for training, testing, and deploying neural network models.

Keras is a high-level neural network API written in pure Python that runs on top of the TensorFlow, Theano, and CNTK backends. Keras was born for rapid experimentation: it lets you turn an idea into results quickly. Choose Keras if you need any of the following:

  • Easy and fast prototyping (Keras is highly modular, minimalist, and extensible)
  • Support for CNNs and RNNs, or combinations of the two
  • Seamless switching between CPU and GPU

Simply put: it is fast.

Quick start:

from keras.models import Sequential
from keras.layers import Dense

# Build the model
model = Sequential()
model.add(Dense(units=64, activation='relu', input_dim=100))
model.add(Dense(units=10, activation='softmax'))

# Configure the loss, optimizer, and metrics
model.compile(loss='categorical_crossentropy',
              optimizer='sgd',
              metrics=['accuracy'])

# Train the model
model.fit(data_train, labels_train, epochs=5, batch_size=32)

# Evaluate model performance
loss_and_metrics = model.evaluate(data_test, labels_test, batch_size=128)

# Predict on new data
classes = model.predict(data_test, batch_size=128)
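
The snippet above references data_train, labels_train, data_test, and labels_test without defining them. A minimal sketch of how such placeholder data could be generated with NumPy, purely for illustration (the shapes simply match the input_dim=100 / 10-class model above, and these lines would have to run before model.fit):

import numpy as np
from keras.utils import to_categorical

# Random placeholder data: 100-dimensional inputs, 10 one-hot classes
data_train = np.random.random((1000, 100))
labels_train = to_categorical(np.random.randint(10, size=(1000, 1)), num_classes=10)

data_test = np.random.random((200, 100))
labels_test = to_categorical(np.random.randint(10, size=(200, 1)), num_classes=10)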

Project Example

After some time spent on project development, here is the directory layout I recommend for a deep learning project:

.
├── datasets
│   └── xxxx              // datasets
├── models
│   ├── model-weights.h5  // saved model weights
│   └── pretrain_model
│       └── keras.h5      // pretrained model
├── model.py              // network architecture
├── train.py              // training script
└── test.py               // testing script

An example of the model.py script is shown below (not guaranteed to run as-is; the same applies to the scripts that follow):

#!/usr/bin/env python
# -*- coding:utf-8 -*-
from keras.models import Model
from keras.layers.core import Dense, Dropout, Activation, Reshape, Permute
from keras.layers.convolutional import Conv2D, Conv2DTranspose, ZeroPadding2D
from keras.layers.pooling import AveragePooling2D, GlobalAveragePooling2D
from keras.layers import Input, Flatten
from keras.layers.merge import concatenate
from keras.layers.normalization import BatchNormalization
from keras.regularizers import l2
from keras.layers.wrappers import TimeDistributed


def conv_block(input, growth_rate, dropout_rate=None, weight_decay=1e-4):
    # BN -> ReLU -> 3x3 conv: the basic unit of a dense block
    x = BatchNormalization(axis=-1, epsilon=1.1e-5)(input)
    x = Activation('relu')(x)
    x = Conv2D(growth_rate, (3, 3), kernel_initializer='he_normal', padding='same')(x)
    if dropout_rate:
        x = Dropout(dropout_rate)(x)
    return x


def dense_block(x, nb_layers, nb_filter, growth_rate, dropout_rate=0.2, weight_decay=1e-4):
    # Stack nb_layers conv blocks, concatenating each output with its input (DenseNet-style)
    for i in range(nb_layers):
        cb = conv_block(x, growth_rate, dropout_rate, weight_decay)
        x = concatenate([x, cb], axis=-1)
        nb_filter += growth_rate
    return x, nb_filter


def transition_block(input, nb_filter, dropout_rate=None, pooltype=1, weight_decay=1e-4):
    # 1x1 conv to compress channels, followed by average pooling
    x = BatchNormalization(axis=-1, epsilon=1.1e-5)(input)
    x = Activation('relu')(x)
    x = Conv2D(nb_filter, (1, 1), kernel_initializer='he_normal', padding='same', use_bias=False,
               kernel_regularizer=l2(weight_decay))(x)

    if dropout_rate:
        x = Dropout(dropout_rate)(x)

    if pooltype == 2:
        x = AveragePooling2D((2, 2), strides=(2, 2))(x)
    elif pooltype == 1:
        x = ZeroPadding2D(padding=(0, 1))(x)
        x = AveragePooling2D((2, 2), strides=(2, 1))(x)
    elif pooltype == 3:
        x = AveragePooling2D((2, 2), strides=(2, 1))(x)
    return x, nb_filter


def dense_cnn(input, nclass):
    _dropout_rate = 0.2
    _weight_decay = 1e-4

    _nb_filter = 64
    # conv 64 5*5 s=2
    x = Conv2D(_nb_filter, (5, 5), strides=(2, 2), kernel_initializer='he_normal', padding='same',
               use_bias=False, kernel_regularizer=l2(_weight_decay))(input)

    # 64 + 8 * 8 = 128
    x, _nb_filter = dense_block(x, 8, _nb_filter, 8, None, _weight_decay)
    # 128
    x, _nb_filter = transition_block(x, 128, _dropout_rate, 2, _weight_decay)

    # 128 + 8 * 8 = 192
    x, _nb_filter = dense_block(x, 8, _nb_filter, 8, None, _weight_decay)
    # 192 -> 128
    x, _nb_filter = transition_block(x, 128, _dropout_rate, 2, _weight_decay)

    # 128 + 8 * 8 = 192
    x, _nb_filter = dense_block(x, 8, _nb_filter, 8, None, _weight_decay)

    x = BatchNormalization(axis=-1, epsilon=1.1e-5)(x)
    x = Activation('relu')(x)

    # Treat the width dimension as time steps for the downstream CTC loss
    x = Permute((2, 1, 3), name='permute')(x)
    x = TimeDistributed(Flatten(), name='flatten')(x)
    y_pred = Dense(nclass, name='out', activation='softmax')(x)

    # basemodel = Model(inputs=input, outputs=y_pred)
    # basemodel.summary()

    return y_pred


def dense_blstm(input):
    pass


input = Input(shape=(32, 280, 1), name='the_input')
dense_cnn(input, 5000)
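
As a quick sanity check, the output of dense_cnn can be wrapped in a Model and summarized, mirroring the two commented-out lines inside the function. This is a minimal sketch, assuming it runs inside model.py (or after from model import dense_cnn); the 5000-class output is just the example value used at the bottom of the script:

from keras.layers import Input
from keras.models import Model

inp = Input(shape=(32, 280, 1), name='the_input')
out = dense_cnn(inp, 5000)
# The width is reduced by a factor of 8, so each remaining time step gets its own
# softmax over the 5000 classes; this is the tensor the CTC loss in train.py consumes.
Model(inputs=inp, outputs=out).summary()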

An example of the train.py script is shown below:

#!/usr/bin/env python
# -*- coding:utf-8 -*-
import os
import json
import threading
import numpy as np
from PIL import Image

import tensorflow as tf
from keras import losses
from keras import backend as K
from keras.utils import plot_model
from keras.preprocessing import image
from keras.preprocessing.sequence import pad_sequences
from keras.layers import Input, Dense, Flatten
from keras.layers.core import Reshape, Masking, Lambda, Permute
from keras.layers.recurrent import GRU, LSTM
from keras.layers.wrappers import Bidirectional, TimeDistributed
from keras.layers.normalization import BatchNormalization
from keras.layers.convolutional import Conv2D, MaxPooling2D, ZeroPadding2D
from keras.optimizers import SGD, Adam
from keras.models import Model
from keras.callbacks import EarlyStopping, ModelCheckpoint, LearningRateScheduler, TensorBoard

from imp import reload
import model
import io
import sys

img_h = 32
img_w = 280
batch_size = 128
maxlabellength = 10


def get_session(gpu_fraction=1.0):
    # Create a TensorFlow session that only grabs the requested fraction of GPU memory
    num_threads = os.environ.get('OMP_NUM_THREADS')
    gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=gpu_fraction)

    if num_threads:
        return tf.Session(config=tf.ConfigProto(
            gpu_options=gpu_options, intra_op_parallelism_threads=num_threads))
    else:
        return tf.Session(config=tf.ConfigProto(gpu_options=gpu_options))


def readfile(filename):
    # Each line: "<image filename> <label index> <label index> ..."
    res = []
    with open(filename, 'r') as f:
        lines = f.readlines()
        for i in lines:
            res.append(i.strip())
    dic = {}
    for i in res:
        p = i.split(' ')
        dic[p[0]] = p[1:]
    return dic


class random_uniform_num():
    """
    Uniform random sampling that guarantees every sample appears exactly once per epoch.
    """
    def __init__(self, total):
        self.total = total
        self.range = [i for i in range(total)]
        np.random.shuffle(self.range)
        self.index = 0

    def get(self, batchsize):
        r_n = []
        if self.index + batchsize > self.total:
            r_n_1 = self.range[self.index:self.total]
            np.random.shuffle(self.range)
            self.index = (self.index + batchsize) - self.total
            r_n_2 = self.range[0:self.index]
            r_n.extend(r_n_1)
            r_n.extend(r_n_2)
        else:
            r_n = self.range[self.index:self.index + batchsize]
            self.index = self.index + batchsize

        return r_n


def gen(data_file, image_path, batchsize=128, maxlabellength=10, imagesize=(32, 280)):
    # Generator yielding (inputs, outputs) batches in the format expected by the CTC loss layer
    image_label = readfile(data_file)
    _imagefile = [i for i, j in image_label.items()]
    x = np.zeros((batchsize, imagesize[0], imagesize[1], 1), dtype=np.float)
    labels = np.ones([batchsize, maxlabellength]) * 10000
    input_length = np.zeros([batchsize, 1])
    label_length = np.zeros([batchsize, 1])

    r_n = random_uniform_num(len(_imagefile))
    _imagefile = np.array(_imagefile)
    while 1:
        shufimagefile = _imagefile[r_n.get(batchsize)]
        for i, j in enumerate(shufimagefile):
            img1 = Image.open(os.path.join(image_path, j)).convert('L')
            img = np.array(img1, 'f') / 255.0 - 0.5

            x[i] = np.expand_dims(img, axis=2)
            # print('imag:shape', img.shape)
            label = image_label[j]
            label_length[i] = len(label)

            if len(label) <= 0:
                print("len < 0", j)
            input_length[i] = imagesize[1] // 8
            labels[i, :len(label)] = [int(k) - 1 for k in label]

        inputs = {'the_input': x,
                  'the_labels': labels,
                  'input_length': input_length,
                  'label_length': label_length,
                  }
        outputs = {'ctc': np.zeros([batchsize])}
        yield (inputs, outputs)


def ctc_lambda_func(args):
    y_pred, labels, input_length, label_length = args
    return K.ctc_batch_cost(labels, y_pred, input_length, label_length)


def get_model(img_h, nclass):
    input = Input(shape=(img_h, None, 1), name='the_input')
    y_pred = model.dense_cnn(input, nclass)

    basemodel = Model(inputs=input, outputs=y_pred)
    basemodel.summary()

    labels = Input(name='the_labels', shape=[None], dtype='float32')
    input_length = Input(name='input_length', shape=[1], dtype='int64')
    label_length = Input(name='label_length', shape=[1], dtype='int64')

    loss_out = Lambda(ctc_lambda_func, output_shape=(1,), name='ctc')([y_pred, labels, input_length, label_length])

    # Use a name other than `model` here so the imported model module is not shadowed inside this function
    ctc_model = Model(inputs=[input, labels, input_length, label_length], outputs=loss_out)
    ctc_model.compile(loss={'ctc': lambda y_true, y_pred: y_pred}, optimizer='adam', metrics=['accuracy'])

    return basemodel, ctc_model


if __name__ == '__main__':
    char_set = io.open('char_std_5990.txt', 'r', encoding='utf-8').readlines()
    char_set = ''.join([ch.strip('\n') for ch in char_set][1:])
    nclass = len(char_set)

    K.set_session(get_session())
    reload(model)
    basemodel, model = get_model(img_h, nclass)

    modelPath = './models/pretrain_model/keras.h5'
    if os.path.exists(modelPath):
        print("Loading model weights...")
        basemodel.load_weights(modelPath)
        print('done!')

    train_loader = gen('data_train.txt', './images', batchsize=batch_size, maxlabellength=maxlabellength, imagesize=(img_h, img_w))
    test_loader = gen('data_test.txt', './images', batchsize=batch_size, maxlabellength=maxlabellength, imagesize=(img_h, img_w))

    checkpoint = ModelCheckpoint(filepath='./models/model-weights-{epoch:02d}-{val_loss:.2f}.h5', monitor='val_loss', save_best_only=False, save_weights_only=True)
    lr_schedule = lambda epoch: 0.0005 * 0.4 ** epoch
    learning_rate = np.array([lr_schedule(i) for i in range(10)])
    changelr = LearningRateScheduler(lambda epoch: float(learning_rate[epoch]))
    earlystop = EarlyStopping(monitor='val_loss', patience=2, verbose=1)
    tensorboard = TensorBoard(log_dir='./models/logs', write_graph=True)

    print('-----------Start training-----------')
    model.fit_generator(train_loader,
                        steps_per_epoch=3607567 // batch_size,
                        epochs=10,
                        initial_epoch=0,
                        validation_data=test_loader,
                        validation_steps=36440 // batch_size,
                        callbacks=[checkpoint, earlystop, changelr, tensorboard])
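
For reference, readfile() and gen() expect a plain-text label file in which each line is an image filename followed by space-separated, 1-based indices into the character set file (gen() subtracts 1 before feeding them to the CTC loss). A hypothetical data_train.txt might look like this (filenames and indices are made up):

img_0001.jpg 23 154 7 90
img_0002.jpg 5 5 301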

An example of the test.py script is shown below (it assumes a keys module that provides the character set used during training):

#!/usr/bin/env python
# -*- coding:utf-8 -*-
import os
import numpy as np
from imp import reload
from PIL import Image, ImageOps

from keras.layers import Input
from keras.models import Model
# import keras.backend as K

from . import model
# `keys` is assumed to be a module in the same package whose `alphabet`
# attribute holds the character set used during training
from . import keys

reload(model)

characters = keys.alphabet[:]
characters = characters[1:]
nclass = len(characters)

input = Input(shape=(32, None, 1), name='the_input')
y_pred = model.dense_cnn(input, nclass)
basemodel = Model(inputs=input, outputs=y_pred)

# Path to the trained weights (see the directory layout above); adjust to the checkpoint you want
modelPath = os.path.join(os.getcwd(), './models/model-weights.h5')
if os.path.exists(modelPath):
    basemodel.load_weights(modelPath)


def decode(pred):
    # Greedy CTC decoding: drop blanks (index nclass - 1) and collapse consecutive repeats
    char_list = []
    pred_text = pred.argmax(axis=2)[0]
    for i in range(len(pred_text)):
        if pred_text[i] != nclass - 1 and ((not (i > 0 and pred_text[i] == pred_text[i - 1])) or (i > 1 and pred_text[i] == pred_text[i - 2])):
            char_list.append(characters[pred_text[i]])
    return u''.join(char_list)


def predict(img):
    # Resize to a height of 32 while keeping the aspect ratio, then normalize to [-0.5, 0.5]
    width, height = img.size[0], img.size[1]
    scale = height * 1.0 / 32
    width = int(width / scale)

    img = img.resize([width, 32], Image.ANTIALIAS)
    img = np.array(img).astype(np.float32) / 255.0 - 0.5

    X = img.reshape([1, 32, width, 1])

    y_pred = basemodel.predict(X)
    y_pred = y_pred[:, :, :]

    # out = K.get_value(K.ctc_decode(y_pred, input_length=np.ones(y_pred.shape[0]) * y_pred.shape[1])[0][0])[:, :]
    # out = u''.join([characters[x] for x in out[0]])
    out = decode(y_pred)

    return out
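
A hypothetical usage example (the image path is made up; the image is converted to grayscale because the model expects a single channel):

from PIL import Image

img = Image.open('sample.jpg').convert('L')  # hypothetical input image, loaded as grayscale
print(predict(img))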
