用 Keras 功能 API 进行深度学习
顺序(Sequential)API 和功能(Functional)API 之间的区别。如何使用功能 API 定义简单的多层感知器、卷积神经网络和递归神经网络模型。如何定义具有共享层以及多输入、多输出的更复杂的模型。
Keras顺序模型 Keras功能模型 标准网络模型 共享层模型 多种输入和输出模型 最佳实践 新增:关于Functional API Python语法的说明
# Sequential model defined in one shot: pass the layer list to the constructor.
from keras.models import Sequential
from keras.layers import Dense
# 1 input feature -> Dense(2) hidden layer -> Dense(1) output.
model = Sequential([Dense(2, input_dim=1), Dense(1)])
# The same two-layer Sequential model, built incrementally with add().
from keras.models import Sequential
from keras.layers import Dense
model = Sequential()
# The first layer declares the input shape via input_dim.
model.add(Dense(2, input_dim=1))
model.add(Dense(1))
(2,)
时,形状元组总是以一个悬空的最后一维来定义;例如 (2,) 表示每个样本有 2 个输入特征:from keras.layers import Input
# Standalone input layer: each sample is a 2-element feature vector
# (the batch dimension is implicit).
visible = Input(shape=(2,))
# Connecting layers: a layer is called on the tensor produced by another layer.
from keras.layers import Input
from keras.layers import Dense
visible = Input(shape=(2,))
# Dense(2)(visible): the Dense layer is applied to the input tensor,
# yielding a new tensor in the graph.
hidden = Dense(2)(visible)
# Creating the model: Model ties the graph together by naming its
# input and output tensors.
from keras.models import Model
from keras.layers import Input
from keras.layers import Dense
visible = Input(shape=(2,))
hidden = Dense(2)(visible)
model = Model(inputs=visible, outputs=hidden)
多层感知器
# Multilayer Perceptron
# 10 input features -> three ReLU hidden layers (10, 20, 10 units)
# -> 1 sigmoid output for binary classification.
from keras.utils import plot_model
from keras.models import Model
from keras.layers import Input
from keras.layers import Dense
# Input layer: each sample is a vector of 10 features (batch size is implicit).
visible = Input(shape=(10,))
hidden1 = Dense(10, activation='relu')(visible)
hidden2 = Dense(20, activation='relu')(hidden1)
hidden3 = Dense(10, activation='relu')(hidden2)
output = Dense(1, activation='sigmoid')(hidden3)
model = Model(inputs=visible, outputs=output)
# summarize layers: summary() prints itself and returns None,
# so wrapping it in print() would emit a spurious "None" line.
model.summary()
# plot graph
plot_model(model, to_file='multilayer_perceptron_graph.png')
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_1 (InputLayer) (None, 10) 0
_________________________________________________________________
dense_1 (Dense) (None, 10) 110
_________________________________________________________________
dense_2 (Dense) (None, 20) 220
_________________________________________________________________
dense_3 (Dense) (None, 10) 210
_________________________________________________________________
dense_4 (Dense) (None, 1) 11
=================================================================
Total params: 551
Trainable params: 551
Non-trainable params: 0
_________________________________________________________________
卷积神经网络
# Convolutional Neural Network
# 64x64 single-channel image input -> two conv/pool stages -> flatten
# -> dense head -> 1 sigmoid output.
from keras.utils import plot_model
from keras.models import Model
from keras.layers import Input
from keras.layers import Dense
from keras.layers import Flatten
# Conv2D / MaxPooling2D are imported from keras.layers directly; the old
# keras.layers.convolutional and keras.layers.pooling paths were removed
# in modern Keras.
from keras.layers import Conv2D
from keras.layers import MaxPooling2D
visible = Input(shape=(64, 64, 1))
conv1 = Conv2D(32, kernel_size=4, activation='relu')(visible)
pool1 = MaxPooling2D(pool_size=(2, 2))(conv1)
conv2 = Conv2D(16, kernel_size=4, activation='relu')(pool1)
pool2 = MaxPooling2D(pool_size=(2, 2))(conv2)
flat = Flatten()(pool2)
hidden1 = Dense(10, activation='relu')(flat)
output = Dense(1, activation='sigmoid')(hidden1)
model = Model(inputs=visible, outputs=output)
# summarize layers: summary() prints itself; no print() wrapper needed.
model.summary()
# plot graph
plot_model(model, to_file='convolutional_neural_network.png')
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_1 (InputLayer) (None, 64, 64, 1) 0
_________________________________________________________________
conv2d_1 (Conv2D) (None, 61, 61, 32) 544
_________________________________________________________________
max_pooling2d_1 (MaxPooling2 (None, 30, 30, 32) 0
_________________________________________________________________
conv2d_2 (Conv2D) (None, 27, 27, 16) 8208
_________________________________________________________________
max_pooling2d_2 (MaxPooling2 (None, 13, 13, 16) 0
_________________________________________________________________
flatten_1 (Flatten) (None, 2704) 0
_________________________________________________________________
dense_1 (Dense) (None, 10) 27050
_________________________________________________________________
dense_2 (Dense) (None, 1) 11
=================================================================
Total params: 35,813
Trainable params: 35,813
Non-trainable params: 0
_________________________________________________________________
递归神经网络
# Recurrent Neural Network
# Sequence of 100 time steps (1 feature each) -> LSTM -> dense head
# -> 1 sigmoid output.
from keras.utils import plot_model
from keras.models import Model
from keras.layers import Input
from keras.layers import Dense
# LSTM is imported from keras.layers directly; the old
# keras.layers.recurrent path was removed in modern Keras.
from keras.layers import LSTM
visible = Input(shape=(100, 1))
hidden1 = LSTM(10)(visible)
hidden2 = Dense(10, activation='relu')(hidden1)
output = Dense(1, activation='sigmoid')(hidden2)
model = Model(inputs=visible, outputs=output)
# summarize layers: summary() prints itself; no print() wrapper needed.
model.summary()
# plot graph
plot_model(model, to_file='recurrent_neural_network.png')
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_1 (InputLayer) (None, 100, 1) 0
_________________________________________________________________
lstm_1 (LSTM) (None, 10) 480
_________________________________________________________________
dense_1 (Dense) (None, 10) 110
_________________________________________________________________
dense_2 (Dense) (None, 1) 11
=================================================================
Total params: 601
Trainable params: 601
Non-trainable params: 0
_________________________________________________________________
共享输入层
# Shared Input Layer
# One image input feeds two parallel feature extractors (different kernel
# sizes); their flattened outputs are concatenated before the dense head.
from keras.utils import plot_model
from keras.models import Model
from keras.layers import Input
from keras.layers import Dense
from keras.layers import Flatten
# Modern import paths: the convolutional/pooling/merge submodules were
# removed; all of these layers live in keras.layers directly.
from keras.layers import Conv2D
from keras.layers import MaxPooling2D
from keras.layers import concatenate
# input layer
visible = Input(shape=(64, 64, 1))
# first feature extractor (4x4 kernels)
conv1 = Conv2D(32, kernel_size=4, activation='relu')(visible)
pool1 = MaxPooling2D(pool_size=(2, 2))(conv1)
flat1 = Flatten()(pool1)
# second feature extractor (8x8 kernels) sharing the same input
conv2 = Conv2D(16, kernel_size=8, activation='relu')(visible)
pool2 = MaxPooling2D(pool_size=(2, 2))(conv2)
flat2 = Flatten()(pool2)
# merge feature extractors
merge = concatenate([flat1, flat2])
# interpretation layer
hidden1 = Dense(10, activation='relu')(merge)
# prediction output
output = Dense(1, activation='sigmoid')(hidden1)
model = Model(inputs=visible, outputs=output)
# summarize layers: summary() prints itself; no print() wrapper needed.
model.summary()
# plot graph
plot_model(model, to_file='shared_input_layer.png')
____________________________________________________________________________________________________
Layer (type) Output Shape Param # Connected to
====================================================================================================
input_1 (InputLayer) (None, 64, 64, 1) 0
____________________________________________________________________________________________________
conv2d_1 (Conv2D) (None, 61, 61, 32) 544 input_1[0][0]
____________________________________________________________________________________________________
conv2d_2 (Conv2D) (None, 57, 57, 16) 1040 input_1[0][0]
____________________________________________________________________________________________________
max_pooling2d_1 (MaxPooling2D) (None, 30, 30, 32) 0 conv2d_1[0][0]
____________________________________________________________________________________________________
max_pooling2d_2 (MaxPooling2D) (None, 28, 28, 16) 0 conv2d_2[0][0]
____________________________________________________________________________________________________
flatten_1 (Flatten) (None, 28800) 0 max_pooling2d_1[0][0]
____________________________________________________________________________________________________
flatten_2 (Flatten) (None, 12544) 0 max_pooling2d_2[0][0]
____________________________________________________________________________________________________
concatenate_1 (Concatenate) (None, 41344) 0 flatten_1[0][0]
flatten_2[0][0]
____________________________________________________________________________________________________
dense_1 (Dense) (None, 10) 413450 concatenate_1[0][0]
____________________________________________________________________________________________________
dense_2 (Dense) (None, 1) 11 dense_1[0][0]
====================================================================================================
Total params: 415,045
Trainable params: 415,045
Non-trainable params: 0
____________________________________________________________________________________________________
共享特征提取层
# Shared Feature Extraction Layer
# One LSTM extractor feeds two interpretation branches (shallow and deep);
# their outputs are concatenated before the sigmoid output.
from keras.utils import plot_model
from keras.models import Model
from keras.layers import Input
from keras.layers import Dense
# Modern import paths: the recurrent/merge submodules were removed;
# LSTM and concatenate live in keras.layers directly.
from keras.layers import LSTM
from keras.layers import concatenate
# define input
visible = Input(shape=(100, 1))
# feature extraction (shared by both branches)
extract1 = LSTM(10)(visible)
# first interpretation model (single dense layer)
interp1 = Dense(10, activation='relu')(extract1)
# second interpretation model (three dense layers)
interp11 = Dense(10, activation='relu')(extract1)
interp12 = Dense(20, activation='relu')(interp11)
interp13 = Dense(10, activation='relu')(interp12)
# merge interpretation
merge = concatenate([interp1, interp13])
# output
output = Dense(1, activation='sigmoid')(merge)
model = Model(inputs=visible, outputs=output)
# summarize layers: summary() prints itself; no print() wrapper needed.
model.summary()
# plot graph
plot_model(model, to_file='shared_feature_extractor.png')
____________________________________________________________________________________________________
Layer (type) Output Shape Param # Connected to
====================================================================================================
input_1 (InputLayer) (None, 100, 1) 0
____________________________________________________________________________________________________
lstm_1 (LSTM) (None, 10) 480 input_1[0][0]
____________________________________________________________________________________________________
dense_2 (Dense) (None, 10) 110 lstm_1[0][0]
____________________________________________________________________________________________________
dense_3 (Dense) (None, 20) 220 dense_2[0][0]
____________________________________________________________________________________________________
dense_1 (Dense) (None, 10) 110 lstm_1[0][0]
____________________________________________________________________________________________________
dense_4 (Dense) (None, 10) 210 dense_3[0][0]
____________________________________________________________________________________________________
concatenate_1 (Concatenate) (None, 20) 0 dense_1[0][0]
dense_4[0][0]
____________________________________________________________________________________________________
dense_5 (Dense) (None, 1) 21 concatenate_1[0][0]
====================================================================================================
Total params: 1,151
Trainable params: 1,151
Non-trainable params: 0
____________________________________________________________________________________________________
多输入模型
Model()
实例时,我们将两个输入层定义为一个数组。特别:model = Model(inputs=[visible1, visible2], outputs=output)
# Multiple Inputs
# Two separate image inputs (64x64x1 and 32x32x3), each with its own
# conv/pool stack; the flattened features are concatenated, then a dense
# head produces a single sigmoid output.
from keras.utils import plot_model
from keras.models import Model
from keras.layers import Input
from keras.layers import Dense
from keras.layers import Flatten
# Modern import paths: the convolutional/pooling/merge submodules were
# removed; all of these layers live in keras.layers directly.
from keras.layers import Conv2D
from keras.layers import MaxPooling2D
from keras.layers import concatenate
# first input model
visible1 = Input(shape=(64, 64, 1))
conv11 = Conv2D(32, kernel_size=4, activation='relu')(visible1)
pool11 = MaxPooling2D(pool_size=(2, 2))(conv11)
conv12 = Conv2D(16, kernel_size=4, activation='relu')(pool11)
pool12 = MaxPooling2D(pool_size=(2, 2))(conv12)
flat1 = Flatten()(pool12)
# second input model
visible2 = Input(shape=(32, 32, 3))
conv21 = Conv2D(32, kernel_size=4, activation='relu')(visible2)
pool21 = MaxPooling2D(pool_size=(2, 2))(conv21)
conv22 = Conv2D(16, kernel_size=4, activation='relu')(pool21)
pool22 = MaxPooling2D(pool_size=(2, 2))(conv22)
flat2 = Flatten()(pool22)
# merge input models
merge = concatenate([flat1, flat2])
# interpretation model
hidden1 = Dense(10, activation='relu')(merge)
hidden2 = Dense(10, activation='relu')(hidden1)
output = Dense(1, activation='sigmoid')(hidden2)
# both input layers are passed to Model as a list
model = Model(inputs=[visible1, visible2], outputs=output)
# summarize layers: summary() prints itself; no print() wrapper needed.
model.summary()
# plot graph
plot_model(model, to_file='multiple_inputs.png')
____________________________________________________________________________________________________
Layer (type) Output Shape Param # Connected to
====================================================================================================
input_1 (InputLayer) (None, 64, 64, 1) 0
____________________________________________________________________________________________________
input_2 (InputLayer) (None, 32, 32, 3) 0
____________________________________________________________________________________________________
conv2d_1 (Conv2D) (None, 61, 61, 32) 544 input_1[0][0]
____________________________________________________________________________________________________
conv2d_3 (Conv2D) (None, 29, 29, 32) 1568 input_2[0][0]
____________________________________________________________________________________________________
max_pooling2d_1 (MaxPooling2D) (None, 30, 30, 32) 0 conv2d_1[0][0]
____________________________________________________________________________________________________
max_pooling2d_3 (MaxPooling2D) (None, 14, 14, 32) 0 conv2d_3[0][0]
____________________________________________________________________________________________________
conv2d_2 (Conv2D) (None, 27, 27, 16) 8208 max_pooling2d_1[0][0]
____________________________________________________________________________________________________
conv2d_4 (Conv2D) (None, 11, 11, 16) 8208 max_pooling2d_3[0][0]
____________________________________________________________________________________________________
max_pooling2d_2 (MaxPooling2D) (None, 13, 13, 16) 0 conv2d_2[0][0]
____________________________________________________________________________________________________
max_pooling2d_4 (MaxPooling2D) (None, 5, 5, 16) 0 conv2d_4[0][0]
____________________________________________________________________________________________________
flatten_1 (Flatten) (None, 2704) 0 max_pooling2d_2[0][0]
____________________________________________________________________________________________________
flatten_2 (Flatten) (None, 400) 0 max_pooling2d_4[0][0]
____________________________________________________________________________________________________
concatenate_1 (Concatenate) (None, 3104) 0 flatten_1[0][0]
flatten_2[0][0]
____________________________________________________________________________________________________
dense_1 (Dense) (None, 10) 31050 concatenate_1[0][0]
____________________________________________________________________________________________________
dense_2 (Dense) (None, 10) 110 dense_1[0][0]
____________________________________________________________________________________________________
dense_3 (Dense) (None, 1) 11 dense_2[0][0]
====================================================================================================
Total params: 49,699
Trainable params: 49,699
Non-trainable params: 0
____________________________________________________________________________________________________
多输出模型
# Multiple Outputs
# One LSTM extractor (return_sequences=True) feeds two heads: a sequence-level
# binary classification output and a per-time-step regression output.
from keras.utils import plot_model
from keras.models import Model
from keras.layers import Input
from keras.layers import Dense
# Modern import paths: the recurrent/wrappers submodules were removed;
# LSTM and TimeDistributed live in keras.layers directly.
from keras.layers import LSTM
from keras.layers import TimeDistributed
# input layer
visible = Input(shape=(100, 1))
# feature extraction; return_sequences=True keeps the full sequence
# so both heads can consume it
extract = LSTM(10, return_sequences=True)(visible)
# classification output (collapses the sequence to one prediction)
class11 = LSTM(10)(extract)
class12 = Dense(10, activation='relu')(class11)
output1 = Dense(1, activation='sigmoid')(class12)
# sequence output (one linear prediction per time step)
output2 = TimeDistributed(Dense(1, activation='linear'))(extract)
# both output tensors are passed to Model as a list
model = Model(inputs=visible, outputs=[output1, output2])
# summarize layers: summary() prints itself; no print() wrapper needed.
model.summary()
# plot graph
plot_model(model, to_file='multiple_outputs.png')
____________________________________________________________________________________________________
Layer (type) Output Shape Param # Connected to
====================================================================================================
input_1 (InputLayer) (None, 100, 1) 0
____________________________________________________________________________________________________
lstm_1 (LSTM) (None, 100, 10) 480 input_1[0][0]
____________________________________________________________________________________________________
lstm_2 (LSTM) (None, 10) 840 lstm_1[0][0]
____________________________________________________________________________________________________
dense_1 (Dense) (None, 10) 110 lstm_2[0][0]
____________________________________________________________________________________________________
dense_2 (Dense) (None, 1) 11 dense_1[0][0]
____________________________________________________________________________________________________
time_distributed_1 (TimeDistribu (None, 100, 1) 11 lstm_1[0][0]
====================================================================================================
Total params: 1,452
Trainable params: 1,452
Non-trainable params: 0
____________________________________________________________________________________________________
一致的变量名。对输入(可见)层和输出层(输出)甚至隐藏层( hidden1
,hidden2
)使用相同的变量名。它将有助于正确地将事物连接在一起。查看图层摘要。始终打印模型摘要并查看图层输出,以确保模型按预期连接在一起。 查看图形图。始终创建模型图的图并进行检查,以确保将所有内容按预期组合在一起。 命名图层。您可以为查看模型图的摘要和绘图时使用的图层分配名称。例如:Dense(1, name='hidden1')
。单独的子模型。考虑分离出子模型的开发,最后将子模型组合在一起。
作者:沂水寒城,CSDN博客专家,个人研究方向:机器学习、深度学习、NLP、CV
Blog: http://yishuihancheng.blog.csdn.net
赞 赏 作 者
更多阅读
特别推荐
点击下方阅读原文加入社区会员
评论