Date: 2021-05-22
Without further ado, let's go straight to the code.
'''Created on 2018-4-16'''
import keras
from keras.models import Sequential
from keras.layers import Dense
from keras.models import Model
from keras.callbacks import ModelCheckpoint, Callback
import numpy as np
import tflearn
import tflearn.datasets.mnist as mnist

# Load MNIST and split a validation set off the front of the test set
x_train, y_train, x_test, y_test = mnist.load_data(one_hot=True)
x_valid = x_test[:5000]
y_valid = y_test[:5000]
x_test = x_test[5000:]
y_test = y_test[5000:]
print(x_valid.shape)
print(x_test.shape)

# A small fully connected network: 784 -> 64 -> 10
model = Sequential()
model.add(Dense(units=64, activation='relu', input_dim=784))
model.add(Dense(units=10, activation='softmax'))
model.compile(loss='categorical_crossentropy', optimizer='sgd', metrics=['accuracy'])

# Checkpoint callback: keep only the model with the lowest val_loss
filepath = 'D:\\machineTest\\model-ep{epoch:03d}-loss{loss:.3f}-val_loss{val_loss:.3f}.h5'
# filepath = 'D:\\machineTest\\model-ep{epoch:03d}-loss{loss:.3f}.h5'
checkpoint = ModelCheckpoint(filepath, monitor='val_loss', verbose=1, save_best_only=True, mode='min')

# Print the model configuration
print(model.get_config())
# [{'class_name': 'Dense', 'config': {'bias_regularizer': None, 'use_bias': True, 'kernel_regularizer': None,
#   'batch_input_shape': (None, 784), 'trainable': True, 'kernel_constraint': None, 'bias_constraint': None,
#   'kernel_initializer': {'class_name': 'VarianceScaling', 'config': {'scale': 1.0, 'distribution': 'uniform',
#   'mode': 'fan_avg', 'seed': None}}, 'activity_regularizer': None, 'units': 64, 'dtype': 'float32',
#   'bias_initializer': {'class_name': 'Zeros', 'config': {}}, 'activation': 'relu', 'name': 'dense_1'}},
#  {'class_name': 'Dense', 'config': {'bias_regularizer': None, 'use_bias': True, 'kernel_regularizer': None,
#   'bias_initializer': {'class_name': 'Zeros', 'config': {}}, 'kernel_constraint': None, 'bias_constraint': None,
#   'kernel_initializer': {'class_name': 'VarianceScaling', 'config': {'scale': 1.0, 'distribution': 'uniform',
#   'mode': 'fan_avg', 'seed': None}}, 'activity_regularizer': None, 'trainable': True, 'units': 10,
#   'activation': 'softmax', 'name': 'dense_2'}}]

# model.fit(x_train, y_train, epochs=1, batch_size=128, callbacks=[checkpoint], validation_data=(x_valid, y_valid))
model.fit(x_train, y_train, epochs=1, validation_data=(x_valid, y_valid), steps_per_epoch=10, validation_steps=1)
# score = model.evaluate(x_test, y_test, batch_size=128)
# print(score)

# Print the model structure
# model.summary()
# _________________________________________________________________
# Layer (type)                 Output Shape              Param #
# =================================================================
# dense_1 (Dense)              (None, 64)                50240    (784*64 + 64 biases)
# _________________________________________________________________
# dense_2 (Dense)              (None, 10)                650      (64*10 + 10 biases)
# =================================================================

# Get a layer object by index or by name
# layer = model.get_layer(index=0)

# Read the model weights with model.get_weights(); write them back with model.set_weights()
weights = np.array(model.get_weights())
print(weights.shape)     # (4,)       the weights consist of 4 arrays
print(weights[0].shape)  # (784, 64)  dense_1 w1
print(weights[1].shape)  # (64,)      dense_1 b1
print(weights[2].shape)  # (64, 10)   dense_2 w2
print(weights[3].shape)  # (10,)      dense_2 b2

# Save and load the weights only
# model.save_weights("D:\\xxx\\weights.h5")
# model.load_weights("D:\\xxx\\weights.h5", by_name=False)  # by_name=True loads weights into layers matched by name

# To inspect an intermediate result of a Sequential model, first declare a functional Model
dense1_layer_model = Model(inputs=model.input, outputs=model.get_layer('dense_1').output)
out = dense1_layer_model.predict(x_test)
print(out.shape)  # (5000, 64)

# If the model is already a functional Model, an intermediate layer can be output directly:
# import keras
# from keras.models import Model
# from keras.callbacks import ModelCheckpoint, Callback
# import numpy as np
# from keras.layers import Input, Conv2D, MaxPooling2D
# import cv2
#
# image = cv2.imread("D:\\machineTest\\falali.jpg")
# print(image.shape)
# cv2.imshow("1", image)
#
# # First conv block
# image = image.reshape([-1, 386, 580, 3])
# img_input = Input(shape=(386, 580, 3))
# x = Conv2D(64, (3, 3), activation='relu', padding='same', name='block1_conv1')(img_input)
# x = Conv2D(64, (3, 3), activation='relu', padding='same', name='block1_conv2')(x)
# x = MaxPooling2D((2, 2), strides=(2, 2), name='block1_pool')(x)
# model = Model(inputs=img_input, outputs=x)
# out = model.predict(image)
# print(out.shape)
# out = out.reshape(193, 290, 64)
# image_conv1 = out[:, :, 1].reshape(193, 290)
# image_conv2 = out[:, :, 20].reshape(193, 290)
# image_conv3 = out[:, :, 40].reshape(193, 290)
# image_conv4 = out[:, :, 60].reshape(193, 290)
# cv2.imshow("conv1", image_conv1)
# cv2.imshow("conv2", image_conv2)
# cv2.imshow("conv3", image_conv3)
# cv2.imshow("conv4", image_conv4)
# cv2.waitKey(0)

Outputting intermediate results lets you look at what an image becomes after passing through the conv layers:
The original image:

After one conv layer, four of the 64 output feature maps:
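As a side note, an intermediate layer's output can also be pulled out without declaring a second functional Model, by using a Keras backend function. This is only a minimal sketch, assuming the same Sequential model object defined above and a TensorFlow 1.x-era Keras backend; it is an alternative to the approach in the original code, not part of it:

from keras import backend as K

# Build a callable that maps the model input to the output of the layer named 'dense_1'
get_dense1_output = K.function([model.input], [model.get_layer('dense_1').output])

# The callable takes a list of input arrays and returns a list of output arrays
dense1_out = get_dense1_output([x_test])[0]
print(dense1_out.shape)  # should again be (5000, 64), matching the functional-Model approach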
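The script above only saves and loads weights (save_weights / load_weights) and checkpoints via ModelCheckpoint. Since model saving is one of the topics here, it is worth noting that the whole model, meaning architecture, weights, and optimizer state, can also be written to a single HDF5 file with model.save and restored with load_model. A minimal sketch, assuming the same model object as above; the file path is only an example:

from keras.models import load_model

# Save architecture + weights + optimizer state in one file (example path)
model.save('D:\\machineTest\\full_model.h5')

# Restore the complete model later without rebuilding it in code
restored = load_model('D:\\machineTest\\full_model.h5')
restored.summary()  # same structure as the original model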
That is everything in this piece on Keras model parameters, model saving, and intermediate-result output. I hope it gives you a useful reference, and thank you for your support.