Date: 2021-05-23
Keras model visualization:
The model:

from keras.models import Sequential
from keras.layers import (ZeroPadding2D, Conv2D, BatchNormalization,
                          MaxPooling2D, AveragePooling2D, Dropout, Flatten, Dense)

model = Sequential()
# input: 38x38 single-channel images -> (38, 38, 1) tensors.
# this applies 32 convolution filters of size 3x3 each.
model.add(ZeroPadding2D((1, 1), input_shape=(38, 38, 1)))
model.add(Conv2D(32, (3, 3), activation='relu', padding='same'))
# model.add(Conv2D(32, (3, 3), activation='relu', padding='same'))
model.add(BatchNormalization())
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.25))

model.add(Conv2D(64, (3, 3), activation='relu', padding='same'))
# model.add(Conv2D(64, (3, 3), activation='relu', padding='same'))
model.add(BatchNormalization())
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.25))

model.add(Conv2D(128, (3, 3), activation='relu', padding='same'))
# model.add(Conv2D(128, (3, 3), activation='relu', padding='same'))
model.add(BatchNormalization())
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.25))

model.add(AveragePooling2D((5, 5)))
model.add(Flatten())
# model.add(Dense(512, activation='relu'))
# model.add(Dropout(0.5))
model.add(Dense(label_size, activation='softmax'))  # label_size: number of output classes
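The snippet above only defines the network; to visualize the architecture itself, a minimal sketch using model.summary() and keras.utils.plot_model (an assumption here, not shown in the original; plot_model additionally requires pydot and graphviz, and the file name model.png is arbitrary):

from keras.utils import plot_model

# Text summary of layers, output shapes and parameter counts.
model.summary()

# Render the architecture as a diagram (requires pydot and graphviz to be installed).
plot_model(model, to_file='model.png', show_shapes=True, show_layer_names=True)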
1. Layer visualization:

import cv2
import numpy as np
import matplotlib.pyplot as plt

# Prepare a single test image: load as grayscale, resize to 38x38, invert and scale to [0, 1].
test_x = []
img_src = cv2.imdecode(np.fromfile(r'c:\temp.tif', dtype=np.uint8), cv2.IMREAD_GRAYSCALE)
img = cv2.resize(img_src, (38, 38), interpolation=cv2.INTER_CUBIC)
# img = np.random.randint(0, 255, (38, 38))
img = (255 - img) / 255
img = np.reshape(img, (38, 38, 1))
test_x.append(img)

###################################################################
# Feature maps after the first convolution layer (model.layers[1]).
layer = model.layers[1]
weight = layer.get_weights()
# print(weight)
print(np.asarray(weight).shape)

model_v1 = Sequential()
model_v1.add(ZeroPadding2D((1, 1), input_shape=(38, 38, 1)))
model_v1.add(Conv2D(32, (3, 3), activation='relu', padding='same'))
model_v1.layers[1].set_weights(weight)

re = model_v1.predict(np.array(test_x))
print(np.shape(re))
re = np.transpose(re, (0, 3, 1, 2))
for i in range(32):
    plt.subplot(4, 8, i + 1)
    plt.imshow(re[0][i])  # , cmap='gray'
plt.show()

##################################################################
# Feature maps after the second convolution layer (model.layers[5]).
model_v2 = Sequential()
model_v2.add(ZeroPadding2D((1, 1), input_shape=(38, 38, 1)))
model_v2.add(Conv2D(32, (3, 3), activation='relu', padding='same'))
model_v2.add(BatchNormalization())
model_v2.add(MaxPooling2D(pool_size=(2, 2)))
model_v2.add(Dropout(0.25))
model_v2.add(Conv2D(64, (3, 3), activation='relu', padding='same'))
print(len(model_v2.layers))

layer1 = model.layers[1]
weight1 = layer1.get_weights()
model_v2.layers[1].set_weights(weight1)
layer5 = model.layers[5]
weight5 = layer5.get_weights()
model_v2.layers[5].set_weights(weight5)

re2 = model_v2.predict(np.array(test_x))
re2 = np.transpose(re2, (0, 3, 1, 2))
for i in range(64):
    plt.subplot(8, 8, i + 1)
    plt.imshow(re2[0][i])  # , cmap='gray'
plt.show()

##################################################################
# Feature maps after the third convolution layer (model.layers[9]).
model_v3 = Sequential()
model_v3.add(ZeroPadding2D((1, 1), input_shape=(38, 38, 1)))
model_v3.add(Conv2D(32, (3, 3), activation='relu', padding='same'))
model_v3.add(BatchNormalization())
model_v3.add(MaxPooling2D(pool_size=(2, 2)))
model_v3.add(Dropout(0.25))
model_v3.add(Conv2D(64, (3, 3), activation='relu', padding='same'))
model_v3.add(BatchNormalization())
model_v3.add(MaxPooling2D(pool_size=(2, 2)))
model_v3.add(Dropout(0.25))
model_v3.add(Conv2D(128, (3, 3), activation='relu', padding='same'))
print(len(model_v3.layers))

layer1 = model.layers[1]
weight1 = layer1.get_weights()
model_v3.layers[1].set_weights(weight1)
layer5 = model.layers[5]
weight5 = layer5.get_weights()
model_v3.layers[5].set_weights(weight5)
layer9 = model.layers[9]
weight9 = layer9.get_weights()
model_v3.layers[9].set_weights(weight9)

re3 = model_v3.predict(np.array(test_x))
re3 = np.transpose(re3, (0, 3, 1, 2))
for i in range(121):  # shows the first 121 of the 128 feature maps in an 11x11 grid
    plt.subplot(11, 11, i + 1)
    plt.imshow(re3[0][i])  # , cmap='gray'
plt.show()
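Rebuilding truncated copies of the network and copying weights layer by layer works, but the same feature maps can also be obtained without any set_weights() calls by wrapping the trained model in an intermediate-output model. A rough sketch (not from the original article) using keras.models.Model, shown here for model.layers[5]:

from keras.models import Model

# A model that maps the original input to an intermediate layer's output;
# it shares the already-trained weights, so no set_weights() is needed.
feature_model = Model(inputs=model.input, outputs=model.layers[5].output)
feature_maps = feature_model.predict(np.array(test_x))  # shape: (1, H, W, 64)

feature_maps = np.transpose(feature_maps, (0, 3, 1, 2))
for i in range(feature_maps.shape[1]):
    plt.subplot(8, 8, i + 1)
    plt.imshow(feature_maps[0][i])
plt.show()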
2. Kernel visualization:

from keras import backend as K

def process(x):
    res = np.clip(x, 0, 1)
    return res

def dprocessed(x):
    res = np.zeros_like(x)
    res += 1
    res[x < 0] = 0
    res[x > 1] = 0
    return res

def deprocess_image(x):
    # Normalize, center around 0.5 and convert to a displayable uint8 image.
    x -= x.mean()
    x /= (x.std() + 1e-5)
    x *= 0.1
    x += 0.5
    x = np.clip(x, 0, 1)
    x *= 255
    x = np.clip(x, 0, 255).astype('uint8')
    return x

for i_kernal in range(64):
    input_img = model.input
    loss = K.mean(model.layers[5].output[:, :, :, i_kernal])
    # loss = K.mean(model.output[:, i_kernal])
    # compute the gradient of the input picture wrt this loss
    grads = K.gradients(loss, input_img)[0]
    # normalization trick: we normalize the gradient
    grads /= (K.sqrt(K.mean(K.square(grads))) + 1e-5)
    # this function returns the loss and grads given the input picture
    iterate = K.function([input_img, K.learning_phase()], [loss, grads])
    # we start from a gray image with some noise
    np.random.seed(0)
    num_channels = 1
    img_height = img_width = 38
    input_img_data = (255 - np.random.randint(0, 255, (1, img_height, img_width, num_channels))) / 255.
    failed = False
    # run gradient ascent
    print('####################################', i_kernal + 1)
    loss_value_pre = 0
    for i in range(10000):
        # processed = process(input_img_data)
        # predictions = model.predict(input_img_data)
        loss_value, grads_value = iterate([input_img_data, 1])
        # grads_value *= dprocessed(input_img_data[0])
        if i % 1000 == 0:
            # print(' predictions: ', np.shape(predictions), np.argmax(predictions))
            print('Iteration %d/%d, loss: %f' % (i, 10000, loss_value))
            print('Mean grad: %f' % np.mean(grads_value))
            if all(np.abs(grads_val) < 0.000001 for grads_val in grads_value.flatten()):
                failed = True
                print('Failed')
                break
            # print('Image:\n%s' % str(input_img_data[0,0,:,:]))
            if loss_value_pre != 0 and loss_value_pre > loss_value:
                break
            if loss_value_pre == 0:
                loss_value_pre = loss_value
            # if loss_value > 0.99:
            #     break
        input_img_data += grads_value * 1  # e-3
    plt.subplot(8, 8, i_kernal + 1)
    # plt.imshow((process(input_img_data[0,:,:,0])*255).astype('uint8'), cmap='Greys')
    img_re = deprocess_image(input_img_data[0])
    img_re = np.reshape(img_re, (38, 38))
    plt.imshow(img_re, cmap='Greys')
    # plt.show()
plt.show()

(Figures: kernel visualization results for model.layers[1], model.layers[5], and model.layers[-1].)
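Note that K.gradients and K.function above rely on the TF1-style graph backend and are unavailable under eager execution. If the model were built with tf.keras on TensorFlow 2, the same gradient-ascent idea could be sketched roughly as below (the sub-model, step count and step size are assumptions, not part of the original code):

import tensorflow as tf

# Sub-model exposing the activations of the layer whose kernels we want to maximize.
feat_model = tf.keras.Model(model.input, model.layers[5].output)

def visualize_kernel(i_kernal, steps=200, step_size=1.0):
    # Start from random noise and repeatedly move the input in the direction
    # that increases the mean activation of the chosen kernel.
    img = tf.Variable(np.random.uniform(0, 1, (1, 38, 38, 1)).astype('float32'))
    for _ in range(steps):
        with tf.GradientTape() as tape:
            activation = feat_model(img, training=False)
            loss = tf.reduce_mean(activation[:, :, :, i_kernal])
        grads = tape.gradient(loss, img)
        grads /= (tf.sqrt(tf.reduce_mean(tf.square(grads))) + 1e-5)  # normalization trick
        img.assign_add(grads * step_size)
    return deprocess_image(img.numpy()[0])

plt.imshow(np.reshape(visualize_kernel(0), (38, 38)), cmap='Greys')
plt.show()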
That is all for this example of Keras model visualization, layer visualization and kernel visualization. I hope it gives you a useful reference, and thank you for your continued support.