Keras:Unet网络实现多类语义分割方式

时间:2021-05-23

1 介绍

U-Net最初是用来对医学图像的语义分割,后来也有人将其应用于其他领域。但大多还是用来进行二分类,即将原始图像分成两个灰度级或者色度,以此找到图像中感兴趣的目标部分。

本文主要利用U-Net网络结构实现了多类的语义分割,并展示了部分测试效果,希望对你有用!

2 源代码

(1)训练模型

from __future__ import print_functionimport osimport datetimeimport numpy as npfrom keras.models import Modelfrom keras.layers import Input, concatenate, Conv2D, MaxPooling2D, Conv2DTranspose, AveragePooling2D, Dropout, \ BatchNormalizationfrom keras.optimizers import Adamfrom keras.layers.convolutional import UpSampling2D, Conv2Dfrom keras.callbacks import ModelCheckpointfrom keras import backend as Kfrom keras.layers.advanced_activations import LeakyReLU, ReLUimport cv2 PIXEL = 512 #set your image sizeBATCH_SIZE = 5lr = 0.001EPOCH = 100X_CHANNEL = 3 # training images channelY_CHANNEL = 1 # label iamges channelX_NUM = 422 # your traning data number pathX = 'I:\\Pascal VOC Dataset\\train1\\images\\' #change your file pathpathY = 'I:\\Pascal VOC Dataset\\train1\\SegmentationObject\\' #change your file path #data processingdef generator(pathX, pathY,BATCH_SIZE): while 1: X_train_files = os.listdir(pathX) Y_train_files = os.listdir(pathY) a = (np.arange(1, X_NUM)) X = [] Y = [] for i in range(BATCH_SIZE): index = np.random.choice(a) # print(index) img = cv2.imread(pathX + X_train_files[index], 1) img = np.array(img).reshape(PIXEL, PIXEL, X_CHANNEL) X.append(img) img1 = cv2.imread(pathY + Y_train_files[index], 1) img1 = np.array(img1).reshape(PIXEL, PIXEL, Y_CHANNEL) Y.append(img1) X = np.array(X) Y = np.array(Y) yield X, Y #creat unet networkinputs = Input((PIXEL, PIXEL, 3))conv1 = Conv2D(8, 3, activation='relu', padding='same', kernel_initializer='he_normal')(inputs)pool1 = AveragePooling2D(pool_size=(2, 2))(conv1) # 16 conv2 = BatchNormalization(momentum=0.99)(pool1)conv2 = Conv2D(64, 3, activation='relu', padding='same', kernel_initializer='he_normal')(conv2)conv2 = BatchNormalization(momentum=0.99)(conv2)conv2 = Conv2D(64, 1, activation='relu', padding='same', kernel_initializer='he_normal')(conv2)conv2 = Dropout(0.02)(conv2)pool2 = AveragePooling2D(pool_size=(2, 2))(conv2) # 8 conv3 = BatchNormalization(momentum=0.99)(pool2)conv3 = Conv2D(128, 3, 
activation='relu', padding='same', kernel_initializer='he_normal')(conv3)conv3 = BatchNormalization(momentum=0.99)(conv3)conv3 = Conv2D(128, 1, activation='relu', padding='same', kernel_initializer='he_normal')(conv3)conv3 = Dropout(0.02)(conv3)pool3 = AveragePooling2D(pool_size=(2, 2))(conv3) # 4 conv4 = BatchNormalization(momentum=0.99)(pool3)conv4 = Conv2D(256, 3, activation='relu', padding='same', kernel_initializer='he_normal')(conv4)conv4 = BatchNormalization(momentum=0.99)(conv4)conv4 = Conv2D(256, 1, activation='relu', padding='same', kernel_initializer='he_normal')(conv4)conv4 = Dropout(0.02)(conv4)pool4 = AveragePooling2D(pool_size=(2, 2))(conv4) conv5 = BatchNormalization(momentum=0.99)(pool4)conv5 = Conv2D(512, 3, activation='relu', padding='same', kernel_initializer='he_normal')(conv5)conv5 = BatchNormalization(momentum=0.99)(conv5)conv5 = Conv2D(512, 1, activation='relu', padding='same', kernel_initializer='he_normal')(conv5)conv5 = Dropout(0.02)(conv5)pool4 = AveragePooling2D(pool_size=(2, 2))(conv4)# conv5 = Conv2D(35, 3, activation='relu', padding='same', kernel_initializer='he_normal')(conv4)# drop4 = Dropout(0.02)(conv5)pool4 = AveragePooling2D(pool_size=(2, 2))(pool3) # 2pool5 = AveragePooling2D(pool_size=(2, 2))(pool4) # 1 conv6 = BatchNormalization(momentum=0.99)(pool5)conv6 = Conv2D(256, 3, activation='relu', padding='same', kernel_initializer='he_normal')(conv6) conv7 = Conv2D(256, 3, activation='relu', padding='same', kernel_initializer='he_normal')(conv6)up7 = (UpSampling2D(size=(2, 2))(conv7)) # 2conv7 = Conv2D(256, 3, activation='relu', padding='same', kernel_initializer='he_normal')(up7)merge7 = concatenate([pool4, conv7], axis=3) conv8 = Conv2D(128, 3, activation='relu', padding='same', kernel_initializer='he_normal')(merge7)up8 = (UpSampling2D(size=(2, 2))(conv8)) # 4conv8 = Conv2D(128, 3, activation='relu', padding='same', kernel_initializer='he_normal')(up8)merge8 = concatenate([pool3, conv8], axis=3) conv9 = Conv2D(64, 3, 
activation='relu', padding='same', kernel_initializer='he_normal')(merge8)up9 = (UpSampling2D(size=(2, 2))(conv9)) # 8conv9 = Conv2D(64, 3, activation='relu', padding='same', kernel_initializer='he_normal')(up9)merge9 = concatenate([pool2, conv9], axis=3) conv10 = Conv2D(32, 3, activation='relu', padding='same', kernel_initializer='he_normal')(merge9)up10 = (UpSampling2D(size=(2, 2))(conv10)) # 16conv10 = Conv2D(32, 3, activation='relu', padding='same', kernel_initializer='he_normal')(up10) conv11 = Conv2D(16, 3, activation='relu', padding='same', kernel_initializer='he_normal')(conv10)up11 = (UpSampling2D(size=(2, 2))(conv11)) # 32conv11 = Conv2D(8, 3, activation='relu', padding='same', kernel_initializer='he_normal')(up11) # conv12 = Conv2D(3, 1, activation='relu', padding='same', kernel_initializer='he_normal')(conv11)conv12 = Conv2D(3, 1, activation='relu', padding='same', kernel_initializer='he_normal')(conv11) model = Model(input=inputs, output=conv12)print(model.summary())model.compile(optimizer=Adam(lr=1e-3), loss='mse', metrics=['accuracy']) history = model.fit_generator(generator(pathX, pathY,BATCH_SIZE), steps_per_epoch=600, nb_epoch=EPOCH)end_time = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S') #save your training modelmodel.save(r'V1_828.h5') #save your loss datamse = np.array((history.history['loss']))np.save(r'V1_828.npy', mse)

(2)测试模型

"""Run the trained U-Net on a test folder, save predictions, and plot one
random (image, prediction, ground truth) triple side by side."""
from keras.models import load_model
import numpy as np
import matplotlib.pyplot as plt
import os
import cv2

model = load_model('V1_828.h5')
test_images_path = 'I:\\Pascal VOC Dataset\\test\\test_images\\'
test_gt_path = 'I:\\Pascal VOC Dataset\\test\\SegmentationObject\\'
pre_path = 'I:\\Pascal VOC Dataset\\test\\pre\\'

# sort both listings so images, predictions and ground-truth masks stay
# aligned by name: plain os.listdir order is arbitrary and may differ
# between the two folders, silently pairing an image with the wrong mask
test_files = sorted(os.listdir(test_images_path))
gt_files = sorted(os.listdir(test_gt_path))

X = np.array([cv2.imread(test_images_path + info) for info in test_files])
print(X.shape)
Y = model.predict(X)

groudtruth = np.array([cv2.imread(test_gt_path + info) for info in gt_files])

# write every prediction under the same file name as its source image
for i, info in enumerate(test_files):
    cv2.imwrite(pre_path + info, Y[i])

# pick one of the first 10 samples at random for visual inspection
n = np.random.choice(range(10))
cv2.imwrite('prediction.png', Y[n])
cv2.imwrite('groudtruth.png', groudtruth[n])

# left-to-right: input image, prediction, ground truth
fig, axs = plt.subplots(1, 3)
axs[0].imshow(np.abs(X[n]))
axs[0].axis('off')
axs[1].imshow(np.abs(Y[n]))
axs[1].axis('off')
axs[2].imshow(np.abs(groudtruth[n]))
axs[2].axis('off')
fig.savefig("imagestest.png")
plt.close()

3 效果展示

说明:从左到右依次是预测图像,真实图像,标注图像。可以看出,对于部分数据的分割效果还有待改进,主要原因还是数据集相对复杂,模型难以找到其中的规律。

以上这篇Keras:Unet网络实现多类语义分割方式就是小编分享给大家的全部内容了,希望能给大家一个参考,也希望大家多多支持。

声明:本页内容来源网络,仅供用户参考;我单位不保证亦不表示资料全面及准确无误,也不保证亦不表示这些资料为最新信息,如因任何原因,本网内容或者用户因倚赖本网内容造成任何损失或损害,我单位将不会负任何法律责任。如涉及版权问题,请提交至online#300.cn邮箱联系删除。

相关文章