1500字范文,内容丰富有趣,写作好帮手!
1500字范文 > 1DCNN 2DCNN LeNet5 VGGNet16使用tensorflow2.X实现

1DCNN 2DCNN LeNet5 VGGNet16使用tensorflow2.X实现

时间:2021-03-21 21:09:30

相关推荐

1DCNN 2DCNN LeNet5 VGGNet16使用tensorflow2.X实现

1DCNN是1维卷积

2DCNN是两层卷积层 + 池化层

LeNet5是两段卷积层+池化层,最后加三层全连接层

VGGNet16总共分为八段:

"""1D-CNN, 2D-CNN, LeNet5-style and VGGNet16-style classifiers (tf.keras 2.x).

NOTE(review): the training/evaluation drivers below read the module-level
globals `train_x`, `train_y`, `test_x`, `test_y`, which must be defined
elsewhere before calling them (they are not defined in this snippet).
All models assume 12 output classes and integer labels
(sparse_categorical_crossentropy) — confirm against the data-loading code.
"""
import time

import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers


def LeNet_CNNmodel():
    """Build a small LeNet-style CNN for 16x16x1 inputs, 12 classes."""
    model = keras.models.Sequential([
        layers.Conv2D(filters=64, kernel_size=(3, 3), padding='same',
                      input_shape=(16, 16, 1), activation='relu'),
        layers.MaxPooling2D(pool_size=(2, 2), padding='same'),
        layers.Conv2D(filters=32, kernel_size=(3, 3), padding='same',
                      activation='relu'),
        layers.MaxPooling2D(pool_size=(2, 2), padding='same'),
        layers.Flatten(),
        layers.Dense(256, activation='relu'),
        layers.Dense(128, activation='relu'),
        layers.Dense(12, activation='softmax'),
    ])
    # Compile.  FIX: the extracted text had a garbled "pile(...)" here —
    # restored to model.compile(...), otherwise this is a NameError.
    model.compile(loss="sparse_categorical_crossentropy",
                  optimizer='adam', metrics=['accuracy'])
    return model


def LeNet_CNN():
    """Train and evaluate the LeNet model; return (scores, pred_y)."""
    t1 = time.time()
    model = LeNet_CNNmodel()
    X_train = tf.reshape(train_x, [-1, 16, 16, 1])
    X_test = tf.reshape(test_x, [-1, 16, 16, 1])
    model.summary()
    # FIX: `nb_epoch` is the Keras 1 keyword; tf.keras 2.x requires `epochs`.
    history = model.fit(X_train, train_y, validation_data=(X_test, test_y),
                        epochs=25, batch_size=128, verbose=2)
    scores = model.evaluate(X_test, test_y, verbose=0)
    t2 = time.time()
    pred_y = model.predict(X_test)
    print(scores)
    print("Baseline Error: %.2f%%" % (100 - scores[1] * 100), t2 - t1)
    print(history.history)
    return scores, pred_y


def oneD_cNNmodel():
    """Build a 1-D CNN for inputs of shape (32, 8), 12 classes."""
    model = keras.models.Sequential([
        layers.Conv1D(50, 7, input_shape=(32, 8), activation='relu'),
        layers.MaxPooling1D(3),
        # FIX: dropped the redundant input_shape on this non-first layer
        # (Keras ignores it anywhere but the first layer).
        layers.Conv1D(50, 7, activation='relu'),
        layers.GlobalAveragePooling1D(),
        layers.Dense(12, activation='softmax'),
    ])
    model.compile(loss="sparse_categorical_crossentropy",
                  optimizer='adam', metrics=['accuracy'])
    return model


def oneD_cNN():
    """Train and evaluate the 1-D CNN; return (scores, pred_y)."""
    t1 = time.time()
    model = oneD_cNNmodel()
    X_train = tf.reshape(train_x, [-1, 32, 8])
    X_test = tf.reshape(test_x, [-1, 32, 8])
    model.summary()
    history = model.fit(X_train, train_y, validation_data=(X_test, test_y),
                        epochs=25, batch_size=128)
    scores = model.evaluate(X_test, test_y, verbose=0)
    t2 = time.time()
    pred_y = model.predict(X_test)
    print(scores)
    print("Baseline Error: %.2f%%" % (100 - scores[1] * 100), t2 - t1)
    print(history.history)
    return scores, pred_y


def two_CNNmodel():
    """Build a two-conv-layer 2-D CNN for 16x16x1 inputs, 12 classes."""
    model = keras.models.Sequential([
        layers.Conv2D(64, kernel_size=(3, 3), padding='same',
                      input_shape=(16, 16, 1), activation='relu'),
        layers.Conv2D(32, kernel_size=(3, 3), padding='same',
                      activation='relu'),
        layers.MaxPooling2D(pool_size=(2, 2), padding='same'),
        layers.Flatten(),
        layers.Dense(128, activation='relu'),
        layers.Dropout(0.5),
        layers.Dense(12, activation='softmax'),
    ])
    model.compile(loss="sparse_categorical_crossentropy",
                  optimizer='adam', metrics=['accuracy'])
    return model


def twoD_CNN():
    """Train and evaluate the 2-D CNN; return (scores, pred_y)."""
    t1 = time.time()
    model = two_CNNmodel()
    X_train = tf.reshape(train_x, [-1, 16, 16, 1])
    X_test = tf.reshape(test_x, [-1, 16, 16, 1])
    model.summary()
    history = model.fit(X_train, train_y, validation_data=(X_test, test_y),
                        epochs=25, batch_size=128, verbose=2)
    scores = model.evaluate(X_test, test_y, verbose=0)
    t2 = time.time()
    pred_y = model.predict(X_test)
    print(scores)
    print("Baseline Error: %.2f%%" % (100 - scores[1] * 100), t2 - t1)
    print(history.history)
    return scores, pred_y


def VGGNet16_model():
    """Build a VGG16-style CNN (5 conv blocks + 3 dense) for 16x16x1 inputs."""
    model = keras.models.Sequential([
        # block 1
        layers.Conv2D(64, (3, 3), activation='relu', padding='same',
                      input_shape=(16, 16, 1)),
        layers.Conv2D(64, (3, 3), activation='relu', padding='same'),
        layers.MaxPooling2D(pool_size=(2, 2), padding='same'),
        # block 2
        layers.Conv2D(128, (3, 3), activation='relu', padding='same'),
        layers.Conv2D(128, (3, 3), activation='relu', padding='same'),
        layers.MaxPooling2D(pool_size=(2, 2), padding='same'),
        # block 3
        layers.Conv2D(256, (3, 3), activation='relu', padding='same'),
        layers.Conv2D(256, (3, 3), activation='relu', padding='same'),
        layers.Conv2D(256, (3, 3), activation='relu', padding='same'),
        layers.MaxPooling2D(pool_size=(2, 2), padding='same'),
        # block 4
        layers.Conv2D(512, (3, 3), activation='relu', padding='same'),
        layers.Conv2D(512, (3, 3), activation='relu', padding='same'),
        layers.Conv2D(512, (3, 3), activation='relu', padding='same'),
        layers.MaxPooling2D(pool_size=(2, 2), padding='same'),
        # block 5
        layers.Conv2D(512, (3, 3), activation='relu', padding='same'),
        layers.Conv2D(512, (3, 3), activation='relu', padding='same'),
        layers.Conv2D(512, (3, 3), activation='relu', padding='same'),
        layers.MaxPooling2D(pool_size=(2, 2), padding='same'),
        # classifier head
        layers.Flatten(),
        layers.Dense(256, activation='relu'),
        layers.Dense(128, activation='relu'),
        layers.Dense(12, activation='softmax'),
    ])
    model.compile(loss="sparse_categorical_crossentropy",
                  optimizer='adam', metrics=['accuracy'])
    return model


def VGGNet16():
    """Train and evaluate the VGG16-style model; return (scores, pred_y)."""
    t1 = time.time()
    # FIX: original called the undefined name `VGG16_Model()`; the builder
    # defined above is `VGGNet16_model()`.
    model = VGGNet16_model()
    X_train = tf.reshape(train_x, [-1, 16, 16, 1])
    X_test = tf.reshape(test_x, [-1, 16, 16, 1])
    model.summary()
    history = model.fit(X_train, train_y, validation_data=(X_test, test_y),
                        epochs=25, batch_size=128, verbose=2)
    scores = model.evaluate(X_test, test_y, verbose=0)
    t2 = time.time()
    pred_y = model.predict(X_test)
    print(scores)
    print("Baseline Error: %.2f%%" % (100 - scores[1] * 100), t2 - t1)
    print(history.history)
    return scores, pred_y

本内容不代表本网观点和政治立场,如有侵犯你的权益请联系我们处理。
网友评论
网友评论仅供其表达个人看法,并不表明网站立场。