
TensorFlow: automatic neon-glow effect

I wrote some code to reproduce a neon-light glow effect, but the result is not good.

Here is the code.

import os
import numpy as np
import tensorflow as tf
from tensorflow.keras.initializers import glorot_uniform
from tensorflow.keras.layers import Input, Activation, ZeroPadding2D, Conv2D
from tensorflow.keras.models import Model
from tensorflow.keras.preprocessing import image


def input_data():
    path_x = "data/train/trainA/"
    path_y = "data/train/trainB/"
    train_num = sum([len(x) for _, _, x in os.walk(os.path.dirname(path_x))])
    print(train_num)
    image_train_1 = np.zeros((train_num, 1024, 1024, 3))
    label_train_1 = np.zeros((train_num, 1024, 1024, 3))
    count = 0
    for i in range(train_num):
        img = image.load_img(path_x + "test (" + str(i + 1) + ").png")
        x = image.img_to_array(img)
        x = np.expand_dims(x, axis=0)
        image_train_1[count] = x
        img = image.load_img(path_y + "test (" + str(i + 1) + ").png")
        x = image.img_to_array(img)
        x = np.expand_dims(x, axis=0)
        label_train_1[count] = x
        count += 1
    return image_train_1, label_train_1


def test_data():
    path_x = "data/train/testA/"
    path_y = "data/train/testB/"
    test_num = sum([len(x) for _, _, x in os.walk(os.path.dirname(path_x))])
    print(test_num)
    test_x = np.zeros((test_num, 1024, 1024, 3))
    test_y = np.zeros((test_num, 1024, 1024, 3))
    count = 0
    for i in range(test_num):
        img = image.load_img(path_x + "test (" + str(i + 1) + ").png")
        x = image.img_to_array(img)
        x = np.expand_dims(x, axis=0)
        test_x[count] = x
        img = image.load_img(path_y + "test (" + str(i + 1) + ").png")
        x = image.img_to_array(img)
        x = np.expand_dims(x, axis=0)
        test_y[count] = x
        count += 1
    return test_x, test_y


def filterModel(input_shape=(1024, 1024, 3)):
    shape = Input(input_shape)
    x = ZeroPadding2D((0, 0))(shape)
    x = Conv2D(32, (9, 9), strides=(1, 1), padding='same', name='conv1',
               kernel_initializer=glorot_uniform(seed=0))(x)
    x = Activation('relu')(x)
    x = Conv2D(64, (3, 3), strides=(1, 1), padding='same', name='conv2',
               kernel_initializer=glorot_uniform(seed=0))(x)
    x = Activation('relu')(x)
    x = Conv2D(128, (3, 3), strides=(1, 1), padding='same', name='conv3',
               kernel_initializer=glorot_uniform(seed=0))(x)
    x = Activation('relu')(x)
    output = Conv2D(3, (5, 5), strides=(1, 1), padding='same', name='conv5',
                    kernel_initializer=glorot_uniform(seed=0))(x)
    model1 = Model(inputs=shape, outputs=output, name='FilterModel')
    return model1


# trainData
image_train, label_train = input_data()
image_train = image_train.astype('float32')
label_train = label_train.astype('float32')
image_train /= 255
label_train /= 255

# testData
test_x, test_y = test_data()
test_x = test_x.astype('float32')
test_y = test_y.astype('float32')
test_x /= 255
test_y /= 255

# training
model = filterModel((1024, 1024, 3))
model.compile(optimizer=tf.keras.optimizers.Adam(learning_rate=0.000003),
              loss="mean_squared_error",
              metrics=["accuracy"])
model.summary()
model.fit(image_train, label_train, validation_data=(test_x, test_y), epochs=100, batch_size=2)
model.save("filterModel.h5")
print("Model save done!")

Model summary:

Model: "FilterModel"_________________________________________________________________Layer (type)                 Output Shape              Param #   =================================================================input_1 (InputLayer)         [(None, 1024, 1024, 3)]   0         _________________________________________________________________zero_padding2d (ZeroPadding2 (None, 1024, 1024, 3)     0         _________________________________________________________________conv1 (Conv2D)               (None, 1024, 1024, 32)    7808      _________________________________________________________________activation (Activation)      (None, 1024, 1024, 32)    0         _________________________________________________________________conv2 (Conv2D)               (None, 1024, 1024, 64)    18496     _________________________________________________________________activation_1 (Activation)    (None, 1024, 1024, 64)    0         _________________________________________________________________conv3 (Conv2D)               (None, 1024, 1024, 128)   73856     _________________________________________________________________activation_2 (Activation)    (None, 1024, 1024, 128)   0         _________________________________________________________________conv5 (Conv2D)               (None, 1024, 1024, 3)     9603      =================================================================Total params: 109,763Trainable params: 109,763Non-trainable params: 0

I trained for 100 epochs because the accuracy and loss hardly change after that point.
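The plateau shows up in the fit history; a minimal sketch, assuming the model.fit(...) call in the script above is kept but its return value is captured:

# Capture the training history instead of discarding it.
history = model.fit(image_train, label_train,
                    validation_data=(test_x, test_y),
                    epochs=100, batch_size=2)

# Plot train/validation loss per epoch to see where they stop improving.
import matplotlib.pyplot as plt

plt.plot(history.history["loss"], label="train loss")
plt.plot(history.history["val_loss"], label="val loss")
plt.xlabel("epoch")
plt.legend()
plt.savefig("loss_curves.png")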

Result (two example figures, each showing test_x | predict | test_y side by side):

[Image: Input & Predict & Label]
[Image: Input & Predict & Label]
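The predict column comes from running the saved model on test_x; a minimal sketch of how one such image is produced (the /255 scaling matches the training script above, and the output filename is just an example):

import numpy as np
import tensorflow as tf

# Load the model saved by the training script and run it on the test images.
model = tf.keras.models.load_model("filterModel.h5")
pred = model.predict(test_x, batch_size=2)               # float32, roughly in [0, 1]

# Undo the /255 scaling so the prediction can be viewed next to test_x and test_y.
pred = np.clip(pred * 255.0, 0, 255).astype("uint8")
tf.keras.preprocessing.image.save_img("predict_0.png", pred[0])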

How can I optimize this model? Or are there other ways to achieve this effect?

