Image regression (demosaicing) with Keras

I am doing color demosaicing with Keras. I have tried a convolutional neural network, but it does not work properly. My code:

    from __future__ import print_function
    from keras.datasets import cifar10
    from keras.preprocessing.image import ImageDataGenerator
    from keras.models import Sequential
    from keras.layers import Dense, Dropout, Activation, Flatten
    from keras.layers import Convolution2D, MaxPooling2D
    from keras.optimizers import SGD
    from keras.utils import np_utils
    import bayer_pattern
    from cnn.networks.model_define import CNNModel
    import argparse
    import numpy
    from PIL import Image

    ap = argparse.ArgumentParser()
    ap.add_argument("-s", "--save-model", type=int, default=-1,
        help="(optional) whether or not model should be saved to disk")
    ap.add_argument("-l", "--load-model", type=int, default=-1,
        help="(optional) whether or not pre-trained model should be loaded")
    ap.add_argument("-w", "--weights", type=str,
        help="(optional) path to weights file")
    args = vars(ap.parse_args())


    batch_size = 32
    nb_classes = 10
    nb_epoch = 200
    data_augmentation = False

    # input image dimensions
    img_rows, img_cols = 32, 32
    # the CIFAR10 images are RGB
    img_channels = 3

    # the data, shuffled and split between train and test sets; the class labels
    # are not used here, the RGB images themselves are the regression targets
    (X_train, y_train), (X_test, y_test) = cifar10.load_data()
    print('X_train shape:', X_train.shape)
    print('X_test shape:', X_test.shape)
    print(X_train.shape[0], 'train samples')
    print(X_test.shape[0], 'test samples')

    # X_train.dump("X_train.dat")
    # X_test.dump("X_test.dat")
    # X_bayer_train = bayer_pattern.makeInputsCifar(X_train)
    # X_bayer_test = bayer_pattern.makeInputsCifar(X_test)
    # X_bayer_train.dump("X_bayer_train.dat")
    # X_bayer_test.dump("X_bayer_test.dat")


    # load the pre-computed Bayer-mosaic inputs dumped by the commented-out block above
    X_bayer_train = numpy.load("X_bayer_train.dat")
    X_bayer_test = numpy.load("X_bayer_test.dat")

    model = Sequential()

    model.add(Convolution2D(16, 5, 5, border_mode='same', input_shape=(img_channels, img_rows, img_cols)))
    model.add(Activation("relu"))

    model.add(Convolution2D(32, 5, 5, border_mode="same"))
    model.add(Activation("relu"))

    model.add(Convolution2D(64, 5, 5, border_mode="same"))
    model.add(Activation("relu"))

    model.add(Convolution2D(32, 5, 5, border_mode="same"))
    model.add(Activation("relu"))

    # model.add(Convolution2D(64, 5, 5, border_mode="same"))
    # model.add(Activation("relu"))
    # model.add(Convolution2D(64, 5, 5, border_mode="same"))
    # model.add(Activation("relu"))

    # model.add(Convolution2D(32, 5, 5, border_mode="same"))
    # model.add(Activation("relu"))

    # model.add(Convolution2D(32, 5, 5, border_mode="same"))
    # model.add(Activation("relu"))

    model.add(Convolution2D(16, 5, 5, border_mode="same"))
    model.add(Activation("relu"))

    model.add(Convolution2D(3, 5, 5, border_mode="same"))
    model.add(Activation("relu"))


    # for a mean squared error regression problem
    model.compile(optimizer='rmsprop', loss='mse')

    if not data_augmentation:
        print('Not using data augmentation.')
        model.fit(X_bayer_train, X_train,
                  batch_size=batch_size,
                  nb_epoch=nb_epoch,
                  validation_data=(X_bayer_test, X_test), shuffle=True)

        score = model.evaluate(X_bayer_test, X_test, batch_size=batch_size)
        print(score)

        # predictions come back as float32 in channels-first order; convert to
        # HxWxC and clip to 0-255 uint8 before saving with PIL
        img = model.predict(X_bayer_test[0:1])[0].swapaxes(0, 2).swapaxes(0, 1)
        imgans = X_test[0].swapaxes(0, 2).swapaxes(0, 1)
        predicted = Image.fromarray(numpy.clip(img, 0, 255).astype('uint8'), 'RGB')
        predicted.save('predicted.png')
        original = Image.fromarray(imgans.astype('uint8'), 'RGB')
        original.save('original.png')

        print(bayer_pattern.psnr(img,imgans))

        print("[INFO] dumping weights to file...")
        model.save('models/2Sept.h5')

    else:
        print('Using real-time data augmentation.')

        # this will do preprocessing and realtime data augmentation
        datagen = ImageDataGenerator(
            featurewise_center=False,  # set input mean to 0 over the dataset
            samplewise_center=False,  # set each sample mean to 0
            featurewise_std_normalization=False,  # divide inputs by std of the dataset
            samplewise_std_normalization=False,  # divide each input by its std
            zca_whitening=False,  # apply ZCA whitening
            rotation_range=0,  # randomly rotate images in the range (degrees, 0 to 180)
            width_shift_range=0.1,  # randomly shift images horizontally (fraction of total width)
            height_shift_range=0.1,  # randomly shift images vertically (fraction of total height)
            horizontal_flip=True,  # randomly flip images
            vertical_flip=False)  # randomly flip images

        # compute quantities required for featurewise normalization
        # (std, mean, and principal components if ZCA whitening is applied)
        datagen.fit(X_train)

        # fit the model on the batches generated by datagen.flow()
        # model.fit_generator(datagen.flow(X_train, Y_train,
        #                     batch_size=batch_size),
        #                     samples_per_epoch=X_train.shape[0],
        #                     nb_epoch=nb_epoch,
        #                     validation_data=(X_test, Y_test))
        model.fit_generator(datagen.flow(X_bayer_train, X_train,
                            batch_size=batch_size),
                            samples_per_epoch=X_bayer_train.shape[0],
                            nb_epoch=nb_epoch,
                            validation_data=(X_bayer_test, X_test))
        print("[INFO] dumping weights to file...")
        model.save_weights(args["weights"], overwrite=True)
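
For context, bayer_pattern is a local helper module that is not shown above. The following is only a sketch of what makeInputsCifar and psnr might look like (an assumption: an RGGB pattern applied to channels-first arrays, with the non-sampled channels zeroed so the mosaic keeps the (N, 3, 32, 32) shape the network expects):

    import numpy

    def makeInputsCifar(images):
        # images: (N, 3, H, W) uint8 RGB, channels-first.
        # Keep only the channel sampled at each position of an RGGB Bayer grid
        # and zero the other two channels, so the shape stays (N, 3, H, W).
        mosaic = numpy.zeros_like(images)
        mosaic[:, 0, 0::2, 0::2] = images[:, 0, 0::2, 0::2]  # R at even rows, even cols
        mosaic[:, 1, 0::2, 1::2] = images[:, 1, 0::2, 1::2]  # G at even rows, odd cols
        mosaic[:, 1, 1::2, 0::2] = images[:, 1, 1::2, 0::2]  # G at odd rows, even cols
        mosaic[:, 2, 1::2, 1::2] = images[:, 2, 1::2, 1::2]  # B at odd rows, odd cols
        return mosaic

    def psnr(predicted, reference, peak=255.0):
        # peak signal-to-noise ratio in dB between two images of the same shape
        mse = numpy.mean((predicted.astype('float64') - reference.astype('float64')) ** 2)
        if mse == 0:
            return float('inf')
        return 10.0 * numpy.log10(peak ** 2 / mse)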

Can anyone suggest an architecture for this image demosaicing problem? I am currently using a convolutional neural network and the CIFAR-10 dataset.
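
One possible direction (a sketch under assumptions, not a verified solution): scale the inputs and targets to [0, 1] and finish with a linear convolutional layer instead of a ReLU, so the regressed pixel values are not forced through an activation. The variant below reuses the Keras 1.x API and the variables defined above:

    # hypothetical variant: fully convolutional, linear output, inputs/targets in [0, 1]
    X_bayer_train_n = X_bayer_train.astype('float32') / 255.0
    X_train_n = X_train.astype('float32') / 255.0

    model = Sequential()
    model.add(Convolution2D(32, 3, 3, border_mode='same',
                            input_shape=(img_channels, img_rows, img_cols)))
    model.add(Activation('relu'))
    model.add(Convolution2D(64, 3, 3, border_mode='same'))
    model.add(Activation('relu'))
    model.add(Convolution2D(32, 3, 3, border_mode='same'))
    model.add(Activation('relu'))
    # linear output: 3 channels, no activation, so negative values are not clipped
    model.add(Convolution2D(3, 3, 3, border_mode='same'))
    model.compile(optimizer='adam', loss='mse')
    model.fit(X_bayer_train_n, X_train_n, batch_size=batch_size, nb_epoch=nb_epoch)

With targets in [0, 1], predictions would need to be multiplied by 255 and clipped before being saved as images.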
