Unexpected result of `train_function` (Empty logs). Please use `Model.compile(..., run_eagerly=True)`
When I run model.fit, I get the following error: Unexpected result of `train_function`
(Empty logs). Please use `Model.compile(..., run_eagerly=True)`, or
`tf.config.run_functions_eagerly(True)` for more information of where went wrong,
or file a issue/bug to `tf.keras`.
import os
import cv2
import numpy as np
import skimage.io
from matplotlib import pyplot as plt
from patchify import patchify
from PIL import Image
np.random.seed(42)
seed = 42
batch_size = 8
from tensorflow.keras.preprocessing.image import ImageDataGenerator
img_data_gen_args = dict(rescale = 1/255.0)
mask_data_gen_args = dict(rescale = 1/255.0)  #Binarize the output again.
image_data_generator = ImageDataGenerator(**img_data_gen_args)
image_generator = image_data_generator.flow_from_directory("E:/FAGR_small/patches/data_for_training_and_testing/train_images/",
                                                           seed=42,
                                                           batch_size=batch_size,
                                                           target_size=(224, 224),
                                                           class_mode=None)  #Very important to set this, otherwise it
                                                                             #returns multiple numpy arrays thinking
                                                                             #class mode is binary.
mask_data_generator = ImageDataGenerator(**mask_data_gen_args)
mask_generator = mask_data_generator.flow_from_directory("E:/FAGR_small/patches/data_for_training_and_testing/train_masks/",
                                                         seed=42,
                                                         batch_size=batch_size,
                                                         target_size=(224, 224),
                                                         color_mode='grayscale',  #Read masks in grayscale
                                                         class_mode=None)
valid_img_generator = image_data_generator.flow_from_directory("E:/FAGR_small/patches/data_for_training_and_testing/val_images/",
                                                               seed=42,
                                                               batch_size=batch_size,
                                                               target_size=(224, 224),
                                                               class_mode=None)  #Default batch size 32, if not specified here
valid_mask_generator = mask_data_generator.flow_from_directory("E:/FAGR_small/patches/data_for_training_and_testing/val_masks/",
                                                               seed=42,
                                                               batch_size=batch_size,
                                                               target_size=(224, 224),
                                                               color_mode='grayscale',  #Read masks in grayscale
                                                               class_mode=None)  #Default batch size 32, if not specified here
train_generator = zip(image_generator, mask_generator)
val_generator = zip(valid_img_generator, valid_mask_generator)
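Note that zip() gives back a plain Python iterator of (image_batch, mask_batch) tuples rather than a Keras Sequence. To see what model.fit will receive, one tuple can be probed like this (my own throwaway check; it advances both underlying iterators by one batch, which is harmless since they loop over the data indefinitely):

probe = zip(image_generator, mask_generator)
imgs, masks = next(probe)        # one (image_batch, mask_batch) pair
print(imgs.shape, masks.shape)   # expected: (8, 224, 224, 3) (8, 224, 224, 1)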
x = image_generator.next()
y = mask_generator.next()
for i in range(0, 1):
    image = x[i]
    mask = y[i]
    plt.subplot(1, 2, 1)
    plt.imshow(image)
    plt.subplot(1, 2, 2)
    plt.imshow(mask, cmap='gray')
    plt.show()
    print(image.shape)
    print(mask.shape)
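To confirm the masks really are binary after rescaling (dividing by 255 only yields {0, 1} if the files contain just 0 and 255, which is an assumption about my data), a one-line check on the batch fetched above:

print(np.unique(y))   # expect [0. 1.] for clean binary masks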
"""-------------metrics-----------------"""
#Jaccard distance loss mimics IoU.
from keras import backend as K
def jaccard_distance_loss(y_true, y_pred, smooth=100):
    intersection = K.sum(K.sum(K.abs(y_true * y_pred), axis=-1))
    sum_ = K.sum(K.sum(K.abs(y_true) + K.abs(y_pred), axis=-1))
    jac = (intersection + smooth) / (sum_ - intersection + smooth)
    return (1 - jac) * smooth
#Dice metric can be a great metric to track accuracy of semantic segmentation.
def dice_metric(y_pred, y_true):
    intersection = K.sum(K.sum(K.abs(y_true * y_pred), axis=-1))
    union = K.sum(K.sum(K.abs(y_true) + K.abs(y_pred), axis=-1))
    # if y_pred.sum() == 0 and y_pred.sum() == 0:
    #     return 1.0
    return 2 * intersection / union
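A quick eager-mode sanity check of both functions on toy tensors (my own throwaway example, not part of the pipeline):

import tensorflow as tf
y_t = tf.ones((2, 224, 224, 1))        # toy masks, all foreground
y_p = tf.fill((2, 224, 224, 1), 0.5)   # toy predictions
print(dice_metric(y_p, y_t).numpy())            # 2*0.5/1.5 = ~0.667 here
print(jaccard_distance_loss(y_t, y_p).numpy())  # scalar loss value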
"""-----------------------------------------"""
IMG_HEIGHT = x.shape[1]
IMG_WIDTH = x.shape[2]
IMG_CHANNELS = x.shape[3]
from tensorflow.keras.models import Model
from tensorflow.keras.layers import Input, Conv2D, MaxPooling2D, UpSampling2D, concatenate, Conv2DTranspose, BatchNormalization, Dropout, Lambda
from tensorflow.keras.optimizers import Adam
from tensorflow.keras.layers import Activation, MaxPool2D, Concatenate
"""----------------Model-------------------"""
def conv_block(input, num_filters):
    x = Conv2D(num_filters, 3, padding="same")(input)
    x = BatchNormalization()(x)   #Not in the original network.
    x = Activation("relu")(x)
    x = Conv2D(num_filters, 3, padding="same")(x)
    x = BatchNormalization()(x)   #Not in the original network
    x = Activation("relu")(x)
    return x
#Encoder block: conv block followed by maxpooling
def encoder_block(input, num_filters):
    x = conv_block(input, num_filters)
    p = MaxPool2D((2, 2))(x)
    return x, p
#Decoder block
#skip_features gets input from the encoder for concatenation
def decoder_block(input, skip_features, num_filters):
    x = Conv2DTranspose(num_filters, (2, 2), strides=2, padding="same")(input)
    x = Concatenate()([x, skip_features])
    x = conv_block(x, num_filters)
    return x
#Build U-Net using the blocks
def build_unet(input_shape):
    inputs = Input(input_shape)
    s1, p1 = encoder_block(inputs, 64)
    s2, p2 = encoder_block(p1, 128)
    s3, p3 = encoder_block(p2, 256)
    s4, p4 = encoder_block(p3, 512)
    b1 = conv_block(p4, 1024)   #Bridge
    d1 = decoder_block(b1, s4, 512)
    d2 = decoder_block(d1, s3, 256)
    d3 = decoder_block(d2, s2, 128)
    d4 = decoder_block(d3, s1, 64)
    outputs = Conv2D(1, 1, padding="same", activation="sigmoid")(d4)   #Binary (can be multiclass)
    model = Model(inputs, outputs, name="U-Net")
    return model
"""----------------------------------------------"""
input_shape = (IMG_HEIGHT, IMG_WIDTH, IMG_CHANNELS)
model = build_unet(input_shape)
from focal_loss import BinaryFocalLoss
model.compile(optimizer=Adam(lr=1e-3), loss=BinaryFocalLoss(gamma=2),
              metrics=[dice_metric])
model.summary()
It works fine up to this point. Output:
Found 130 images belonging to 1 classes.
Found 130 images belonging to 1 classes.
Found 33 images belonging to 1 classes.
Found 33 images belonging to 1 classes.
(224, 224, 3)
(224, 224, 1)
Model: "U-Net"
__________________________________________________________________________________________________
Layer (type) Output Shape Param # Connected to
==================================================================================================
input_11 (InputLayer) [(None, 224, 224, 3 0 []
)]
conv2d_190 (Conv2D) (None, 224, 224, 64 1792 ['input_11[0][0]']
)
batch_normalization_180 (Batch (None, 224, 224, 64 256 ['conv2d_190[0][0]']
Normalization) )
activation_180 (Activation) (None, 224, 224, 64 0 ['batch_normalization_180[0][0]']
)
conv2d_191 (Conv2D) (None, 224, 224, 64 36928 ['activation_180[0][0]']
)
batch_normalization_181 (Batch (None, 224, 224, 64 256 ['conv2d_191[0][0]']
Normalization) )
activation_181 (Activation) (None, 224, 224, 64 0 ['batch_normalization_181[0][0]']
)
max_pooling2d_40 (MaxPooling2D (None, 112, 112, 64 0 ['activation_181[0][0]']
) )
conv2d_192 (Conv2D) (None, 112, 112, 12 73856 ['max_pooling2d_40[0][0]']
8)
batch_normalization_182 (Batch (None, 112, 112, 12 512 ['conv2d_192[0][0]']
Normalization) 8)
activation_182 (Activation) (None, 112, 112, 12 0 ['batch_normalization_182[0][0]']
8)
conv2d_193 (Conv2D) (None, 112, 112, 12 147584 ['activation_182[0][0]']
8)
batch_normalization_183 (Batch (None, 112, 112, 12 512 ['conv2d_193[0][0]']
Normalization) 8)
activation_183 (Activation) (None, 112, 112, 12 0 ['batch_normalization_183[0][0]']
8)
max_pooling2d_41 (MaxPooling2D (None, 56, 56, 128) 0 ['activation_183[0][0]']
)
conv2d_194 (Conv2D) (None, 56, 56, 256) 295168 ['max_pooling2d_41[0][0]']
batch_normalization_184 (Batch (None, 56, 56, 256) 1024 ['conv2d_194[0][0]']
Normalization)
activation_184 (Activation) (None, 56, 56, 256) 0 ['batch_normalization_184[0][0]']
conv2d_195 (Conv2D) (None, 56, 56, 256) 590080 ['activation_184[0][0]']
batch_normalization_185 (Batch (None, 56, 56, 256) 1024 ['conv2d_195[0][0]']
Normalization)
activation_185 (Activation) (None, 56, 56, 256) 0 ['batch_normalization_185[0][0]']
max_pooling2d_42 (MaxPooling2D (None, 28, 28, 256) 0 ['activation_185[0][0]']
)
conv2d_196 (Conv2D) (None, 28, 28, 512) 1180160 ['max_pooling2d_42[0][0]']
batch_normalization_186 (Batch (None, 28, 28, 512) 2048 ['conv2d_196[0][0]']
Normalization)
activation_186 (Activation) (None, 28, 28, 512) 0 ['batch_normalization_186[0][0]']
conv2d_197 (Conv2D) (None, 28, 28, 512) 2359808 ['activation_186[0][0]']
batch_normalization_187 (Batch (None, 28, 28, 512) 2048 ['conv2d_197[0][0]']
Normalization)
activation_187 (Activation) (None, 28, 28, 512) 0 ['batch_normalization_187[0][0]']
max_pooling2d_43 (MaxPooling2D (None, 14, 14, 512) 0 ['activation_187[0][0]']
)
conv2d_198 (Conv2D) (None, 14, 14, 1024 4719616 ['max_pooling2d_43[0][0]']
)
batch_normalization_188 (Batch (None, 14, 14, 1024 4096 ['conv2d_198[0][0]']
Normalization) )
activation_188 (Activation) (None, 14, 14, 1024 0 ['batch_normalization_188[0][0]']
)
conv2d_199 (Conv2D) (None, 14, 14, 1024 9438208 ['activation_188[0][0]']
)
batch_normalization_189 (Batch (None, 14, 14, 1024 4096 ['conv2d_199[0][0]']
Normalization) )
activation_189 (Activation) (None, 14, 14, 1024 0 ['batch_normalization_189[0][0]']
)
conv2d_transpose_40 (Conv2DTra (None, 28, 28, 512) 2097664 ['activation_189[0][0]']
nspose)
concatenate_40 (Concatenate) (None, 28, 28, 1024 0 ['conv2d_transpose_40[0][0]',
) 'activation_187[0][0]']
conv2d_200 (Conv2D) (None, 28, 28, 512) 4719104 ['concatenate_40[0][0]']
batch_normalization_190 (Batch (None, 28, 28, 512) 2048 ['conv2d_200[0][0]']
Normalization)
activation_190 (Activation) (None, 28, 28, 512) 0 ['batch_normalization_190[0][0]']
conv2d_201 (Conv2D) (None, 28, 28, 512) 2359808 ['activation_190[0][0]']
batch_normalization_191 (Batch (None, 28, 28, 512) 2048 ['conv2d_201[0][0]']
Normalization)
activation_191 (Activation) (None, 28, 28, 512) 0 ['batch_normalization_191[0][0]']
conv2d_transpose_41 (Conv2DTra (None, 56, 56, 256) 524544 ['activation_191[0][0]']
nspose)
concatenate_41 (Concatenate) (None, 56, 56, 512) 0 ['conv2d_transpose_41[0][0]',
'activation_185[0][0]']
conv2d_202 (Conv2D) (None, 56, 56, 256) 1179904 ['concatenate_41[0][0]']
batch_normalization_192 (Batch (None, 56, 56, 256) 1024 ['conv2d_202[0][0]']
Normalization)
activation_192 (Activation) (None, 56, 56, 256) 0 ['batch_normalization_192[0][0]']
conv2d_203 (Conv2D) (None, 56, 56, 256) 590080 ['activation_192[0][0]']
batch_normalization_193 (Batch (None, 56, 56, 256) 1024 ['conv2d_203[0][0]']
Normalization)
activation_193 (Activation) (None, 56, 56, 256) 0 ['batch_normalization_193[0][0]']
conv2d_transpose_42 (Conv2DTra (None, 112, 112, 12 131200 ['activation_193[0][0]']
nspose) 8)
concatenate_42 (Concatenate) (None, 112, 112, 25 0 ['conv2d_transpose_42[0][0]',
6) 'activation_183[0][0]']
conv2d_204 (Conv2D) (None, 112, 112, 12 295040 ['concatenate_42[0][0]']
8)
batch_normalization_194 (Batch (None, 112, 112, 12 512 ['conv2d_204[0][0]']
Normalization) 8)
activation_194 (Activation) (None, 112, 112, 12 0 ['batch_normalization_194[0][0]']
8)
conv2d_205 (Conv2D) (None, 112, 112, 12 147584 ['activation_194[0][0]']
8)
batch_normalization_195 (Batch (None, 112, 112, 12 512 ['conv2d_205[0][0]']
Normalization) 8)
activation_195 (Activation) (None, 112, 112, 12 0 ['batch_normalization_195[0][0]']
8)
conv2d_transpose_43 (Conv2DTra (None, 224, 224, 64 32832 ['activation_195[0][0]']
nspose) )
concatenate_43 (Concatenate) (None, 224, 224, 12 0 ['conv2d_transpose_43[0][0]',
8) 'activation_181[0][0]']
conv2d_206 (Conv2D) (None, 224, 224, 64 73792 ['concatenate_43[0][0]']
)
batch_normalization_196 (Batch (None, 224, 224, 64 256 ['conv2d_206[0][0]']
Normalization) )
activation_196 (Activation) (None, 224, 224, 64 0 ['batch_normalization_196[0][0]']
)
conv2d_207 (Conv2D) (None, 224, 224, 64 36928 ['activation_196[0][0]']
)
batch_normalization_197 (Batch (None, 224, 224, 64 256 ['conv2d_207[0][0]']
Normalization) )
activation_197 (Activation) (None, 224, 224, 64 0 ['batch_normalization_197[0][0]']
)
conv2d_208 (Conv2D) (None, 224, 224, 1) 65 ['activation_197[0][0]']
num_train_imgs = len(os.listdir("E:/FAGR_small/patches/data_for_training_and_testing/train_images/"))
steps_per_epoch = num_train_imgs // batch_size
num_val_images = len(os.listdir("E:/FAGR_small/patches/data_for_training_and_testing/val_images/"))
validation_steps = num_val_images // batch_size
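As an aside, flow_from_directory reported 130 training and 33 validation images, while os.listdir on these top-level folders only lists the single class subdirectory inside each. The iterators' .samples attribute carries the counts Keras actually found; a minimal alternative sketch (not what produced the run below):

steps_per_epoch = image_generator.samples // batch_size        # 130 // 8 = 16
validation_steps = valid_img_generator.samples // batch_size   # 33 // 8 = 4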
history = model.fit(train_generator, validation_data=val_generator,
                    steps_per_epoch=steps_per_epoch,
                    validation_steps=validation_steps, epochs=50)
After running model.fit I get:
Unexpected result of `train_function` (Empty logs). Please use `Model.compile(..., run_eagerly=True)`, or `tf.config.run_functions_eagerly(True)` for more information of where went wrong, or file a issue/bug to `tf.keras`.
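For reference, the two diagnostics the message itself recommends look like this (shown only to make the suggested step concrete; BinaryFocalLoss and dice_metric are the objects defined above):

import tensorflow as tf
tf.config.run_functions_eagerly(True)   # run tf.function code eagerly for a readable traceback
# or equivalently, per model:
model.compile(optimizer=Adam(lr=1e-3), loss=BinaryFocalLoss(gamma=2),
              metrics=[dice_metric], run_eagerly=True)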