nan в результате U-net
Я пытаюсь сегментировать изображения с помощью unet, но в итоге получаю nan, кто-нибудь с таким сталкивался и может объяснить как это решить
def unet(num_classes = 2, input_shape= (256, 256, 1)):
img_input = Input(input_shape)
# Block 1
x = Conv2D(64, (3, 3), padding='same', name='block1_conv1')(img_input)
x = BatchNormalization()(x)
x = Activation('relu')(x)
x = Conv2D(64, (3, 3), padding='same', name='block1_conv2')(x)
x = BatchNormalization()(x)
block_1_out = Activation('relu')(x)
x = MaxPooling2D()(block_1_out)
# Block 2
x = Conv2D(128, (3, 3), padding='same', name='block2_conv1')(x)
x = BatchNormalization()(x)
x = Activation('relu')(x)
x = Conv2D(128, (3, 3), padding='same', name='block2_conv2')(x)
x = BatchNormalization()(x)
block_2_out = Activation('relu')(x)
x = MaxPooling2D()(block_2_out)
# Block 3
x = Conv2D(256, (3, 3), padding='same', name='block3_conv1')(x)
x = BatchNormalization()(x)
x = Activation('relu')(x)
x = Conv2D(256, (3, 3), padding='same', name='block3_conv2')(x)
x = BatchNormalization()(x)
x = Activation('relu')(x)
x = Conv2D(256, (3, 3), padding='same', name='block3_conv3')(x)
x = BatchNormalization()(x)
block_3_out = Activation('relu')(x)
x = MaxPooling2D()(block_3_out)
# Block 4
x = Conv2D(512, (3, 3), padding='same', name='block4_conv1')(x)
x = BatchNormalization()(x)
x = Activation('relu')(x)
x = Conv2D(512, (3, 3), padding='same', name='block4_conv2')(x)
x = BatchNormalization()(x)
x = Activation('relu')(x)
x = Conv2D(512, (3, 3), padding='same', name='block4_conv3')(x)
x = BatchNormalization()(x)
block_4_out = Activation('relu')(x)
# UP 2
x = Conv2DTranspose(256, (2, 2), strides=(2, 2), padding='same', name = 'Conv2DTranspose_UP2')(block_4_out)
x = BatchNormalization()(x)
x = Activation('relu')(x)
x = concatenate([x, block_3_out])
x = Conv2D(256, (3, 3), padding='same')(x)
x = BatchNormalization()(x)
x = Activation('relu')(x)
x = Conv2D(256, (3, 3), padding='same')(x)
x = BatchNormalization()(x)
x = Activation('relu')(x)
# UP 3
x = Conv2DTranspose(128, (2, 2), strides=(2, 2), padding='same', name = 'Conv2DTranspose_UP3')(x)
x = BatchNormalization()(x)
x = Activation('relu')(x)
x = concatenate([x, block_2_out])
x = Conv2D(128, (3, 3), padding='same')(x)
x = BatchNormalization()(x)
x = Activation('relu')(x)
x = Conv2D(128, (3, 3), padding='same')(x)
x = BatchNormalization()(x)
x = Activation('relu')(x)
# UP 4
x = Conv2DTranspose(64, (2, 2), strides=(2, 2), padding='same', name = 'Conv2DTranspose_UP4')(x)
x = BatchNormalization()(x)
x = Activation('relu')(x)
x = concatenate([x, block_1_out])
x = Conv2D(64, (3, 3), padding='same')(x)
x = BatchNormalization()(x)
x = Activation('relu')(x)
x = Conv2D(64, (3, 3), padding='same')(x)
x = BatchNormalization()(x)
x = Activation('relu')(x)
x = Conv2D(num_classes, (3, 3), activation='sigmoid', padding='same')(x)
model = Model(img_input, x)
model.compile(optimizer=Adam(lr=0.005),
loss='categorical_crossentropy',
metrics=["accuracy"])
model.summary()
return model
res=model.predict(one_test_norm)
res
array([[[[nan, nan],
[nan, nan],
[nan, nan],
...,
[nan, nan],
[nan, nan],
[nan, nan]],
[[nan, nan],
[nan, nan],
[nan, nan],
...,
[nan, nan],
[nan, nan],
[nan, nan]],
[[nan, nan],
[nan, nan],
[nan, nan],
...,
[nan, nan],
[nan, nan],
[nan, nan]],
...,