Проблема с val_loss при обучении изображений
Я всё перепробовал: добавлял нейроны, добавлял callbacks, но ничего не помогает. Когда модель обучилась, я загрузил её и выбрал 3–5 фото с каждого класса — в итоге модель плохо распознаёт. Может ли это быть из-за того, что исходные изображения имеют размер 768×1024? Или проблема в слоях? Вот код для обучения модели:
import cv2
import numpy as np
from keras.models import Sequential
from keras.layers import Dense, Dropout, Flatten, Conv2D, MaxPooling2D
from keras.preprocessing.image import ImageDataGenerator
from keras.utils import to_categorical
import os
import tensorflow as tf
from sklearn.utils.class_weight import compute_class_weight
from sklearn.model_selection import train_test_split
classes = 3
# Load and preprocess all images from a dataset folder laid out as
# folder/<class_name>/<image files>.
def load_images_from_folder(folder):
    """Load every readable image under ``folder``.

    Each immediate subdirectory of ``folder`` is treated as one class.
    Images are resized to 32x32 and scaled to the [0, 1] range.

    Args:
        folder: Path to the dataset root directory.

    Returns:
        tuple: (images as np.ndarray, one-hot labels, list of int labels).
    """
    images = []
    labels = []
    # sorted() makes the class -> index mapping deterministic: os.listdir()
    # order is OS-dependent, so without sorting the label indices could be
    # scrambled between runs and disagree with the hard-coded class_names
    # list used at inference time. Non-directories are skipped.
    class_names = sorted(
        d for d in os.listdir(folder)
        if os.path.isdir(os.path.join(folder, d))
    )
    for class_index, class_name in enumerate(class_names):
        class_path = os.path.join(folder, class_name)
        for filename in os.listdir(class_path):
            img = cv2.imread(os.path.join(class_path, filename))
            if img is not None:  # skip unreadable / non-image files
                img = cv2.resize(img, (32, 32))  # resize to 32x32 (old comment wrongly said 64x64)
                img = img.astype('float32') / 255
                images.append(img)
                labels.append(class_index)
    return np.array(images), to_categorical(np.array(labels), classes), labels
# Load and preprocess images from the dataset folder
dataset_folder = 'E:/MSHI_2_KYRS_2_SIM/5.v5/tea_sickness_dataset'
x_train, y_train, y_labels = load_images_from_folder(dataset_folder)

# Split data into training and testing sets. stratify keeps the per-class
# ratios identical in both splits, which matters on a small, possibly
# imbalanced dataset (otherwise a class can be under-represented in val).
x_train, x_test, y_train, y_test = train_test_split(
    x_train, y_train, test_size=0.2, random_state=42, stratify=y_labels
)

# Class weights computed from the full label list to counter class
# imbalance; passed to model.fit via class_weight.
class_weights = dict(
    enumerate(
        compute_class_weight(
            class_weight='balanced', classes=np.unique(y_labels), y=y_labels
        )
    )
)
# Define the CNN: three conv blocks with pooling/dropout, then a small
# fully-connected head ending in a 3-way softmax.
model = Sequential([
    Conv2D(32, kernel_size=(3, 3), activation='relu', input_shape=(32, 32, 3)),
    Conv2D(64, (3, 3), activation='relu'),
    MaxPooling2D(pool_size=(2, 2)),
    Dropout(0.25),
    Conv2D(128, (3, 3), activation='relu'),
    MaxPooling2D(pool_size=(2, 2)),
    Dropout(0.25),
    Flatten(),
    Dense(256, activation='relu'),
    Dropout(0.5),
    Dense(128, activation='relu'),
    Dropout(0.5),
    Dense(3, activation='softmax'),
])
model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])

# Train with the held-out validation set; class_weights re-balances the
# loss contribution of each class.
model.fit(
    x_train,
    y_train,
    batch_size=32,
    epochs=35,
    verbose=1,
    validation_data=(x_test, y_test),
    class_weight=class_weights,
)
# Persist the trained model in the native Keras (.keras) format.
save_path = 'E:/MSHI_2_KYRS_2_SIM/5.v5/my_model.keras'
model.save(save_path)
# Reload it immediately so the predictions below exercise the saved file.
loaded_model = tf.keras.models.load_model(save_path)
# Human-readable class names; order must match the directory order used
# when the training labels were built.
class_names = ['algal_leaf', 'brown_blight', 'white_spot']

# Sample images to sanity-check the trained model on.
img_paths = [
    'E:/MSHI_2_KYRS_2_SIM/5.v5/tea_sickness_dataset/algal_leaf/UNADJUSTEDNONRAW_thumb_2a.jpg',
    'E:/MSHI_2_KYRS_2_SIM/5.v5/tea_sickness_dataset/brown_blight/UNADJUSTEDNONRAW_thumb_11c.jpg',
    'E:/MSHI_2_KYRS_2_SIM/5.v5/tea_sickness_dataset/white_spot/UNADJUSTEDNONRAW_thumb_7a.jpg',
    # Add paths to more images here
]

for img_path in img_paths:
    img = cv2.imread(img_path)
    if img is None:  # a bad path would otherwise crash cv2.resize below
        print('Could not read image:', img_path)
        continue
    # Preprocess exactly like the training pipeline: 32x32, float32, [0, 1].
    img = cv2.resize(img, (32, 32))
    img = img.astype('float32') / 255
    img = np.expand_dims(img, axis=0)
    # Make predictions using the loaded model
    predictions = loaded_model.predict(img)
    predicted_class = np.argmax(predictions)
    predicted_class_name = class_names[predicted_class]
    predicted_prob = predictions[0][predicted_class]
    print('Image:', img_path)
    print('Predicted class:', predicted_class_name)
    print('Probability:', predicted_prob)
    print()
Следующий код представляет собой полноценное приложение на PyQt5 для распознавания изображений с помощью обученной модели:
import cv2
import tensorflow as tf
from keras.models import Sequential
from keras.layers import Dense, Dropout, Flatten, Conv2D, MaxPooling2D
from PyQt5.QtWidgets import QApplication, QMainWindow, QLabel, QPushButton, QFileDialog
from PyQt5.QtGui import QPixmap, QImage
from PIL import Image
import numpy as np
import sys
import os
class ImageRecognitionApp(QMainWindow):
    """Main window: pick an image, run it through the loaded Keras model,
    and display the predicted class with its probability."""

    def __init__(self):
        super().__init__()
        # Absolute path to the trained model file
        self.model_path = 'E:/MSHI_2_KYRS_2_SIM/5.v5/my_model.keras'
        self.loaded_model = None
        # Must match the class order used at training time
        self.class_names = ['algal_leaf', 'brown_blight', 'white_spot']
        self.initUI()

    def initUI(self):
        # Main window setup
        self.setWindowTitle('Image Recognition App')
        self.setGeometry(150, 150, 550, 350)
        # Interface elements
        self.image_label = QLabel(self)
        self.image_label.setGeometry(50, 50, 300, 300)
        self.result_label = QLabel(self)
        self.result_label.setGeometry(380, 190, 150, 20)
        self.score_label = QLabel(self)
        self.score_label.setGeometry(380, 170, 400, 20)
        choose_button = QPushButton('вибрати фото', self)
        choose_button.setGeometry(380, 120, 130, 30)
        choose_button.clicked.connect(self.choose_image)
        load_model_button = QPushButton('загрузити модель', self)
        load_model_button.setGeometry(380, 90, 130, 30)
        load_model_button.clicked.connect(self.load_model)

    def reload_image(self, file_path):
        # Load and display the chosen image. convert('RGB') guarantees a
        # 3-channel array, so the QImage stride (width * 3) below is valid
        # even for grayscale or RGBA inputs.
        img = Image.open(file_path).convert('RGB')
        img = img.resize((250, 250))
        img_array = np.array(img)
        qimg = QImage(
            img_array.tobytes(),
            img_array.shape[1],
            img_array.shape[0],
            img_array.shape[1] * 3,
            QImage.Format_RGB888,
        )
        self.image_label.setPixmap(QPixmap.fromImage(qimg))
        # Predict and show the result. predict_image returns (None, None)
        # when no model is loaded; the old implicit-None return crashed the
        # tuple unpacking here with a TypeError.
        predicted_class, prediction_score = self.predict_image(file_path)
        if predicted_class is None:
            self.score_label.setText('Модель не завантажена!')
            self.result_label.setText('')
            return
        self.score_label.setText(f'Клас: {self.class_names[predicted_class]}')
        self.result_label.setText(f'Імовірність: {prediction_score:.2%}')

    def choose_image(self):
        # Handle the image-selection event
        file_dialog = QFileDialog()
        file_path, _ = file_dialog.getOpenFileName(
            self, 'вибрати фото', '', 'Image Files (*.png *.jpg *.bmp)'
        )
        if file_path:
            self.reload_image(file_path)

    def preprocess_image(self, image_path):
        # Prepare an image for the model input: 32x32, float32, [0, 1].
        img = Image.open(image_path).convert('RGB')
        img = img.resize((32, 32))
        img_array = np.array(img, dtype='float32') / 255.0
        # The model was trained on cv2.imread() output, which is BGR;
        # PIL yields RGB. Reverse the channel axis so inference sees the
        # same channel order as training — feeding RGB into a BGR-trained
        # model is a common cause of bad GUI predictions.
        img_array = img_array[:, :, ::-1]
        img_array = np.expand_dims(img_array, axis=0)
        return img_array

    def predict_image(self, image_path):
        # Predict the image class. Returns (class_index, probability) on
        # success and (None, None) when the model has not been loaded yet.
        if self.loaded_model is None:
            print("Model not loaded!")
            return None, None
        preprocessed_image = self.preprocess_image(image_path)
        predictions = self.loaded_model.predict(preprocessed_image)[0]
        predicted_class = np.argmax(predictions)
        prediction_score = predictions[predicted_class]
        return predicted_class, prediction_score

    def load_model(self):
        # Load the trained model from disk, if the file exists.
        if os.path.exists(self.model_path):
            self.loaded_model = tf.keras.models.load_model(self.model_path)
            print("Model loaded successfully!")
        else:
            print("Model file not found!")
if __name__ == '__main__':
    # Standard Qt bootstrap: create the application, show the main window,
    # then hand control to the event loop until the user closes it.
    app = QApplication(sys.argv)
    window = ImageRecognitionApp()
    window.show()
    sys.exit(app.exec_())
Вот собственно результат:
Epoch 1/35
10/10 [==============================] - 1s 64ms/step - loss: 1.1380 - accuracy: 0.3481 - val_loss: 1.1113 - val_accuracy: 0.2297
Epoch 2/35
10/10 [==============================] - 0s 39ms/step - loss: 1.1024 - accuracy: 0.3242 - val_loss: 1.1047 - val_accuracy: 0.2297
Epoch 3/35
10/10 [==============================] - 0s 44ms/step - loss: 1.0980 - accuracy: 0.3413 - val_loss: 1.1043 - val_accuracy: 0.2297
Epoch 4/35
10/10 [==============================] - 0s 46ms/step - loss: 1.1008 - accuracy: 0.3242 - val_loss: 1.1028 - val_accuracy: 0.2297
Epoch 5/35
10/10 [==============================] - 0s 44ms/step - loss: 1.0952 - accuracy: 0.3549 - val_loss: 1.1036 - val_accuracy: 0.2297
Epoch 6/35
10/10 [==============================] - 0s 41ms/step - loss: 1.0953 - accuracy: 0.3891 - val_loss: 1.0994 - val_accuracy: 0.2297
Epoch 7/35
10/10 [==============================] - 0s 41ms/step - loss: 1.0881 - accuracy: 0.3788 - val_loss: 1.0881 - val_accuracy: 0.5676
Epoch 8/35
10/10 [==============================] - 0s 42ms/step - loss: 1.0912 - accuracy: 0.3823 - val_loss: 1.0893 - val_accuracy: 0.3919
Epoch 9/35
10/10 [==============================] - 0s 41ms/step - loss: 1.0817 - accuracy: 0.4369 - val_loss: 1.0355 - val_accuracy: 0.7027
Epoch 10/35
10/10 [==============================] - 0s 41ms/step - loss: 1.0588 - accuracy: 0.4198 - val_loss: 0.9983 - val_accuracy: 0.6892
Epoch 11/35
10/10 [==============================] - 0s 41ms/step - loss: 0.9896 - accuracy: 0.5563 - val_loss: 0.8715 - val_accuracy: 0.5135
Epoch 12/35
10/10 [==============================] - 0s 41ms/step - loss: 0.9994 - accuracy: 0.4881 - val_loss: 0.8297 - val_accuracy: 0.6892
Epoch 13/35
10/10 [==============================] - 0s 40ms/step - loss: 0.8173 - accuracy: 0.6826 - val_loss: 0.6643 - val_accuracy: 0.7162
Epoch 14/35
10/10 [==============================] - 0s 40ms/step - loss: 0.8313 - accuracy: 0.6553 - val_loss: 0.6214 - val_accuracy: 0.7162
Epoch 15/35
10/10 [==============================] - 0s 41ms/step - loss: 0.6548 - accuracy: 0.6962 - val_loss: 0.6051 - val_accuracy: 0.7297
Epoch 16/35
10/10 [==============================] - 0s 42ms/step - loss: 0.6265 - accuracy: 0.7270 - val_loss: 0.5547 - val_accuracy: 0.7432
Epoch 17/35
10/10 [==============================] - 0s 49ms/step - loss: 0.5787 - accuracy: 0.7543 - val_loss: 0.5464 - val_accuracy: 0.7297
Epoch 18/35
10/10 [==============================] - 0s 44ms/step - loss: 0.6448 - accuracy: 0.6860 - val_loss: 0.5095 - val_accuracy: 0.7162
Epoch 19/35
10/10 [==============================] - 0s 43ms/step - loss: 0.5391 - accuracy: 0.7850 - val_loss: 0.5116 - val_accuracy: 0.7027
Epoch 20/35
10/10 [==============================] - 0s 40ms/step - loss: 0.5073 - accuracy: 0.7884 - val_loss: 0.5340 - val_accuracy: 0.7297
Epoch 21/35
10/10 [==============================] - 0s 40ms/step - loss: 0.5293 - accuracy: 0.8259 - val_loss: 0.6398 - val_accuracy: 0.7297
Epoch 22/35
10/10 [==============================] - 0s 40ms/step - loss: 0.4925 - accuracy: 0.7713 - val_loss: 0.4891 - val_accuracy: 0.7432
Epoch 23/35
10/10 [==============================] - 0s 42ms/step - loss: 0.4490 - accuracy: 0.8020 - val_loss: 0.4517 - val_accuracy: 0.7568
Epoch 24/35
10/10 [==============================] - 0s 40ms/step - loss: 0.4105 - accuracy: 0.8123 - val_loss: 0.4311 - val_accuracy: 0.7973
Epoch 25/35
10/10 [==============================] - 0s 40ms/step - loss: 0.4318 - accuracy: 0.8157 - val_loss: 0.6211 - val_accuracy: 0.7432
Epoch 26/35
10/10 [==============================] - 0s 40ms/step - loss: 0.4670 - accuracy: 0.8089 - val_loss: 0.5000 - val_accuracy: 0.7703
Epoch 27/35
10/10 [==============================] - 0s 40ms/step - loss: 0.4716 - accuracy: 0.7986 - val_loss: 0.5347 - val_accuracy: 0.7432
Epoch 28/35
10/10 [==============================] - 0s 41ms/step - loss: 0.4572 - accuracy: 0.7986 - val_loss: 0.5002 - val_accuracy: 0.7973
Epoch 29/35
10/10 [==============================] - 0s 39ms/step - loss: 0.3495 - accuracy: 0.8396 - val_loss: 0.4171 - val_accuracy: 0.8108
Epoch 30/35
10/10 [==============================] - 0s 40ms/step - loss: 0.3656 - accuracy: 0.8669 - val_loss: 0.4699 - val_accuracy: 0.7973
Epoch 31/35
10/10 [==============================] - 0s 39ms/step - loss: 0.2853 - accuracy: 0.8737 - val_loss: 0.4835 - val_accuracy: 0.7973
Epoch 32/35
10/10 [==============================] - 0s 39ms/step - loss: 0.2995 - accuracy: 0.8703 - val_loss: 0.4652 - val_accuracy: 0.8514
Epoch 33/35
10/10 [==============================] - 0s 40ms/step - loss: 0.2706 - accuracy: 0.9044 - val_loss: 0.4609 - val_accuracy: 0.8108
Epoch 34/35
10/10 [==============================] - 0s 39ms/step - loss: 0.3174 - accuracy: 0.8567 - val_loss: 0.4879 - val_accuracy: 0.7432
Epoch 35/35
10/10 [==============================] - 0s 41ms/step - loss: 0.2511 - accuracy: 0.8874 - val_loss: 0.4577 - val_accuracy: 0.8108
1/1 [==============================] - 0s 71ms/step
Image: E:/MSHI_2_KYRS_2_SIM/5.v5/tea_sickness_dataset/algal_leaf/UNADJUSTEDNONRAW_thumb_2a.jpg
Predicted class: algal_leaf
Probability: 0.99933463
1/1 [==============================] - 0s 14ms/step
Image: E:/MSHI_2_KYRS_2_SIM/5.v5/tea_sickness_dataset/brown_blight/UNADJUSTEDNONRAW_thumb_11c.jpg
Predicted class: brown_blight
Probability: 0.98417944