Incorrect behavior when training a PyTorch neural network
I need to write a neural network that recognizes the digit in an image. At the moment it looks like the higher the loss, the higher the accuracy, and why that happens is a mystery to me. Original image size: 276×144.
import torch
from torch import nn


class DigitRecognition(nn.Module):
    def __init__(self):
        super().__init__()
        self.__forward_propagation = nn.Sequential(
            nn.Conv2d(in_channels=3, out_channels=8, kernel_size=1, stride=1, padding=1),
            nn.ReLU(),
            nn.MaxPool2d(kernel_size=2, stride=2),
            nn.Flatten(),
            nn.Linear(in_features=280, out_features=32),
            nn.Linear(in_features=32, out_features=16),
            nn.Linear(in_features=16, out_features=10),
            nn.Sigmoid(),
        )

    def forward(self, X) -> torch.Tensor:
        return self.__forward_propagation(X)

    @staticmethod
    def accuracy_fn(y_true, y_pred) -> float:
        # Percentage of samples whose argmax prediction matches the label
        correct: int = 0
        for pred, true in zip(y_pred, y_true):
            if torch.argmax(true).item() == torch.argmax(pred).item():
                correct += 1
        acc: float = correct / len(y_true) * 100
        return acc
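As a sanity check on the layer sizes: assuming the raw 276×144 RGB frames were fed in unresized (an assumption, since my dataloader code isn't shown here), Flatten would produce far more than the 280 features the first Linear expects, so the images are presumably resized somewhere in the pipeline (280 = 8·5·7 corresponds to roughly 8×12 inputs):

# Hypothetical shape probe, not part of the training code: it assumes
# unresized 3x276x144 inputs to show what Flatten would then produce.
probe = nn.Sequential(
    nn.Conv2d(3, 8, kernel_size=1, stride=1, padding=1),  # 276x144 -> 278x146
    nn.MaxPool2d(kernel_size=2, stride=2),                # 278x146 -> 139x73
    nn.Flatten(),
)
print(probe(torch.zeros(1, 3, 276, 144)).shape)  # torch.Size([1, 81176]), not 280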
# DEVICE, train_dataloader and test_dataloader are defined elsewhere
# (see the linked files below).
EPOCHS: int = 100
lr: float = 0.0001

# Per-epoch accumulators
train_errors: float = 0.
test_errors: float = 0.
train_acc_errors: float = 0.
test_acc_errors: float = 0.

f: DigitRecognition = DigitRecognition()
loss_fn = nn.CrossEntropyLoss()
optim = torch.optim.Adam(f.parameters(), lr)
f.to(DEVICE)

for EPOCH in range(EPOCHS):
    # Training pass
    f.train()
    for batch in train_dataloader:
        X: torch.Tensor = batch[0].to(DEVICE)
        y_pred = f(X)
        # One-hot encode the digit labels into a [batch, 10] target
        y_true_digit: torch.Tensor = batch[1]
        y_true_canvas: torch.Tensor = torch.zeros(y_pred.shape[0], 10)
        y_true: torch.Tensor = y_true_canvas.scatter_(1, y_true_digit.unsqueeze(1), 1.)
        y_true = y_true.to(DEVICE)
        error: torch.Tensor = loss_fn(y_pred, y_true)
        train_acc = f.accuracy_fn(y_true, y_pred)
        train_errors += error.item()
        train_acc_errors += train_acc
        error.backward()
        optim.zero_grad()
        optim.step()

    # Evaluation pass
    f.eval()
    with torch.inference_mode():
        for batch in test_dataloader:
            X: torch.Tensor = batch[0].to(DEVICE)
            y_pred = f(X)
            y_true_digit: torch.Tensor = batch[1]
            y_true_canvas: torch.Tensor = torch.zeros(y_pred.shape[0], 10)
            y_true: torch.Tensor = y_true_canvas.scatter_(1, y_true_digit.unsqueeze(1), 1.)
            y_true = y_true.to(DEVICE)
            error: torch.Tensor = loss_fn(y_pred, y_true)
            test_acc = f.accuracy_fn(y_true, y_pred)
            test_errors += error.item()
            test_acc_errors += test_acc

    # Average the accumulated values over the number of batches
    mean_train_error: float = train_errors / len(train_dataloader)
    mean_test_error: float = test_errors / len(test_dataloader)
    mean_train_acc: float = train_acc_errors / len(train_dataloader)
    mean_test_acc: float = test_acc_errors / len(test_dataloader)
    print(f"epoch: {EPOCH} / Train. Error: {mean_train_error} / Accuracy: {mean_train_acc}\t** Test. Error: {mean_test_error} / Accuracy: {mean_test_acc}")

    # Reset the accumulators for the next epoch
    train_errors = 0.
    test_errors = 0.
    train_acc_errors = 0.
    test_acc_errors = 0.
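For reference, the optimization-step order used throughout the official PyTorch tutorials clears the gradients before the backward pass, so that step() runs on the gradients backward() just produced. A minimal sketch of that pattern:

optim.zero_grad()                # clear gradients left over from the previous batch
error = loss_fn(y_pred, y_true)
error.backward()                 # compute fresh gradients
optim.step()                     # update the weights using those gradients

I'm not sure whether it matters that in my loop zero_grad() sits between backward() and step().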
Files: https://drive.google.com/drive/folders/12nhWJo73gHUXWa0vg38cCOlz_aKHesKN?usp=sharing
What confuses me is that the lower the loss, the lower the accuracy; shouldn't it be the other way around?
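For completeness, the nn.CrossEntropyLoss docs say it expects raw, unnormalized logits and also accepts plain integer class labels as the target, so perhaps the final Sigmoid and the one-hot scatter_ are both unnecessary. A hypothetical variant of the batch step written that way (the names logits and labels are mine):

# Sketch only: assumes the model's final Sigmoid has been removed,
# so f(X) returns raw logits of shape [N, 10].
X = batch[0].to(DEVICE)
labels = batch[1].to(DEVICE)     # integer digits 0..9, shape [N]
logits = f(X)
error = loss_fn(logits, labels)  # CrossEntropyLoss applies log-softmax itself
acc = (logits.argmax(dim=1) == labels).float().mean().item() * 100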

