Complete Lab 2
This commit is contained in:
parent 7e8fea0a3b
commit d76db395de
@@ -125,7 +125,7 @@ if __name__ == "__main__":
         avg_epoch_acc = total_epoch_acc / len(test_regression_dataset)
         print(
             f"Epoch [{epoch + 1}/{num_epochs}],",
-            f"Train Loss: {total_epoch_loss},",
+            f"Train Loss: {total_epoch_loss:.10f},",
             f"Used Time: {train_time * 1000:.3f}ms,",
             f"Test Acc: {avg_epoch_acc * 100:.3f}%,",
             f"Used Time: {test_time * 1000:.3f}ms",
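The only substantive change in this and the following logging hunks is the loss format spec: {total_epoch_loss:.10f} renders the float as fixed-point with exactly ten digits after the decimal point, where the bare {total_epoch_loss} falls back to Python's default repr. A minimal standalone sketch of the difference (the loss value here is made up for illustration):

    total_epoch_loss = 0.123456789012345  # hypothetical value
    print(f"Train Loss: {total_epoch_loss},")       # Train Loss: 0.123456789012345,
    print(f"Train Loss: {total_epoch_loss:.10f},")  # Train Loss: 0.1234567890,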
@@ -130,7 +130,7 @@ if __name__ == "__main__":
         avg_epoch_acc = total_epoch_acc / len(test_binarycls_dataset)
         print(
             f"Epoch [{epoch + 1}/{num_epochs}],",
-            f"Train Loss: {total_epoch_loss},",
+            f"Train Loss: {total_epoch_loss:.10f},",
             f"Used Time: {train_time * 1000:.3f}ms,",
             f"Test Acc: {avg_epoch_acc * 100:.3f}%,",
             f"Used Time: {test_time * 1000:.3f}ms",
@@ -118,7 +118,7 @@ if __name__ == "__main__":
         avg_epoch_acc = total_epoch_acc / len(test_mnist_dataset)
         print(
             f"Epoch [{epoch + 1}/{num_epochs}],",
-            f"Train Loss: {total_epoch_loss},",
+            f"Train Loss: {total_epoch_loss:.10f},",
             f"Used Time: {train_time * 1000:.3f}ms,",
             f"Test Acc: {avg_epoch_acc * 100:.3f}%,",
             f"Used Time: {test_time * 1000:.3f}ms",
@@ -107,7 +107,7 @@ if __name__ == "__main__":
         avg_epoch_acc = total_epoch_acc / len(test_regression_dataset)
         print(
             f"Epoch [{epoch + 1}/{num_epochs}],",
-            f"Train Loss: {total_epoch_loss},",
+            f"Train Loss: {total_epoch_loss:.10f},",
             f"Used Time: {train_time * 1000:.3f}ms,",
             f"Test Acc: {avg_epoch_acc * 100:.3f}%,",
             f"Used Time: {test_time * 1000:.3f}ms",
@@ -111,7 +111,7 @@ if __name__ == "__main__":
         avg_epoch_acc = total_epoch_acc / len(test_binarycls_dataset)
         print(
             f"Epoch [{epoch + 1}/{num_epochs}],",
-            f"Train Loss: {total_epoch_loss},",
+            f"Train Loss: {total_epoch_loss:.10f},",
             f"Used Time: {train_time * 1000:.3f}ms,",
             f"Test Acc: {avg_epoch_acc * 100:.3f}%,",
             f"Used Time: {test_time * 1000:.3f}ms",
@@ -99,7 +99,7 @@ if __name__ == "__main__":
         avg_epoch_acc = total_epoch_acc / len(test_mnist_dataset)
         print(
             f"Epoch [{epoch + 1}/{num_epochs}],",
-            f"Train Loss: {total_epoch_loss},",
+            f"Train Loss: {total_epoch_loss:.10f},",
             f"Used Time: {train_time * 1000:.3f}ms,",
             f"Test Acc: {avg_epoch_acc * 100:.3f}%,",
             f"Used Time: {test_time * 1000:.3f}ms",
@@ -74,9 +74,37 @@ class Model_3_3(nn.Module):
         x = self.fc3(x)
         x = self.activate_fn(x)
         return x
 
 
+class Model_3_4(nn.Module):
+    def __init__(self, num_classes):
+        super().__init__()
+        self.flatten = nn.Flatten()
+        self.fc1 = nn.Linear(in_features=28 * 28, out_features=1024)
+        self.fc2 = nn.Linear(in_features=1024, out_features=256)
+        self.fc3 = nn.Linear(in_features=256, out_features=num_classes)
+        self.activate_fn = leaky_relu
+
+    def forward(self, x: torch.Tensor):
+        x = self.flatten(x)
+        x = self.fc1(x)
+        x = self.activate_fn(x)
+
+        x = self.fc2(x)
+        x = self.activate_fn(x)
+
+        x = self.fc3(x)
+        x = self.activate_fn(x)
+        return x
+
+
 if __name__ == "__main__":
-    train_MNIST_CLS(Model=Model_3_1)
-    train_MNIST_CLS(Model=Model_3_2)
-    train_MNIST_CLS(Model=Model_3_3)
+    print("Training model 1 (activation: relu):")
+    train_loss_3_1, test_acc_3_1 = train_MNIST_CLS(Model=Model_3_1)  # activation: relu
+    print("Training model 2 (activation: sigmoid):")
+    train_loss_3_2, test_acc_3_2 = train_MNIST_CLS(Model=Model_3_2)  # activation: sigmoid
+    print("Training model 3 (activation: tanh):")
+    train_loss_3_3, test_acc_3_3 = train_MNIST_CLS(Model=Model_3_3)  # activation: tanh
+    print("Training model 4 (activation: leaky_relu):")
+    train_loss_3_4, test_acc_3_4 = train_MNIST_CLS(Model=Model_3_4)  # activation: leaky_relu
+
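Model_3_1 through Model_3_4 share the same 784-1024-256-num_classes architecture and differ only in the activation bound to self.activate_fn (relu, sigmoid, tanh, leaky_relu, all from torch.nn.functional). A hypothetical sketch, not part of this commit, of the same experiment expressed as one parameterized class:

    import torch
    from torch import nn
    from torch.nn.functional import relu, sigmoid, tanh, leaky_relu

    class MLP3(nn.Module):
        # Hypothetical stand-in for Model_3_1..Model_3_4; the activation is passed in.
        def __init__(self, num_classes, activate_fn=relu):
            super().__init__()
            self.flatten = nn.Flatten()
            self.fc1 = nn.Linear(28 * 28, 1024)
            self.fc2 = nn.Linear(1024, 256)
            self.fc3 = nn.Linear(256, num_classes)
            self.activate_fn = activate_fn

        def forward(self, x: torch.Tensor):
            x = self.activate_fn(self.fc1(self.flatten(x)))
            x = self.activate_fn(self.fc2(x))
            return self.activate_fn(self.fc3(x))  # activation kept on the output layer, as in the commit

    # One instance per activation, mirroring the four training runs above:
    models = {fn.__name__: MLP3(num_classes=10, activate_fn=fn)
              for fn in (relu, sigmoid, tanh, leaky_relu)}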
102  Lab2/code/4.py  Normal file
@@ -0,0 +1,102 @@
+import time
+import numpy as np
+import torch
+from torch.nn.functional import *
+from torch.utils.data import Dataset, DataLoader
+from torch import nn
+from torchvision import datasets, transforms
+from tqdm import tqdm
+from utils import *
+
+import ipdb
+
+class Model_4_1(nn.Module):
+    def __init__(self, num_classes):
+        super().__init__()
+        self.flatten = nn.Flatten()
+        self.fc1 = nn.Linear(in_features=28 * 28, out_features=512)
+        self.fc2 = nn.Linear(in_features=512, out_features=num_classes)
+        self.activate_fn = leaky_relu
+
+    def forward(self, x: torch.Tensor):
+        x = self.flatten(x)
+        x = self.fc1(x)
+        x = self.activate_fn(x)
+
+        x = self.fc2(x)
+        x = self.activate_fn(x)
+        return x
+
+
+class Model_4_2(nn.Module):
+    def __init__(self, num_classes):
+        super().__init__()
+        self.flatten = nn.Flatten()
+        self.fc1 = nn.Linear(in_features=28 * 28, out_features=1024)
+        self.fc2 = nn.Linear(in_features=1024, out_features=num_classes)
+        self.activate_fn = leaky_relu
+
+    def forward(self, x: torch.Tensor):
+        x = self.flatten(x)
+        x = self.fc1(x)
+        x = self.activate_fn(x)
+
+        x = self.fc2(x)
+        x = self.activate_fn(x)
+        return x
+
+
+class Model_4_3(nn.Module):
+    def __init__(self, num_classes):
+        super().__init__()
+        self.flatten = nn.Flatten()
+        self.fc1 = nn.Linear(in_features=28 * 28, out_features=512)
+        self.fc2 = nn.Linear(in_features=512, out_features=512)
+        self.fc3 = nn.Linear(in_features=512, out_features=num_classes)
+        self.activate_fn = leaky_relu
+
+    def forward(self, x: torch.Tensor):
+        x = self.flatten(x)
+        x = self.fc1(x)
+        x = self.activate_fn(x)
+
+        x = self.fc2(x)
+        x = self.activate_fn(x)
+
+        x = self.fc3(x)
+        x = self.activate_fn(x)
+        return x
+
+
+class Model_4_4(nn.Module):
+    def __init__(self, num_classes):
+        super().__init__()
+        self.flatten = nn.Flatten()
+        self.fc1 = nn.Linear(in_features=28 * 28, out_features=1024)
+        self.fc2 = nn.Linear(in_features=1024, out_features=1024)
+        self.fc3 = nn.Linear(in_features=1024, out_features=num_classes)
+        self.activate_fn = leaky_relu
+
+    def forward(self, x: torch.Tensor):
+        x = self.flatten(x)
+        x = self.fc1(x)
+        x = self.activate_fn(x)
+
+        x = self.fc2(x)
+        x = self.activate_fn(x)
+
+        x = self.fc3(x)
+        x = self.activate_fn(x)
+        return x
+
+
+if __name__ == "__main__":
+    print("Training model 1 (hidden_size=512, hidden_layer=1):")
+    train_loss_4_1, test_acc_4_1 = train_MNIST_CLS(Model=Model_4_1)  # hidden_size=512, hidden_layer=1
+    print("Training model 2 (hidden_size=1024, hidden_layer=1):")
+    train_loss_4_2, test_acc_4_2 = train_MNIST_CLS(Model=Model_4_2)  # hidden_size=1024, hidden_layer=1
+    print("Training model 3 (hidden_size=512, hidden_layer=2):")
+    train_loss_4_3, test_acc_4_3 = train_MNIST_CLS(Model=Model_4_3)  # hidden_size=512, hidden_layer=2
+    print("Training model 4 (hidden_size=1024, hidden_layer=2):")
+    train_loss_4_4, test_acc_4_4 = train_MNIST_CLS(Model=Model_4_4)  # hidden_size=1024, hidden_layer=2
+
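The four Model_4_* classes in 4.py sweep a 2x2 grid: hidden width 512 vs 1024, and one vs two hidden layers, all with leaky_relu. A hypothetical helper, not part of this commit, that builds the same grid from two parameters:

    import torch
    from torch import nn

    def make_mlp(num_classes: int, hidden_size: int, hidden_layers: int) -> nn.Sequential:
        # Hypothetical: flatten, then hidden_layers LeakyReLU-activated hidden layers,
        # then the classifier head (nn.LeakyReLU stands in for the functional leaky_relu above,
        # and is applied after every Linear, including the last, as in the commit's models).
        dims = [28 * 28] + [hidden_size] * hidden_layers + [num_classes]
        layers: list[nn.Module] = [nn.Flatten()]
        for d_in, d_out in zip(dims[:-1], dims[1:]):
            layers += [nn.Linear(d_in, d_out), nn.LeakyReLU()]
        return nn.Sequential(*layers)

    # The four configurations trained in 4.py:
    grid = [(512, 1), (1024, 1), (512, 2), (1024, 2)]
    models = [make_mlp(10, h, d) for h, d in grid]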
@@ -106,7 +106,7 @@ class My_optimizer:
 
 
 def train_MNIST_CLS(Model:nn.Module):
-    learning_rate = 5e-2
+    learning_rate = 8e-2
     num_epochs = 10
     batch_size = 512
     num_classes = 10
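The only change here is the SGD step size, raised from 5e-2 to 8e-2. For reference, plain SGD as configured in the next hunk (torch.optim.SGD with no momentum) updates each parameter as w <- w - lr * grad; a minimal hand-rolled step showing what one update does at the new rate:

    import torch

    w = torch.randn(3, requires_grad=True)  # toy parameter
    loss = (w ** 2).sum()
    loss.backward()
    with torch.no_grad():
        w -= 8e-2 * w.grad                  # learning_rate = 8e-2, as set in this hunk
        w.grad.zero_()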
@@ -139,6 +139,8 @@ def train_MNIST_CLS(Model:nn.Module):
     criterion = nn.CrossEntropyLoss()
     optimizer = torch.optim.SGD(model.parameters(), lr=learning_rate)
 
+    train_loss = list()
+    test_acc = list()
     for epoch in range(num_epochs):
         model.train()
         total_epoch_loss = 0
@@ -182,8 +184,11 @@ def train_MNIST_CLS(Model:nn.Module):
         avg_epoch_acc = total_epoch_acc / len(test_mnist_dataset)
         print(
             f"Epoch [{epoch + 1}/{num_epochs}],",
-            f"Train Loss: {total_epoch_loss},",
+            f"Train Loss: {total_epoch_loss:.10f},",
             f"Used Time: {train_time * 1000:.3f}ms,",
             f"Test Acc: {avg_epoch_acc * 100:.3f}%,",
             f"Used Time: {test_time * 1000:.3f}ms",
         )
+        train_loss.append(total_epoch_loss)
+        test_acc.append(avg_epoch_acc * 100)
+    return train_loss, test_acc
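With these additions, train_MNIST_CLS records the per-epoch loss and accuracy and returns them instead of only printing, so the __main__ blocks above can compare runs or plot curves. A minimal usage sketch, assuming matplotlib is available and the commit's function and model classes are importable; not part of this commit:

    import matplotlib.pyplot as plt

    train_loss, test_acc = train_MNIST_CLS(Model=Model_4_1)  # lists returned per epoch
    fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(10, 4))
    ax1.plot(train_loss)
    ax1.set_xlabel("epoch"); ax1.set_ylabel("train loss")
    ax2.plot(test_acc)
    ax2.set_xlabel("epoch"); ax2.set_ylabel("test accuracy (%)")
    plt.show()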
File diff suppressed because one or more lines are too long