Fix exploding gradients when training the manual softmax model

Jingfan Ke 2023-10-10 19:11:21 +08:00
parent c384059131
commit 9c8f12e431
4 changed files with 33 additions and 26 deletions
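
Note on the fix: exponentiating large logits in a hand-written softmax overflows to inf, and the resulting nan loss surfaces as "exploding" gradients. The body of My_CrossEntropyLoss is not shown in this diff, so the following is only a minimal sketch of the standard max-subtraction fix, not the repository's actual code:

import torch

def naive_softmax(logits: torch.Tensor) -> torch.Tensor:
    # Overflows: exp(1000) == inf, so the ratio becomes nan.
    e = torch.exp(logits)
    return e / e.sum(dim=-1, keepdim=True)

def stable_softmax(logits: torch.Tensor) -> torch.Tensor:
    # Softmax is shift-invariant: subtracting the row-wise max
    # changes nothing mathematically but keeps every exponent <= 0.
    shifted = logits - logits.max(dim=-1, keepdim=True).values
    e = torch.exp(shifted)
    return e / e.sum(dim=-1, keepdim=True)

x = torch.tensor([[1000.0, 0.0, -1000.0]])
print(naive_softmax(x))   # tensor([[nan, 0., 0.]])
print(stable_softmax(x))  # tensor([[1., 0., 0.]])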

File 1 of 4

@@ -53,7 +53,7 @@ class My_Linear:
         return self.params

-class Model:
+class Model_2_1:
     def __init__(self):
         self.linear = My_Linear(1, 1)
         self.params = self.linear.params
@@ -102,10 +102,14 @@ device = "cuda:0" if torch.cuda.is_available() else "cpu"
 dataset = My_Dataset()
 dataloader = DataLoader(
-    dataset=dataset, batch_size=batch_size, shuffle=True, num_workers=5, pin_memory=True
+    dataset=dataset,
+    batch_size=batch_size,
+    shuffle=True,
+    num_workers=14,
+    pin_memory=True,
 )
-model = Model().to(device)
+model = Model_2_1().to(device)
 criterion = My_BCELoss()
 optimizer = My_optimizer(model.parameters(), lr=learning_rate)

File 2 of 4

@@ -7,9 +7,9 @@ from tqdm import tqdm
 import ipdb

-class Model(nn.Module):
+class Model_2_2(nn.Module):
     def __init__(self):
-        super(Model, self).__init__()
+        super(Model_2_2, self).__init__()
         self.linear = nn.Linear(1, 1, dtype=torch.float64)

     def forward(self, x):
@@ -38,17 +38,21 @@ class My_Dataset(Dataset):
         return x, y

-learning_rate = 1e-2
+learning_rate = 5e-2
 num_epochs = 10
 batch_size = 1024
 device = "cuda:0" if torch.cuda.is_available() else "cpu"

 dataset = My_Dataset()
 dataloader = DataLoader(
-    dataset=dataset, batch_size=batch_size, shuffle=True, num_workers=5, pin_memory=True
+    dataset=dataset,
+    batch_size=batch_size,
+    shuffle=True,
+    num_workers=14,
+    pin_memory=True,
 )
-model = Model().to(device)
+model = Model_2_2().to(device)
 criterion = nn.BCELoss()
 optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate)

File 3 of 4

@@ -27,7 +27,7 @@ class My_CrossEntropyLoss:
 class My_optimizer:
     def __init__(self, params: list[torch.Tensor], lr: float):
-        self.params = params
+        self.params = list(params)
         self.lr = lr

     def step(self):
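
This list(params) change matters because model.parameters() returns a generator: it can be consumed only once, so without the copy every optimizer step after the first would iterate over nothing and silently skip the update. A minimal reproduction of the failure mode (stand-in names, not the repository's code):

import torch

w = torch.zeros(2, requires_grad=True)
w.grad = torch.ones(2)

gen = (p for p in [w])   # stands in for model.parameters(), a generator
for p in gen:            # first optimizer step consumes the generator
    p.data -= 0.1 * p.grad
for p in gen:            # second step: generator exhausted, body never runs
    p.data -= 0.1 * p.grad
print(w.data)            # tensor([-0.1000, -0.1000]) -- only one update applied

params = [w]             # materialized list: safe to iterate on every step
for _ in range(2):
    for p in params:
        p.data -= 0.1 * p.grad
print(w.data)            # tensor([-0.3000, -0.3000]) -- two more updates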
@@ -96,7 +96,7 @@ class Model_3_1:
         return self.params

-learning_rate = 5e-3
+learning_rate = 5e-1
 num_epochs = 10
 batch_size = 4096
 num_classes = 10
@@ -105,27 +105,27 @@ device = "cuda:0" if torch.cuda.is_available() else "cpu"
 transform = transforms.Compose(
     [
         transforms.ToTensor(),
-        transforms.Normalize((0.5,), (0.5,)),
+        transforms.Normalize((0.5,), (1.0,)),
     ]
 )
 train_dataset = datasets.FashionMNIST(
-    root="./dataset", train=True, transform=transform, download=True
+    root="../dataset", train=True, transform=transform, download=True
 )
 test_dataset = datasets.FashionMNIST(
-    root="./dataset", train=False, transform=transform, download=True
+    root="../dataset", train=False, transform=transform, download=True
 )
 train_loader = DataLoader(
     dataset=train_dataset,
     batch_size=batch_size,
     shuffle=True,
-    num_workers=4,
+    num_workers=14,
     pin_memory=True,
 )
 test_loader = DataLoader(
     dataset=test_dataset,
     batch_size=batch_size,
     shuffle=True,
-    num_workers=4,
+    num_workers=14,
     pin_memory=True,
 )
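
The Normalize change rescales the inputs: Normalize applies (x - mean) / std channel-wise, so with std 0.5 the [0, 1] pixels from ToTensor() are stretched to [-1, 1], while std 1.0 leaves them in [-0.5, 0.5]. Halving the input scale shrinks the logits feeding the manual softmax, lowering the overflow risk. A quick check of the two ranges:

import torch
from torchvision import transforms

x = torch.rand(1, 28, 28)  # ToTensor() output lies in [0, 1]
old = transforms.Normalize((0.5,), (0.5,))(x)
new = transforms.Normalize((0.5,), (1.0,))(x)
print(old.min().item(), old.max().item())  # roughly -1.0 .. 1.0
print(new.min().item(), new.max().item())  # roughly -0.5 .. 0.5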
@@ -148,7 +148,6 @@ for epoch in range(num_epochs):
         )
         outputs = model(images)
-        # ipdb.set_trace()
         loss = criterion(outputs, one_hot_targets)
         total_epoch_loss += loss

File 4 of 4

@@ -8,9 +8,9 @@ from torch.utils.data import DataLoader
 import ipdb

-class Model(nn.Module):
+class Model_3_2(nn.Module):
     def __init__(self, num_classes):
-        super(Model, self).__init__()
+        super(Model_3_2, self).__init__()
         self.flatten = nn.Flatten()
         self.linear = nn.Linear(28 * 28, num_classes)
@@ -20,7 +20,7 @@ class Model(nn.Module):
         return x

-learning_rate = 5e-3
+learning_rate = 5e-2
 num_epochs = 10
 batch_size = 4096
 num_classes = 10
@@ -29,33 +29,33 @@ device = "cuda:0" if torch.cuda.is_available() else "cpu"
 transform = transforms.Compose(
     [
         transforms.ToTensor(),
-        transforms.Normalize((0.5,), (0.5,)),
+        transforms.Normalize((0.5,), (1.0,)),
     ]
 )
 train_dataset = datasets.FashionMNIST(
-    root="./dataset", train=True, transform=transform, download=True
+    root="../dataset", train=True, transform=transform, download=True
 )
 test_dataset = datasets.FashionMNIST(
-    root="./dataset", train=False, transform=transform, download=True
+    root="../dataset", train=False, transform=transform, download=True
 )
 train_loader = DataLoader(
     dataset=train_dataset,
     batch_size=batch_size,
     shuffle=True,
-    num_workers=4,
+    num_workers=14,
     pin_memory=True,
 )
 test_loader = DataLoader(
     dataset=test_dataset,
     batch_size=batch_size,
     shuffle=True,
-    num_workers=4,
+    num_workers=14,
     pin_memory=True,
 )
-model = Model(num_classes).to(device)
+model = Model_3_2(num_classes).to(device)
 criterion = nn.CrossEntropyLoss()
-optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate)
+optimizer = torch.optim.SGD(model.parameters(), lr=learning_rate)

 for epoch in range(num_epochs):
     total_epoch_loss = 0
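
The last file also swaps Adam for plain SGD alongside the learning-rate bump from 5e-3 to 5e-2. Adam rescales each coordinate's step by a running second-moment estimate, so its effective step size is roughly the learning rate regardless of gradient magnitude, whereas SGD's update is exactly p -= lr * grad. The sketch below verifies that torch.optim.SGD reduces to that rule; whether this was the motivation here is an assumption, since the commit message does not say:

import torch

w = torch.randn(3, requires_grad=True)
loss = (w ** 2).sum()
loss.backward()

# Plain SGD update, the rule torch.optim.SGD applies per parameter:
lr = 5e-2
with torch.no_grad():
    manual = w - lr * w.grad

opt = torch.optim.SGD([w], lr=lr)
opt.step()
print(torch.allclose(w, manual))  # True: SGD is exactly p -= lr * grad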