Fix the gradient-explosion problem when training the hand-written softmax model
commit 9c8f12e431
parent c384059131
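The diff below only touches hyperparameters, data loading, and optimizer wiring, so the softmax stabilization itself is not visible in these hunks. As a rough, illustrative sketch (assumed, not taken from this commit; function names are hypothetical), a hand-written softmax/cross-entropy is usually kept from exploding by shifting the logits by their row-wise maximum and working in log space:

import torch

def stable_softmax(logits: torch.Tensor) -> torch.Tensor:
    # Subtracting the per-row max leaves the softmax unchanged but keeps exp() finite.
    shifted = logits - logits.max(dim=1, keepdim=True).values
    exp = shifted.exp()
    return exp / exp.sum(dim=1, keepdim=True)

def stable_cross_entropy(logits: torch.Tensor, one_hot: torch.Tensor) -> torch.Tensor:
    # log-softmax via log-sum-exp avoids log(0) and the huge gradients it produces.
    shifted = logits - logits.max(dim=1, keepdim=True).values
    log_probs = shifted - shifted.exp().sum(dim=1, keepdim=True).log()
    return -(one_hot * log_probs).sum(dim=1).mean()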
@@ -53,7 +53,7 @@ class My_Linear:
         return self.params


-class Model:
+class Model_2_1:
     def __init__(self):
         self.linear = My_Linear(1, 1)
         self.params = self.linear.params
@@ -102,10 +102,14 @@ device = "cuda:0" if torch.cuda.is_available() else "cpu"

 dataset = My_Dataset()
 dataloader = DataLoader(
-    dataset=dataset, batch_size=batch_size, shuffle=True, num_workers=5, pin_memory=True
+    dataset=dataset,
+    batch_size=batch_size,
+    shuffle=True,
+    num_workers=14,
+    pin_memory=True,
 )

-model = Model().to(device)
+model = Model_2_1().to(device)
 criterion = My_BCELoss()
 optimizer = My_optimizer(model.parameters(), lr=learning_rate)
@@ -7,9 +7,9 @@ from tqdm import tqdm
 import ipdb


-class Model(nn.Module):
+class Model_2_2(nn.Module):
     def __init__(self):
-        super(Model, self).__init__()
+        super(Model_2_2, self).__init__()
         self.linear = nn.Linear(1, 1, dtype=torch.float64)

     def forward(self, x):
@@ -38,17 +38,21 @@ class My_Dataset(Dataset):
         return x, y


-learning_rate = 1e-2
+learning_rate = 5e-2
 num_epochs = 10
 batch_size = 1024
 device = "cuda:0" if torch.cuda.is_available() else "cpu"

 dataset = My_Dataset()
 dataloader = DataLoader(
-    dataset=dataset, batch_size=batch_size, shuffle=True, num_workers=5, pin_memory=True
+    dataset=dataset,
+    batch_size=batch_size,
+    shuffle=True,
+    num_workers=14,
+    pin_memory=True,
 )

-model = Model().to(device)
+model = Model_2_2().to(device)
 criterion = nn.BCELoss()
 optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate)
@@ -27,7 +27,7 @@ class My_CrossEntropyLoss:

 class My_optimizer:
     def __init__(self, params: list[torch.Tensor], lr: float):
-        self.params = params
+        self.params = list(params)
         self.lr = lr

     def step(self):
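The self.params = list(params) change above matters because model.parameters() returns a generator: storing it directly means the first pass over it exhausts it, and every later step() silently updates nothing. A minimal sketch of the fixed optimizer, assuming a plain SGD-style update (the bodies of step() and zero_grad() are not shown in this hunk and are filled in here as assumptions):

import torch

class My_optimizer:
    def __init__(self, params, lr: float):
        # Materialize the generator so the parameters can be iterated on every step.
        self.params = list(params)
        self.lr = lr

    def step(self):
        # Vanilla gradient descent: p <- p - lr * p.grad, done outside autograd.
        with torch.no_grad():
            for p in self.params:
                if p.grad is not None:
                    p -= self.lr * p.grad

    def zero_grad(self):
        for p in self.params:
            if p.grad is not None:
                p.grad.zero_()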
@@ -96,7 +96,7 @@ class Model_3_1:
         return self.params


-learning_rate = 5e-3
+learning_rate = 5e-1
 num_epochs = 10
 batch_size = 4096
 num_classes = 10
@@ -105,27 +105,27 @@ device = "cuda:0" if torch.cuda.is_available() else "cpu"
 transform = transforms.Compose(
     [
         transforms.ToTensor(),
-        transforms.Normalize((0.5,), (0.5,)),
+        transforms.Normalize((0.5,), (1.0,)),
     ]
 )
 train_dataset = datasets.FashionMNIST(
-    root="./dataset", train=True, transform=transform, download=True
+    root="../dataset", train=True, transform=transform, download=True
 )
 test_dataset = datasets.FashionMNIST(
-    root="./dataset", train=False, transform=transform, download=True
+    root="../dataset", train=False, transform=transform, download=True
 )
 train_loader = DataLoader(
     dataset=train_dataset,
     batch_size=batch_size,
     shuffle=True,
-    num_workers=4,
+    num_workers=14,
     pin_memory=True,
 )
 test_loader = DataLoader(
     dataset=test_dataset,
     batch_size=batch_size,
     shuffle=True,
-    num_workers=4,
+    num_workers=14,
     pin_memory=True,
 )
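For reference, the Normalize change above switches from centering-and-scaling to centering only: Normalize(mean, std) computes (x - mean) / std, so a std of (1.0,) leaves the scale of the [0, 1] ToTensor output untouched. A small illustrative check (random data, not from this repo):

import torch
from torchvision import transforms

x = torch.rand(1, 28, 28)                                # ToTensor() output lies in [0, 1]
center_only = transforms.Normalize((0.5,), (1.0,))(x)    # range roughly [-0.5, 0.5]
center_scale = transforms.Normalize((0.5,), (0.5,))(x)   # range roughly [-1, 1]
print(center_only.min(), center_only.max(), center_scale.min(), center_scale.max())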
@@ -148,7 +148,6 @@ for epoch in range(num_epochs):
         )

         outputs = model(images)
-        # ipdb.set_trace()
         loss = criterion(outputs, one_hot_targets)
         total_epoch_loss += loss
@@ -8,9 +8,9 @@ from torch.utils.data import DataLoader
 import ipdb


-class Model(nn.Module):
+class Model_3_2(nn.Module):
     def __init__(self, num_classes):
-        super(Model, self).__init__()
+        super(Model_3_2, self).__init__()
         self.flatten = nn.Flatten()
         self.linear = nn.Linear(28 * 28, num_classes)
@@ -20,7 +20,7 @@ class Model(nn.Module):
         return x


-learning_rate = 5e-3
+learning_rate = 5e-2
 num_epochs = 10
 batch_size = 4096
 num_classes = 10
@@ -29,33 +29,33 @@ device = "cuda:0" if torch.cuda.is_available() else "cpu"
 transform = transforms.Compose(
     [
         transforms.ToTensor(),
-        transforms.Normalize((0.5,), (0.5,)),
+        transforms.Normalize((0.5,), (1.0,)),
     ]
 )
 train_dataset = datasets.FashionMNIST(
-    root="./dataset", train=True, transform=transform, download=True
+    root="../dataset", train=True, transform=transform, download=True
 )
 test_dataset = datasets.FashionMNIST(
-    root="./dataset", train=False, transform=transform, download=True
+    root="../dataset", train=False, transform=transform, download=True
 )
 train_loader = DataLoader(
     dataset=train_dataset,
     batch_size=batch_size,
     shuffle=True,
-    num_workers=4,
+    num_workers=14,
     pin_memory=True,
 )
 test_loader = DataLoader(
     dataset=test_dataset,
     batch_size=batch_size,
     shuffle=True,
-    num_workers=4,
+    num_workers=14,
     pin_memory=True,
 )

-model = Model(num_classes).to(device)
+model = Model_3_2(num_classes).to(device)
 criterion = nn.CrossEntropyLoss()
-optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate)
+optimizer = torch.optim.SGD(model.parameters(), lr=learning_rate)

 for epoch in range(num_epochs):
     total_epoch_loss = 0
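The last hunk swaps Adam for plain SGD. As a self-contained usage sketch (the shapes and the 5e-2 learning rate mirror the script; the random data is only for illustration), torch.optim.SGD without momentum applies the same p <- p - lr * p.grad update that the manual My_optimizer above implements:

import torch
import torch.nn.functional as F

# A single linear classifier over flattened 28x28 images, as in the diff.
model = torch.nn.Linear(28 * 28, 10)
optimizer = torch.optim.SGD(model.parameters(), lr=5e-2)

x = torch.randn(4, 28 * 28)            # stand-in batch of flattened images
target = torch.randint(0, 10, (4,))    # stand-in class labels

loss = F.cross_entropy(model(x), target)
optimizer.zero_grad()
loss.backward()
optimizer.step()   # each parameter moves by -lr * grad; nothing adaptive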