real(b)-pred(r)
遇到的问题:
- 如果生成的数据x = torch.unsqueeze(torch.linspace(-3, 3, 300),dim=1) # shape=(300, 1)范围更大时,比如在区间[-30,30],如何用pytorch对数据集做标准化?(不做标准化loss最后爆掉了)torch.nn.BatchNorm1d(注意正确的类名,没有torch.nn.batchnormalization1d)不work,会报运行时错误
- 训练集和测试集更好的划分方法是什么?我用了按照奇数偶数index划分
import torch
import torch.utils.data as Data
import matplotlib.pyplot as plt
from torch.autograd import Variable
import torch.nn.functional as F
class Model(torch.nn.Module):
    """A two-layer fully connected network with a ReLU activation.

    Maps ``n_inputs`` features through one hidden layer of size
    ``n_hidden`` to ``n_outputs`` outputs.
    """

    def __init__(self, n_inputs, n_hidden, n_outputs):
        super(Model, self).__init__()
        # Affine hidden layer, ReLU nonlinearity, then a linear output head.
        self.hidden = torch.nn.Linear(n_inputs, n_hidden)
        self.relu = torch.nn.ReLU()
        self.predict = torch.nn.Linear(n_hidden, n_outputs)

    def forward(self, batch_x):
        """Forward pass: hidden -> ReLU -> output."""
        hidden_act = self.relu(self.hidden(batch_x))
        return self.predict(hidden_act)
def train(model, epochs, dataset, batch_size=16, lr=0.001):
    """Train *model* on *dataset* with SGD and mean-squared-error loss.

    Args:
        model: a ``torch.nn.Module`` mapping a batch of inputs to predictions.
        epochs: number of full passes over the dataset.
        dataset: a ``torch.utils.data.Dataset`` yielding ``(x, y)`` pairs.
        batch_size: mini-batch size for the DataLoader.
        lr: SGD learning rate.

    Returns:
        A list with the loss value of every mini-batch step, in order.
    """
    optimizer = torch.optim.SGD(model.parameters(), lr=lr)
    criterion = torch.nn.MSELoss()
    loss_stats = []
    dataloader = Data.DataLoader(dataset, batch_size, shuffle=True)
    for i in range(epochs):
        for batch_x, batch_y in dataloader:
            y_pred = model(batch_x)
            loss = criterion(y_pred, batch_y)
            loss_stats.append(loss.item())
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
        # Periodic progress report: loss of the last batch in this epoch.
        if i % 100 == 0:
            print("Epoch :" + str(i) + ", " + "loss :" + str(loss.item()))
    return loss_stats
def get_predictions(model, xs):
    """Run *model* on each sample of *xs* and return a list of predictions.

    Inference only: gradients are disabled so no autograd graph is built
    (the original version kept one graph alive per sample, wasting memory).

    Args:
        model: a callable ``torch.nn.Module``.
        xs: an iterable of input tensors (e.g. rows of a 2-D tensor).

    Returns:
        A list of prediction tensors, one per sample, in input order.
    """
    with torch.no_grad():
        return [model(x) for x in xs]
def x2(x):
    """Return the square of ``x`` (element-wise for tensors)."""
    return x ** 2
def draw(xs, ys, y_preds):
    """Plot the ground-truth curve and the model's predictions.

    Args:
        xs: 1-column tensor of inputs (x axis).
        ys: tensor of true targets, drawn as a thin blue line.
        y_preds: sequence of predictions, drawn as a thick red line.
    """
    plt.plot(xs.data.numpy(), ys.data.numpy(), 'b-')  # ground truth
    plt.plot(xs.data.numpy(), y_preds, 'r-', lw=5)    # model predictions
    plt.show()
def main():
    """Fit a small MLP to y = sin(x) on [-3, 3] and plot train/test fits."""
    x = torch.unsqueeze(torch.linspace(-3, 3, 300), dim=1)  # shape (300, 1)
    # Split by index parity: odd indices -> train, even indices -> test.
    # (Equivalent to the original enumerate/append loop, done with slicing.)
    # NOTE(review): an interleaved split works for a smooth 1-D curve, but a
    # random split via torch.randperm is the more conventional choice.
    trainx = x[1::2]
    testx = x[0::2]
    trainy = torch.sin(trainx)
    dataset = Data.TensorDataset(trainx, trainy)
    model = Model(1, 16, 1)  # 1 input -> 16 hidden -> 1 output
    train(model, 3000, dataset)
    # Plot the fit on the training points, then on the held-out points.
    draw(trainx, trainy, get_predictions(model, trainx))
    testy = torch.sin(testx)
    draw(testx, testy, get_predictions(model, testx))
if __name__=="__main__":
main()
Loss:
Epoch :0, loss :0.76976078748703
Epoch :100, loss :0.0542999692261219
Epoch :200, loss :0.14310893416404724
Epoch :300, loss :0.0458090603351593
Epoch :400, loss :0.12701889872550964
Epoch :500, loss :0.04937600716948509
Epoch :600, loss :0.02582806535065174
Epoch :700, loss :0.059296756982803345
Epoch :800, loss :0.01921667717397213
Epoch :900, loss :0.041494838893413544
Epoch :1000, loss :0.01814175210893154
Epoch :1100, loss :0.047030553221702576
Epoch :1200, loss :0.03243629261851311
Epoch :1300, loss :0.010717405937612057
Epoch :1400, loss :0.022234657779335976
Epoch :1500, loss :0.011244750581681728
Epoch :1600, loss :0.0035188409965485334
Epoch :1700, loss :0.012628424912691116
Epoch :1800, loss :0.009878159500658512
Epoch :1900, loss :0.0218646377325058
Epoch :2000, loss :0.006821316201239824
Epoch :2100, loss :0.015920905396342278
Epoch :2200, loss :0.01449580304324627
Epoch :2300, loss :0.002937785116955638
Epoch :2400, loss :0.004693191964179277
Epoch :2500, loss :0.00621526874601841
Epoch :2600, loss :0.005822642240673304
Epoch :2700, loss :0.003821735503152013
Epoch :2800, loss :0.002171841450035572
Epoch :2900, loss :0.008617950603365898
y=e^x 效果更好些
y=e^x














网友评论