import torch
import torch.utils.data as Data
import torch.optim as optim
import torch.nn as nn
from torch.nn import init
import numpy as np
num_inputs=2
num_examples=1000
true_w=[2,-3.4]
true_b=4.2
batchsize=10
num_epochs=100
#generate data
features=torch.tensor(np.random.normal(0,1,(num_examples,num_inputs)),dtype=torch.float)
labels=true_w[0]*features[:,0]+true_w[1]*features[:,1]+true_b+ torch.tensor((np.random.normal(0,0.01,size=(num_examples))),dtype=torch.float)
#wrap the data in a TensorDataset/DataLoader for mini-batch iteration
dataset=Data.TensorDataset(features,labels)
data_iter=Data.DataLoader(dataset,batch_size=batchsize,shuffle=True)
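# (optional sanity check, not in the original post): peek at one mini-batch
# to confirm the DataLoader yields tensors of the expected shapes
for X,y in data_iter:
    print(X.shape,y.shape)   # expected: torch.Size([10, 2]) torch.Size([10])
    break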
#define net
class LinearNet(nn.Module):
    def __init__(self,n_features):
        super().__init__()
        self.linear=nn.Linear(n_features,1)
        #initialize parameters: normally distributed weights, zero bias
        init.normal_(self.linear.weight, mean=0, std=0.01)
        init.constant_(self.linear.bias, val=0)
    def forward(self,x):
        y=self.linear(x)
        return y
net=LinearNet(num_inputs)
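# (optional, not in the original post): printing the module shows its layers, roughly:
# LinearNet(
#   (linear): Linear(in_features=2, out_features=1, bias=True)
# )
print(net)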
#define loss
loss=nn.MSELoss()
#define optimizer
optimizer=optim.SGD(net.parameters(),lr=0.003)
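# (optional note, not in the original post): the learning rate can be changed
# later through the optimizer's param_groups, e.g. to decay it during training:
# for param_group in optimizer.param_groups:
#     param_group['lr'] *= 0.1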
#training...
for epoch in range(1,num_epochs+1):
    for X,y in data_iter:
        output=net(X)
        l=loss(output,y.view(-1,1))   # reshape labels to (batch_size, 1) to match the output
        optimizer.zero_grad()
        l.backward()
        optimizer.step()
    print('epoch %d, loss %f' % (epoch,l.item()))
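# (optional check, not in the original post): after training, the learned
# parameters should be close to the true values used to generate the data
print(true_w, net.linear.weight.data)
print(true_b, net.linear.bias.data)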
Source: https://www.cnblogs.com/liutianrui1/p/13820293.html