A simple PyTorch test
Published: 2019-06-09


pytorch demo

import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torch.autograd import Variable


class Net(nn.Module):                     # a network must subclass nn.Module
    def __init__(self):
        super(Net, self).__init__()
        # Two convolutional layers, self.conv1 and self.conv2. Note that these layers contain no activation functions.
        self.conv1 = nn.Conv2d(1, 6, 5)   # 1 input image channel, 6 output channels, 5x5 square convolution kernel
        self.conv2 = nn.Conv2d(6, 16, 5)
        # Three fully connected layers
        self.fc1 = nn.Linear(16 * 5 * 5, 120)  # an affine operation: y = Wx + b
        self.fc2 = nn.Linear(120, 84)
        self.fc3 = nn.Linear(84, 10)

    def forward(self, x):
        # Note: the input to a 2D convolution layer has shape batchsize * channel * height * width
        x = F.max_pool2d(F.relu(self.conv1(x)), (2, 2))  # Max pooling over a (2, 2) window
        x = F.max_pool2d(F.relu(self.conv2(x)), 2)       # If the size is a square you can only specify a single number
        x = x.view(-1, self.num_flat_features(x))
        x = F.relu(self.fc1(x))
        x = F.relu(self.fc2(x))
        x = self.fc3(x)
        return x

    def num_flat_features(self, x):
        size = x.size()[1:]  # all dimensions except the batch dimension
        num_features = 1
        for s in size:
            num_features *= s
        return num_features


# net = Net()
# print(net)
# print(len(list(net.parameters())))
#
# input = Variable(torch.randn(1, 1, 32, 32))
# out = net(input)

net = Net()

# create your optimizer
optimizer = optim.SGD(net.parameters(), lr=0.01)

input_data = torch.randn(2, 1, 32, 32)
# input_data = Variable(input_data)   # not needed on PyTorch 0.4+
target = torch.FloatTensor(2, 10).random_(8)
print(target)

# reduce=True, size_average=True were the old defaults; plain MSELoss() (reduction='mean') is equivalent
criterion = torch.nn.MSELoss()

# in your training loop:
for i in range(1000):
    optimizer.zero_grad()      # zero the gradient buffers; if you don't, gradients accumulate
    output = net(input_data)   # this is where the dynamic graph shows up: you could pass extra arguments here to change the network structure
    loss = criterion(output, target)
    loss.backward()            # compute gradients, i.e. fill each parameter's .grad
    optimizer.step()           # does the update

print(output)

output

tensor([[1., 3., 4., 3., 5., 1., 6., 6., 6., 6.],
        [1., 2., 2., 7., 2., 4., 0., 4., 3., 6.]])
tensor([[1.0419, 3.0951, 4.0900, 3.2657, 5.1304, 1.1834, 6.0200, 6.1616, 6.1678,
         6.2592],
        [0.9804, 2.0937, 2.2189, 6.6986, 2.2809, 3.8273, 0.5658, 4.1855, 3.3320,
         6.0890]], grad_fn=<AddmmBackward>)
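The training-loop comment above mentions dynamic graph construction: PyTorch rebuilds the computation graph on every forward pass, so forward() can branch on ordinary Python values passed in as extra arguments. Below is a minimal sketch of that idea, not part of the original demo; the DynamicNet class and the use_second_layer flag are made up for illustration.

import torch
import torch.nn as nn
import torch.nn.functional as F


class DynamicNet(nn.Module):
    """Toy network whose structure changes depending on an argument to forward()."""

    def __init__(self):
        super(DynamicNet, self).__init__()
        self.fc1 = nn.Linear(32, 16)
        self.fc2 = nn.Linear(16, 16)
        self.out = nn.Linear(16, 10)

    def forward(self, x, use_second_layer=False):
        x = F.relu(self.fc1(x))
        if use_second_layer:      # ordinary Python control flow decides the graph for this pass
            x = F.relu(self.fc2(x))
        return self.out(x)


net = DynamicNet()
x = torch.randn(4, 32)
print(net(x).shape)                          # graph built without the second layer
print(net(x, use_second_layer=True).shape)   # graph built with the second layer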

Linear regression

import torch
from torch.autograd import Variable

# train data
x_data = Variable(torch.Tensor([[1.0], [2.0], [3.0]]))
y_data = Variable(torch.Tensor([[2.0], [4.0], [6.0]]))


class Model(torch.nn.Module):
    def __init__(self):
        super(Model, self).__init__()
        self.linear = torch.nn.Linear(1, 1)  # One in and one out

    def forward(self, x):
        y_pred = self.linear(x)
        return y_pred


# our model
model = Model()

criterion = torch.nn.MSELoss(reduction='sum')              # loss function (size_average=False in the old API)
optimizer = torch.optim.SGD(model.parameters(), lr=0.01)   # optimizer

# Training loop: forward, loss, backward, step
for epoch in range(500):
    # Forward pass
    y_pred = model(x_data)
    # Compute loss
    loss = criterion(y_pred, y_data)
    print(epoch, loss.item())
    # Zero gradients
    optimizer.zero_grad()
    # Perform backward pass
    loss.backward()
    # Update weights
    optimizer.step()

# After training
hour_var = Variable(torch.Tensor([[7.0]]))
print("predict (after training)", 7.0, model(hour_var).item())

Logistic regression

import torch
from torch.autograd import Variable

x_data = Variable(torch.Tensor([[0.4], [1.0], [3.5], [4.0]]))
y_data = Variable(torch.Tensor([[0.], [0.], [1.], [1.]]))


class Model(torch.nn.Module):
    def __init__(self):
        super(Model, self).__init__()
        self.linear = torch.nn.Linear(1, 1)  # One in, one out
        self.sigmoid = torch.nn.Sigmoid()

    def forward(self, x):
        y_pred = self.sigmoid(self.linear(x))
        return y_pred


# Our model
model = Model()

# Construct loss function and optimizer
criterion = torch.nn.BCELoss()   # size_average=True in the old API, i.e. the default reduction='mean'
optimizer = torch.optim.SGD(model.parameters(), lr=0.01)

# Training loop
for epoch in range(80000):
    # Forward pass
    y_pred = model(x_data)
    # Compute loss
    loss = criterion(y_pred, y_data)
    if epoch % 20 == 0:
        print(epoch, loss.item())
    # Zero gradients
    optimizer.zero_grad()
    # Backward pass
    loss.backward()
    # Update weights
    optimizer.step()

# After training
hour_var = Variable(torch.Tensor([[0.5]]))
print("predict (after training)", 0.5, model(hour_var).item())
hour_var = Variable(torch.Tensor([[7.0]]))
print("predict (after training)", 7.0, model(hour_var).item())

Notes

In PyTorch 0.4, Tensor and Variable were merged, so tensors can take part in autograd computations directly; in earlier versions you still have to wrap a tensor in a Variable before gradients can be computed through it.
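A minimal sketch of what that note means in practice (the variable names here are just illustrative):

import torch
from torch.autograd import Variable

# PyTorch 0.4+: a plain tensor with requires_grad=True is differentiable directly
x = torch.ones(2, 2, requires_grad=True)
(x * 3).sum().backward()
print(x.grad)      # tensor([[3., 3.], [3., 3.]])

# Pre-0.4 style: the tensor had to be wrapped in a Variable first
x_old = Variable(torch.ones(2, 2), requires_grad=True)
(x_old * 3).sum().backward()
print(x_old.grad)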

Reposted from: https://www.cnblogs.com/o-v-o/p/9975358.html
