Creating a matrix is refreshingly simple here; anyone coming from TensorFlow will probably agree...
import torch

x = torch.empty(5, 3)  # an uninitialized matrix: the entries are whatever was already in memory (often tiny values that look like 0); the result is a tensor, and every input and computation in PyTorch works on tensors
x
tensor([[ 0.0000e+00, -8.5899e+09,  2.1110e-19],
        [-3.6902e+19,  1.1210e-44,  0.0000e+00],
        [ 0.0000e+00,  0.0000e+00,  0.0000e+00],
        [ 0.0000e+00,  0.0000e+00,  0.0000e+00],
        [ 0.0000e+00,  0.0000e+00,  0.0000e+00]])
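torch.empty only allocates memory, so the contents are garbage until you write into them; a minimal sketch of two in-place ways to populate such a tensor (fill_ and normal_, values chosen only for illustration):
x = torch.empty(5, 3)
x.fill_(0.0)                   # overwrite every element with a constant, in place
x.normal_(mean=0.0, std=1.0)   # or overwrite with samples from a normal distribution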
x = torch.rand(5, 3)  # a matrix initialized with random values drawn uniformly from [0, 1)
x
tensor([[0.4896, 0.0190, 0.4041],
        [0.7866, 0.5946, 0.2882],
        [0.9009, 0.2157, 0.7783],
        [0.3611, 0.1883, 0.6338],
        [0.6846, 0.9554, 0.6611]])
Initialize an all-zeros matrix
x = torch.zeros(5, 3, dtype=torch.long)
x
tensor([[0, 0, 0],
        [0, 0, 0],
        [0, 0, 0],
        [0, 0, 0],
        [0, 0, 0]])
Construct a tensor directly from data
x = torch.tensor([5.5, 3])  # build a tensor directly from Python data
x # tensor([5.5000, 3.0000])
x = x.new_ones(5, 3, dtype=torch.double)  # build a new tensor from an existing one; new_ones reuses the old tensor's properties (such as dtype) unless you override them
x = torch.randn_like(x, dtype=torch.float)  # a matrix with the same shape, filled from a standard normal distribution, with the dtype overridden
x
tensor([[ 0.2848,  0.9409, -0.8545],
        [ 0.0582,  0.0050, -1.1220],
        [-1.1389,  0.2913,  0.1471],
        [-0.6798,  0.2851,  1.5386],
        [-0.7647,  1.0754, -0.7901]])
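To make explicit what new_ones and randn_like carry over, here is a small sketch (the dtypes are chosen only for illustration):
base = torch.tensor([5.5, 3])
a = base.new_ones(5, 3, dtype=torch.double)  # reuses base's properties unless overridden
b = torch.randn_like(a, dtype=torch.float)   # same shape as a, dtype overridden
print(a.dtype, b.dtype, b.shape)             # torch.float64 torch.float32 torch.Size([5, 3])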
Inspect the tensor's size
x.dtype # torch.float32
x.size() # torch.Size([5, 3])
x.shape # torch.Size([5, 3])
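torch.Size is a subclass of the Python tuple, so it can be indexed and unpacked like one; a quick sketch:
rows, cols = x.size()          # tuple unpacking works on torch.Size
print(rows, cols)              # 5 3
print(x.size(0), x.shape[1])   # 5 3, indexing works either way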
# reshape a tensor
x = torch.randn(4, 4)
y = x.view(16)
z = x.view(-1, 8)  # -1 means this dimension is inferred from the others
z
tensor([[-1.6207, -1.0893, -1.1299, -0.4559, -1.8922,  0.4771,  2.0306,  1.6977],
        [-0.8509,  2.1227, -0.3943, -1.8941, -0.6286,  0.3956,  0.5723,  1.4658]])
x = torch.rand(5, 3)
y = torch.rand(5, 3)
x + y
tensor([[0.9174, 1.3786, 1.0915],
        [1.0750, 1.7506, 1.3954],
        [0.5356, 1.2608, 1.5221],
        [1.4083, 1.0147, 0.9909],
        [0.8153, 0.6314, 0.5632]])
torch.add(x, y)  # exactly the same thing, element-wise addition
tensor([[0.9174, 1.3786, 1.0915],
        [1.0750, 1.7506, 1.3954],
        [0.5356, 1.2608, 1.5221],
        [1.4083, 1.0147, 0.9909],
        [0.8153, 0.6314, 0.5632]])
result = torch.empty(5, 3)
torch.add(x, y, out=result)  # write the sum into a pre-allocated output tensor
tensor([[0.9174, 1.3786, 1.0915],
        [1.0750, 1.7506, 1.3954],
        [0.5356, 1.2608, 1.5221],
        [1.4083, 1.0147, 0.9909],
        [0.8153, 0.6314, 0.5632]])
# result now holds x + y
y.add_(x)  # in-place addition: any method ending in an underscore mutates the tensor it is called on
tensor([[0.9174, 1.3786, 1.0915],
        [1.0750, 1.7506, 1.3954],
        [0.5356, 1.2608, 1.5221],
        [1.4083, 1.0147, 0.9909],
        [0.8153, 0.6314, 0.5632]])
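To see the difference between the out-of-place and in-place forms side by side, a small sketch with fresh tensors:
a = torch.ones(2, 2)
b = torch.ones(2, 2)
c = torch.add(a, b)   # out-of-place: a and b are unchanged
a.add_(b)             # in-place: a itself now holds the sum
print(c.equal(a))     # True, but only a was mutated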
x = torch.randn(5, 3)
x[:, 1]  # NumPy-style indexing: the second column, e.g. tensor([-1.1208, -0.0828, -0.4144, -0.7263, 0.1368])
x = torch.randn(1)
x.item()  # extract the value of a one-element tensor as a plain Python number, e.g. -0.49584466218948364
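item() only works on one-element tensors; for anything larger, tolist() gives nested Python lists. A brief sketch:
t = torch.tensor([[1.0, 2.0], [3.0, 4.0]])
print(t[0, 1].item())   # 2.0, a single element comes out as a Python float
print(t.tolist())       # [[1.0, 2.0], [3.0, 4.0]]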
x = torch.randn(4, 4)
y = x.view(16)
z = x.view(-1, 8)  # -1 means this dimension is computed automatically
print(x.size(), y.size(), z.size())
torch.Size([4, 4]) torch.Size([16]) torch.Size([2, 8])
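view requires a compatible memory layout: after a transpose you must either call contiguous() first or use reshape, which copies only when it has to. A sketch:
x = torch.randn(4, 4)
t = x.t()                               # transpose: same data, non-contiguous layout
# t.view(16)                            # this would raise a RuntimeError
print(t.contiguous().view(16).size())   # torch.Size([16])
print(t.reshape(16).size())             # torch.Size([16])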
# tensor -> numpy
a = torch.ones(5)
b = a.numpy()
b # array([1., 1., 1., 1., 1.], dtype=float32)
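Note that on the CPU the numpy array returned by .numpy() shares memory with the tensor, so changing one in place changes the other:
a = torch.ones(5)
b = a.numpy()
a.add_(1)   # in-place add on the tensor
print(b)    # [2. 2. 2. 2. 2.], the numpy view changed as well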
# numpy -> tensor
import numpy as np
a = np.ones(5)
b = torch.from_numpy(a)
b # tensor([1., 1., 1., 1., 1.], dtype=torch.float64)
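The same memory sharing holds in the other direction for torch.from_numpy:
import numpy as np
a = np.ones(5)
b = torch.from_numpy(a)
np.add(a, 1, out=a)   # modify the numpy array in place
print(b)              # tensor([2., 2., 2., 2., 2.], dtype=torch.float64)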
# move tensors from the CPU to the GPU
if torch.cuda.is_available():
    device = torch.device("cuda")
    y = torch.ones_like(x, device=device)  # create the tensor directly on the GPU
    x = x.to(device)                       # or move an existing tensor over
    z = x + y
    print(z)
    print(z.to("cpu", torch.double))       # move back to the CPU, changing dtype at the same time
    # model = model.cuda()                 # an nn.Module is moved to the GPU the same way
# a GPU tensor cannot be converted to numpy directly; move it to the CPU first, then convert
y.to("cpu").data.numpy()
y.cpu().data.numpy()  # equivalent shorthand (.detach() is the modern replacement for .data)
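A common pattern is to pick the device once and keep the rest of the code device-agnostic; a minimal sketch:
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
x = torch.randn(5, 3, device=device)   # created directly on the chosen device
y = torch.ones(5, 3).to(device)        # or moved there afterwards
z = (x + y).cpu().numpy()              # back to the CPU before converting to numpy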
Source: https://www.cnblogs.com/fldev/p/14466039.html