%matplotlib inline
import torch
# Construct a 5 x 3 matrix, uninitialized
x = torch.Tensor(5, 3)
print(x, '\n')
# Construct a randomly initialized matrix
x = torch.rand(5, 3)
print(x, '\n')
# Construct a matrix from a list
x = torch.tensor([[3, 4, 5], [1, 2, 3]])
print(x, '\n')
# Get its size
print(x.size())
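# Note: .shape is an alias for .size(); both can also be indexed per dimension
print(x.shape)
print(x.size(0), x.shape[1])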
# Construct a matrix from a list with dtype torch.float32 (FloatTensor)
x = torch.tensor([[3, 4, 5], [1, 2, 3]], dtype=torch.float32)
print(x, '\n')
# Construct a matrix from a list of floats; the default float dtype is torch.float32 (FloatTensor)
y = torch.tensor([[3., 4, 5], [1, 2, 3.]])
print(y, '\n')
# Construct a matrix from a list of ints; the inferred dtype is torch.int64 (LongTensor)
z = torch.tensor([[3, 4, 5], [1, 2, 3]])
print(z, '\n')
# Success: x and y share the same dtype (torch.float32)
print(x + y)
# Fail: y is a FloatTensor (float32) but z is a LongTensor (int64), so the dtypes do not match
print(y + z)
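# A sketch of one way to make the mixed-dtype addition work: cast z explicitly
# so that both operands share a floating-point dtype
print(y + z.float())
print(y + z.to(y.dtype))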
# Construct a matrix from a list with dtype torch.float64 (DoubleTensor)
x = torch.tensor([[3, 4, 5], [1, 2, 3]], dtype=torch.float64)
print(x, '\n')
# Construct a matrix from a list with dtype torch.float32 (FloatTensor)
y = torch.tensor([[3, 4, 5], [1, 2, 3]], dtype=torch.float32)
print(y, '\n')
# Construct a matrix from a list with dtype torch.int32 (IntTensor)
z = torch.tensor([[3, 4, 5], [1, 2, 3]], dtype=torch.int32)
print(z, '\n')
# Construct a matrix from a list with dtype torch.int64 (LongTensor)
w = torch.tensor([[3, 4, 5], [1, 2, 3]], dtype=torch.int64)
print(w, '\n')
y = y.double()
print(y, '\n')
print(x + y)
x = torch.rand(4, 5)
print(x, '\n')
print(x.device, '\n')
device = torch.device('cuda')
x = x.to(device)
print(x, '\n')
print(x.device, '\n')
print(x.double(), '\n')
device = torch.device('cuda')
x = torch.randn(4, 3, dtype=torch.float64)
y = torch.randn(4, 3, dtype=torch.float32)
z = torch.randint(0, 10, (4, 3), dtype=torch.int32)
z = z.to(device)
print('Before "to" method\n')
print(x.dtype, x.device)
print(y.dtype, y.device)
print(z.dtype, z.device, '\n')
print('After "to" method\n')
# to method with specific dtype and device
x = x.to(dtype=torch.int32, device=device)
# to method with some tensor
y = y.to(z)  # match y's dtype and device to z so the two can be used together
print(x.dtype, x.device)
print(y.dtype, y.device)
print(z.dtype, z.device, '\n')
# Use specific GPU
device1 = torch.device('cuda')
device2 = torch.device('cuda:1')
x = torch.randn(4, 3)
y = torch.randn(4, 3)
x = x.to(device1)
y = y.to(device2)
print(x.device, y.device)
print(x + y)
# Tensors can only be combined in one operation when they live on the same CUDA device
# NOT RECOMMENDED: prefer .to(device) over .cuda()/.cpu()
x = torch.randn(4, 3)
print(x, x.device, '\n')
x = x.cuda()
print(x, x.device, '\n')
x = x.cpu()
print(x, x.device, '\n')
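# A device-agnostic sketch: pick the device once (GPU if available, otherwise CPU)
# and move tensors with .to(device)
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
x = torch.randn(4, 3).to(device)
print(x.device, '\n')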
# Construct a 3 x 5 matrix, uninitialized
x = torch.empty(3, 5)
print(x, '\n')
# Construct a 3 x 5 matrix with zeros
x = torch.zeros(3, 5)
print(x, '\n')
# Construct a 3 x 5 matrix with ones
x = torch.ones(3, 5)
print(x, '\n')
# Construct a 3 x 5 matrix with given value
x = torch.full((3,5), 3.1415)
print(x, '\n')
# Construct a vector with uniformly spaced values in a given range
x = torch.arange(2, 10, 2)
print(x, '\n')
y = torch.linspace(0, 5, 3)  # 3 evenly spaced values from 0 to 5
print(y, '\n')
# Construct a vector with logarithmically spaced values in a given range
z = torch.logspace(-10, 10, 5)  # 5 values evenly spaced on a log scale, from 10^-10 to 10^10
print(z, '\n')
# Construct an identity matrix
z = torch.eye(5)
print(z, '\n')
# Construct a 3 x 5 matrix with random value from uniform distribution, i.e. Uniform[0, 1)
x = torch.rand(3, 5)
print(x, '\n')
# Construct a 3 x 5 matrix with random value from normal distribution, i.e. Normal(0, 1)
x = torch.randn(3, 5)
print(x, '\n')
# Construct a 3 x 5 matrix filled with random integers, i.e. Uniform[low, high)
x = torch.randint(3, 10, (3, 5))
print(x, '\n')
# Construct a vector with random permutation
x = torch.randperm(9)
print(x, '\n')
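# A small sketch of a typical randperm use: shuffle the rows of a matrix
# by indexing with a random permutation of the row indices
m = torch.arange(12).view(4, 3)
idx = torch.randperm(m.size(0))
print(m[idx], '\n')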
x = torch.randint(3, (2, 3)).cuda()  # random integers in [0, 3), moved to the GPU
print(x, '\n')
y = torch.zeros_like(x)
print(y, '\n')
x = torch.rand(2, 3)
print(x, '\n')
y = torch.zeros_like(x)
print(y, '\n')
x = torch.rand(2, 3).int()
print(x, '\n')
y = torch.randint_like(x, 2, 5)  # random integers in [2, 5) with the dtype and device of x
print(y, '\n')
device = torch.device('cuda')
x = torch.rand(2, 3)
x = x.to(device)
y = torch.zeros_like(x)
print(x, '\n')
print(y, '\n')
# The *_like constructors copy every attribute (shape, dtype, device) of the input tensor,
# whereas new_zeros keeps the dtype and device but takes an arbitrary shape
device = torch.device('cuda')
x = torch.randint(0, 10, (3, 4), dtype=torch.int32)
y = torch.randint(0, 10, (3, 4), dtype=torch.float32, device=device)
# Make zero matrix with attribute of x
z = x.new_zeros(2, 3)
print(z, '\n')
print(z.dtype, z.device,'\n')
# Make zero matrix with attribute of y
z = y.new_zeros(2, 3)
print(z, '\n')
print(z.dtype, z.device, '\n')
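# Besides new_zeros, tensors also provide new_ones and new_full, which inherit
# dtype and device from the source tensor in the same way (a brief sketch)
print(y.new_ones(2, 2), '\n')
print(y.new_full((2, 2), 7.0), '\n')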
import numpy as np
a = np.ones(5)
b = torch.from_numpy(a)
print(a)
print(b)
print(" ")
np.add(a, 1, out=a)
print(a)
print(b)
torch.add(b, 1, out=b)
print(a)
print(b)
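# from_numpy shares memory with the NumPy array, as shown above; if an independent
# copy is needed, torch.tensor(a) (or b.clone()) creates one (a quick sketch)
c = torch.tensor(a)
np.add(a, 1, out=a)
print(a)
print(c)  # unchanged, because c owns its own storage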
a = torch.ones(5)
b = a.numpy()
print(b)
# Caution: a tensor on the GPU cannot be converted to a NumPy array directly.
a = torch.ones(5).to(torch.device('cuda'))
b = a.numpy() # error
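# The usual fix: move the tensor back to the CPU first, then convert
b = a.cpu().numpy()
print(b)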
# Addition: syntax 1
x = torch.rand(5, 3)
y = torch.rand(5, 3)
print(x + y, '\n')
# Addition: syntax 2
print(torch.add(x, y), '\n')
# Addition: giving an output tensor
result = torch.Tensor(5, 3)
torch.add(x, y, out=result)
print(result, '\n')
# Addition: in-place
y.add_(x) # (y = y + x)
print(y, '\n')
# You can use standard numpy-like indexing with all bells and whistles
print(x)
print(x[:, 1], '\n')
print(x[x > 0.5])
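# A single-element tensor can be converted to a plain Python number with .item(),
# which is handy after indexing (a quick sketch)
print(x[0, 1])
print(x[0, 1].item())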
x = torch.randn(2, 3)
y = torch.randn(2, 3)
z = torch.randn(3, 2)
print('Pre-define\n')
print(x, '\n')
print(y, '\n')
print(z, '\n')
print('Operation\n')
# Add
print(x + y, '\n')
# Sub
print(x - y, '\n')
# Element-wise Mul
print(x * y, '\n')
# Element-wise Div
print(x / y, '\n')
# Matrix Mul.
print(x @ z, '\n')
print(torch.matmul(x, z))
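# For strictly 2-D inputs torch.mm is equivalent; a (2, 3) @ (3, 2) product
# yields a (2, 2) matrix
print(torch.mm(x, z).size())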
# Change the shape of tensor
x = torch.arange(0, 10)
print(x, '\n')
y = x.view(2, 5)
print(y, '\n')
x = torch.arange(0, 30).view(5, 6)
print(x, '\n')
print(x.size(), '\n')
y = x.view(-1, 2, 5)  # number of elements: 30; -1 is a wildcard inferred from the remaining dims (only valid when the division is exact)
print(y, '\n')  # inferred shape: 3 x 2 x 5
print(y.size(), '\n')
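# view requires contiguous memory; reshape behaves the same but copies when
# necessary, so it is a safe alternative here (a brief sketch)
print(x.reshape(-1, 2, 5).size(), '\n')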
# Change the dimension of tensor
x = torch.arange(0, 10).view(2, 5)
print(x, '\n')
y = x.permute(1, 0) #transpose
print(y, '\n')
y = x.t()
print(y, '\n')  # .t() only works for tensors with at most 2 dimensions
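# For tensors with more than two dimensions, swap any two dims with transpose
# (or reorder all of them with permute); a small sketch
w = torch.arange(24).view(2, 3, 4)
print(w.transpose(0, 2).size(), '\n')  # (4, 3, 2)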
# Add the dimension of tensor
print(x)
z = x.unsqueeze(0)  # (2, 5) -> (1, 2, 5)
print(z)
print(z.size(), '\n')
z = x.unsqueeze(1)  # (2, 5) -> (2, 1, 5)
print(z)
print(z.size(), '\n')
# Remove the unnecessary dimension of tensor (size=1)
z = z.squeeze()
print(z.size(), '\n')
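# squeeze() removes every size-1 dimension; pass a dim to remove only that one
v = torch.zeros(1, 2, 1, 5)
print(v.squeeze(0).size())  # (2, 1, 5)
print(v.squeeze().size(), '\n')  # (2, 5)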
a = torch.arange(9).view(3, 3)
b = torch.arange(3).view(3, 1)
print(a)
print(b)
print(a + b)  # the (3, 1) column vector b is broadcast across the columns of a (3, 3)
import time
a = torch.arange(10000).view(100, 100)
b = torch.arange(100)
start = time.time()
c = a + b.expand(100, 100)  # explicitly expand b to a's shape before adding
end = time.time()
print(end - start)
start = time.time()
c = a + b  # rely on implicit broadcasting
end = time.time()
print(end - start)
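# Both paths give the same result: broadcasting implicitly expands b across the
# rows of a without materializing the larger tensor
print(torch.equal(a + b, a + b.expand(100, 100)))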