a = [1.0, 2.0, 1.0]
a[0]
1.0
a[2] = 3.0
a
[1.0, 2.0, 3.0]
import torch  # imports the torch module
a = torch.ones(3)  # creates a one-dimensional tensor of size 3 filled with 1s
a
tensor([1., 1., 1.])
a[1]
tensor(1.)
float(a[1])
1.0
a[2] = 2.0
a
tensor([1., 1., 2.])
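# A small aside (not in the original listing): unlike a Python list, all
# elements of a tensor share a single dtype; torch.ones defaults to 32-bit
# floating point.
a.dtype
torch.float32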
points = torch.zeros(6)  # zeros is just a convenient way to get an appropriately sized tensor
points[0] = 4.0          # we then overwrite the zeros with the values we actually want
points[1] = 1.0
points[2] = 5.0
points[3] = 3.0
points[4] = 2.0
points[5] = 1.0
points = torch.tensor([4.0, 1.0, 5.0, 3.0, 2.0, 1.0])
points
tensor([4., 1., 5., 3., 2., 1.])
float(points[0]), float(points[1])
(4.0, 1.0)
points = torch.tensor([[4.0, 1.0], [5.0, 3.0], [2.0, 1.0]])
points
tensor([[4., 1.],
        [5., 3.],
        [2., 1.]])
points.shape
torch.Size([3, 2])
points = torch.zeros(3, 2)
points
tensor([[0., 0.],
        [0., 0.],
        [0., 0.]])
points = torch.tensor([[4.0, 1.0], [5.0, 3.0], [2.0, 1.0]])
points
tensor([[4., 1.],
        [5., 3.],
        [2., 1.]])
points[0, 1]
tensor(1.)
points[0]
tensor([4., 1.])
points = torch.tensor([[4.0, 1.0], [5.0, 3.0], [2.0, 1.0]])
points.storage()
 4.0
 1.0
 5.0
 3.0
 2.0
 1.0
[torch.FloatStorage of size 6]
points_storage = points.storage()
points_storage[0]
4.0
points.storage()[1]
1.0
points = torch.tensor([[4.0, 1.0], [5.0, 3.0], [2.0, 1.0]])
points_storage = points.storage()
points_storage[0] = 2.0
points
tensor([[2., 1.],
        [5., 3.],
        [2., 1.]])
points = torch.tensor([[4.0, 1.0], [5.0, 3.0], [2.0, 1.0]])
second_point = points[1]
second_point.storage_offset()
2
second_point.size()
torch.Size([2])
second_point.shape
torch.Size([2])
points.stride()
(2, 1)
second_point = points[1]
second_point.size()
torch.Size([2])
second_point.storage_offset()
2
second_point.stride()
(1,)
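# A minimal sketch (not in the original listing): element (i, j) of a 2D
# tensor lives in storage at storage_offset + stride[0] * i + stride[1] * j.
i, j = 1, 1
offset = points.storage_offset() + points.stride()[0] * i + points.stride()[1] * j
points.storage()[offset] == float(points[i, j])
True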
points = torch.tensor([[4.0, 1.0], [5.0, 3.0], [2.0, 1.0]])
second_point = points[1]
second_point[0] = 10.0
points
tensor([[ 4.,  1.],
        [10.,  3.],
        [ 2.,  1.]])
points = torch.tensor([[4.0, 1.0], [5.0, 3.0], [2.0, 1.0]])
second_point = points[1].clone()
second_point[0] = 10.0
points
tensor([[4., 1.],
        [5., 3.],
        [2., 1.]])
points = torch.tensor([[4.0, 1.0], [5.0, 3.0], [2.0, 1.0]])
points
tensor([[4., 1.],
        [5., 3.],
        [2., 1.]])
points_t = points.t()
points_t
tensor([[4., 5., 2.],
        [1., 3., 1.]])
id(points.storage()) == id(points_t.storage())
True
points.stride()
(2, 1)
points_t.stride()
(1, 2)
some_t = torch.ones(3, 4, 5)
transpose_t = some_t.transpose(0, 2)
some_t.shape
torch.Size([3, 4, 5])
transpose_t.shape
torch.Size([5, 4, 3])
some_t.stride()
(20, 5, 1)
transpose_t.stride()
(1, 5, 20)
points.is_contiguous()
True
points_t.is_contiguous()
False
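# A quick check, assuming the data_ptr() accessor: transposing produces a
# view, so no data is copied and both tensors share the same memory.
some_t.data_ptr() == transpose_t.data_ptr()
True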
points = torch.tensor([[4.0, 1.0], [5.0, 3.0], [2.0, 1.0]])
points_t = points.t()
points_t
tensor([[4., 5., 2.],
        [1., 3., 1.]])
points_t.storage()
 4.0
 1.0
 5.0
 3.0
 2.0
 1.0
[torch.FloatStorage of size 6]
points_t.stride()
(1, 2)
points_t_cont = points_t.contiguous()
points_t_cont
tensor([[4., 5., 2.],
        [1., 3., 1.]])
points_t_cont.stride()
(3, 1)
points_t_cont.storage()
 4.0
 5.0
 2.0
 1.0
 3.0
 1.0
[torch.FloatStorage of size 6]
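# Another data_ptr() check: points_t shared its memory with points, while the
# contiguous copy received a storage of its own.
points_t.data_ptr() == points.data_ptr()
True
points_t_cont.data_ptr() == points.data_ptr()
False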
double_points = torch.ones(10, 2, dtype=torch.double)
short_points = torch.tensor([[1, 2], [3, 4]], dtype=torch.short)
short_points.dtype
torch.int16
double_points = torch.zeros(10, 2).double()
short_points = torch.ones(10, 2).short()
double_points = torch.zeros(10, 2).to(torch.double)
short_points = torch.ones(10, 2).to(dtype=torch.short)
points_64 = torch.rand(5, dtype=torch.double)  # rand initializes the elements to random numbers between 0 and 1
points_short = points_64.to(torch.short)  # truncates the values in [0, 1) down to 0
points_64 * points_short  # works from PyTorch 1.3 onwards
tensor([0., 0., 0., 0., 0.], dtype=torch.float64)
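# A hedged aside, assuming torch.promote_types is available (PyTorch 1.5+):
# it reports the dtype that mixed-dtype arithmetic like the line above
# resolves to.
torch.promote_types(torch.double, torch.short)
torch.float64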
points = torch.tensor([[4.0, 1.0], [5.0, 3.0], [2.0, 1.0]])
some_list = list(range(6))
some_list[:]     # all elements in the list
some_list[1:4]   # from element 1 inclusive to element 4 exclusive
some_list[1:]    # from element 1 inclusive to the end of the list
some_list[:4]    # from the start of the list to element 4 exclusive
some_list[:-1]   # from the start of the list to one before the last element
some_list[1:4:2] # from element 1 inclusive to element 4 exclusive, in steps of 2
[1, 3]
points[1:]       # all rows after the first; implicitly all columns
points[1:, :]    # all rows after the first; all columns
points[1:, 0]    # all rows after the first; first column
points[None]     # adds a dimension of size 1, just like unsqueeze
tensor([[[4., 1.],
         [5., 3.],
         [2., 1.]]])
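# A minimal check: indexing with None is equivalent to unsqueeze, adding a
# dimension of size 1 at that position.
points[None].shape == points.unsqueeze(0).shape
True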
points = torch.ones(3, 4)
points_np = points.numpy()
points_np
array([[1., 1., 1., 1.],
       [1., 1., 1., 1.],
       [1., 1., 1., 1.]], dtype=float32)
points = torch.from_numpy(points_np)
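# A quick check (not in the original listing): from_numpy shares the buffer
# with the NumPy array on the CPU, so a write on one side shows up on the
# other.
points_np[0, 0] = 2.0
points[0, 0]
tensor(2.)
points_np[0, 0] = 1.0  # restore, so the tensor saved below is all ones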
torch.save(points, '../data/p1ch3/ourpoints.t')
with open('../data/p1ch3/ourpoints.t', 'wb') as f:
    torch.save(points, f)
points = torch.load('../data/p1ch3/ourpoints.t')
with open('../data/p1ch3/ourpoints.t', 'rb') as f:
    points = torch.load(f)
import h5py

f = h5py.File('../data/p1ch3/ourpoints.hdf5', 'w')
dset = f.create_dataset('coords', data=points.numpy())
f.close()
f = h5py.File('../data/p1ch3/ourpoints.hdf5', 'r')
dset = f['coords']
last_points = dset[-2:]
last_points = torch.from_numpy(dset[-2:])
f.close()
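# A hedged aside: the h5py dataset reads lazily from disk, and slicing it
# returns a plain NumPy array, so last_points remains valid after the file
# is closed.
last_points
tensor([[1., 1., 1., 1.],
        [1., 1., 1., 1.]])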
points_gpu = torch.tensor([[4.0, 1.0], [5.0, 3.0], [2.0, 1.0]], device='cuda')
# On a machine whose PyTorch build lacks CUDA support, the line above fails
# with: AssertionError: Torch not compiled with CUDA enabled
points_gpu = points.to(device='cuda')
points_gpu = points.to(device='cuda:0')
points = 2 * points  # multiplication performed on the CPU
points_gpu = 2 * points.to(device='cuda')  # multiplication performed on the GPU
points_gpu = points_gpu + 4
points_cpu = points_gpu.to(device='cpu')
points_gpu = points.cuda()  # defaults to GPU index 0
points_gpu = points.cuda(0)
points_cpu = points_gpu.cpu()
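# A portable sketch (the device and points_device names are ours, not from
# the listing): pick the GPU when one is available and fall back to the CPU
# otherwise, so the same code also runs on machines without CUDA.
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
points_device = points.to(device=device)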
a = torch.ones(3, 2)
a_t = torch.transpose(a, 0, 1)

a.shape, a_t.shape
(torch.Size([3, 2]), torch.Size([2, 3]))
a = torch.ones(3, 2)
a_t = a.transpose(0, 1)

a.shape, a_t.shape
(torch.Size([3, 2]), torch.Size([2, 3]))
a = torch.ones(3, 2)
a.zero_()
a
tensor([[0., 0.],
        [0., 0.],
        [0., 0.]])
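# A short illustration of the trailing-underscore convention: add_ mutates a
# in place (and returns it), whereas a + 1.0 would allocate a new tensor.
a.add_(1.0)
tensor([[1., 1.],
        [1., 1.],
        [1., 1.]])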
_ = torch.tensor([0.2126, 0.7152, 0.0722], names=['c'])
UserWarning: Named tensors and all their associated APIs are an experimental feature and subject to change. Please do not use them for anything important until they are released as stable.
img_t = torch.randn(3, 5, 5) # shape [channels, rows, columns]
weights = torch.tensor([0.2126, 0.7152, 0.0722])
batch_t = torch.randn(2, 3, 5, 5) # shape [batch, channels, rows, columns]
img_gray_naive = img_t.mean(-3)
batch_gray_naive = batch_t.mean(-3)
img_gray_naive.shape, batch_gray_naive.shape
(torch.Size([5, 5]), torch.Size([2, 5, 5]))
unsqueezed_weights = weights.unsqueeze(-1).unsqueeze_(-1)
img_weights = (img_t * unsqueezed_weights)
batch_weights = (batch_t * unsqueezed_weights)
img_gray_weighted = img_weights.sum(-3)
batch_gray_weighted = batch_weights.sum(-3)
batch_weights.shape, batch_t.shape, unsqueezed_weights.shape
(torch.Size([2, 3, 5, 5]), torch.Size([2, 3, 5, 5]), torch.Size([3, 1, 1]))
img_gray_weighted_fancy = torch.einsum('...chw,c->...hw', img_t, weights)
batch_gray_weighted_fancy = torch.einsum('...chw,c->...hw', batch_t, weights)
batch_gray_weighted_fancy.shape
torch.Size([2, 5, 5])
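# A sanity check using torch.allclose: the einsum version agrees with the
# explicit unsqueeze-multiply-sum computed earlier.
torch.allclose(batch_gray_weighted_fancy, batch_gray_weighted)
True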
weights_named = torch.tensor([0.2126, 0.7152, 0.0722], names=['channels'])
weights_named
tensor([0.2126, 0.7152, 0.0722], names=('channels',))
img_named = img_t.refine_names(..., 'channels', 'rows', 'columns')
batch_named = batch_t.refine_names(..., 'channels', 'rows', 'columns')
print("img named:", img_named.shape, img_named.names)
print("batch named:", batch_named.shape, batch_named.names)
img named: torch.Size([3, 5, 5]) ('channels', 'rows', 'columns')
batch named: torch.Size([2, 3, 5, 5]) (None, 'channels', 'rows', 'columns')
weights_aligned = weights_named.align_as(img_named)
weights_aligned.shape, weights_aligned.names
(torch.Size([3, 1, 1]), ('channels', 'rows', 'columns'))
gray_named = (img_named * weights_aligned).sum('channels')
gray_named.shape, gray_named.names
(torch.Size([5, 5]), ('rows', 'columns'))
try:
    gray_named = (img_named[..., :3] * weights_named).sum('channels')
except Exception as e:
    print(e)
Error when attempting to broadcast dims ['channels', 'rows', 'columns'] and dims ['channels']: dim 'columns' and dim 'channels' are at the same position from the right but do not match.
gray_plain = gray_named.rename(None)
gray_plain.shape, gray_plain.names
(torch.Size([5, 5]), (None, None))