The goal of rTorch is to provide an R wrapper to PyTorch. rTorch gives you all the functionality of PyTorch plus all the features that R provides. We have borrowed some ideas and code used in the R tensorflow package to implement rTorch.
Besides the module torch, which directly provides PyTorch methods, classes and functions, the package also provides the numpy module as an object called np, as well as torchvision. The dollar sign $ after a module name gives you access to all of its sub-objects. Example:
```r
tv <- rTorch::torchvision
tv
#> Module(torchvision)

np <- rTorch::np
np
#> Module(numpy)

torch_module <- rTorch::torch
torch_module
#> Module(torch)
```
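The `$` operator also reaches the attributes and functions inside each module. A minimal sketch, assuming rTorch is attached and a Python backend is configured (the printed values are illustrative):

```r
library(rTorch)

# a constant exposed by the numpy module
np$pi
#> [1] 3.141593

# a function reached through nested sub-modules of torch
torch$nn$functional$relu(torch$tensor(c(-1, 2)))
#> tensor([0., 2.])
```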
To reduce the build time of the rTorch package, we moved the examples that use tensor operations and neural networks to separate repositories, split into two sets of examples.
rTorch is available on CRAN and from GitHub. Install it from CRAN with `install.packages("rTorch")` at the R console, or from RStudio via the *Tools > Install Packages* menu.

For the latest version, install rTorch from GitHub with:
devtools::install_github("f0nzie/rTorch")
Installing from GitHub gives you the flexibility of experimenting with the latest development version of rTorch. For instance, to install rTorch from the develop branch:
devtools::install_github("f0nzie/rTorch", ref="develop")
or clone the repository with Git:
```bash
git clone https://github.com/f0nzie/rTorch.git
```
There are five major types of Tensors in PyTorch:

* Byte
* Float
* Double
* Long
* Bool
```r
library(rTorch)

byte_tensor   <- torch$ByteTensor(3L, 3L)
float_tensor  <- torch$FloatTensor(3L, 3L)
double_tensor <- torch$DoubleTensor(3L, 3L)
long_tensor   <- torch$LongTensor(3L, 3L)
bool_tensor   <- torch$BoolTensor(5L, 5L)

byte_tensor
#> tensor([[58,  0,  0],
#>         [ 0,  0,  0],
#>         [ 0,  0,  0]], dtype=torch.uint8)

float_tensor
#> tensor([[0., 0., 0.],
#>         [0., 0., 0.],
#>         [0., 0., 0.]])

double_tensor
#> tensor([[6.9259e-310, 6.9259e-310, 1.2095e-312],
#>         [1.2697e-321, 6.9259e-310, 4.6888e-310],
#>         [ 0.0000e+00,  0.0000e+00,  0.0000e+00]], dtype=torch.float64)

long_tensor
#> tensor([[140182629985840, 140182629985840,               0],
#>         [ 94902887985680,               0,               0],
#>         [              0,               0,               0]])

bool_tensor
#> tensor([[False, False, False, False, False],
#>         [False, False, False, False, False],
#>         [False, False, False, False, False],
#>         [False, False, False, False, False],
#>         [False, False, False, False, False]])
```
A 4D tensor, like those in the MNIST hand-written digits recognition dataset:
```r
mnist_4d <- torch$FloatTensor(60000L, 3L, 28L, 28L)

# size
mnist_4d$size()
#> torch.Size([60000, 3, 28, 28])

# length
length(mnist_4d)
#> [1] 141120000

# shape, like in numpy
mnist_4d$shape
#> torch.Size([60000, 3, 28, 28])

# number of elements
mnist_4d$numel()
#> [1] 141120000
```
A 3D tensor:
```r
ft3d <- torch$FloatTensor(4L, 3L, 2L)
ft3d
#> tensor([[[0., 0.],
#>          [0., 0.],
#>          [0., 0.]],
#>
#>         [[0., 0.],
#>          [0., 0.],
#>          [0., 0.]],
#>
#>         [[0., 0.],
#>          [0., 0.],
#>          [0., 0.]],
#>
#>         [[0., 0.],
#>          [0., 0.],
#>          [0., 0.]]])

# get the first element of the tensor
ft3d[1, 1, 1]
#> tensor(0.)

# create a tensor filled with a value
torch$full(list(2L, 3L), 3.141592)
#> tensor([[3.1416, 3.1416, 3.1416],
#>         [3.1416, 3.1416, 3.1416]])
```
```r
# 3x5 matrix uniformly distributed between 0 and 1
mat0 <- torch$FloatTensor(3L, 5L)$uniform_(0L, 1L)

# fill a 3x5 matrix with 0.1
mat1 <- torch$FloatTensor(3L, 5L)$uniform_(0.1, 0.1)

# a vector with all ones
mat2 <- torch$FloatTensor(5L)$uniform_(1, 1)

# add two tensors
mat0 + mat1
#> tensor([[1.0472, 0.3601, 0.8105, 0.2573, 0.9311],
#>         [0.8394, 0.1631, 0.5836, 0.2908, 0.1806],
#>         [1.0795, 0.7679, 0.7024, 0.1542, 1.0788]])

# add three tensors
mat0 + mat1 + mat2
#> tensor([[2.0472, 1.3601, 1.8105, 1.2573, 1.9311],
#>         [1.8394, 1.1631, 1.5836, 1.2908, 1.1806],
#>         [2.0795, 1.7679, 1.7024, 1.1542, 2.0788]])
```
```r
# PyTorch adds two tensors with the add() function
x = torch$rand(5L, 4L)
y = torch$rand(5L, 4L)

print(x$add(y))
#> tensor([[1.3989, 1.0835, 0.5394, 1.1483],
#>         [1.3036, 0.3943, 1.2853, 1.2172],
#>         [1.0630, 1.6868, 1.1131, 1.5882],
#>         [0.5632, 0.6763, 1.1735, 1.5093],
#>         [1.5244, 0.9094, 1.4205, 0.5644]])

print(x + y)
#> tensor([[1.3989, 1.0835, 0.5394, 1.1483],
#>         [1.3036, 0.3943, 1.2853, 1.2172],
#>         [1.0630, 1.6868, 1.1131, 1.5882],
#>         [0.5632, 0.6763, 1.1735, 1.5093],
#>         [1.5244, 0.9094, 1.4205, 0.5644]])
```
```r
# add an element of a tensor to another tensor
mat1[1, 1] + mat2
#> tensor([1.1000, 1.1000, 1.1000, 1.1000, 1.1000])

mat1
#> tensor([[0.1000, 0.1000, 0.1000, 0.1000, 0.1000],
#>         [0.1000, 0.1000, 0.1000, 0.1000, 0.1000],
#>         [0.1000, 0.1000, 0.1000, 0.1000, 0.1000]])

# extract part of the tensor
indices <- torch$tensor(c(0L, 3L))
torch$index_select(mat1, 1L, indices)   # dim = 0 selects rows; dim = 1 selects columns
#> tensor([[0.1000, 0.1000],
#>         [0.1000, 0.1000],
#>         [0.1000, 0.1000]])

# add a scalar to a tensor
mat0 + 0.1
#> tensor([[1.0472, 0.3601, 0.8105, 0.2573, 0.9311],
#>         [0.8394, 0.1631, 0.5836, 0.2908, 0.1806],
#>         [1.0795, 0.7679, 0.7024, 0.1542, 1.0788]])
```
```r
# multiply a tensor by a scalar
tensor = torch$ones(4L, dtype = torch$float64)
scalar = np$float64(4.321)

message("a numpy scalar: ", scalar)
#> a numpy scalar: 4.321
message("a PyTorch scalar: ", torch$scalar_tensor(scalar))
#> a PyTorch scalar: tensor(4.3210)
message("\nResult")
#>
#> Result
(prod = torch$mul(tensor, torch$scalar_tensor(scalar)))
#> tensor([4.3210, 4.3210, 4.3210, 4.3210], dtype=torch.float64)

# short version using generics
(prod = tensor * scalar)
#> tensor([4.3210, 4.3210, 4.3210, 4.3210], dtype=torch.float64)
```
```r
# element-wise multiplication of two 1D tensors
t1 = torch$tensor(c(1, 2))
t2 = torch$tensor(c(3, 2))

t1
#> tensor([1., 2.])
t2
#> tensor([3., 2.])

t1 * t2
#> tensor([3., 4.])

# matrix multiplication of two 2D tensors
t1 = torch$tensor(list(
  c(1, 2, 3),
  c(1, 2, 3)
))
t2 = torch$tensor(list(
  c(1, 2),
  c(1, 2),
  c(1, 2)
))

t1
#> tensor([[1., 2., 3.],
#>         [1., 2., 3.]])
t2
#> tensor([[1., 2.],
#>         [1., 2.],
#>         [1., 2.]])

torch$mm(t1, t2)
#> tensor([[ 6., 12.],
#>         [ 6., 12.]])
```
```r
t1 = torch$tensor(c(1, 2))
t2 = torch$tensor(c(3, 2))

t1
#> tensor([1., 2.])
t2
#> tensor([3., 2.])

# dot product of two vectors
torch$dot(t1, t2)
#> tensor(7.)

# the dot product of 1D tensors is a scalar
p <- torch$Tensor(list(4L, 2L))
q <- torch$Tensor(list(3L, 1L))

(r = torch$dot(p, q))   # 14
#> tensor(14.)
(r <- p %.*% q)
#> tensor(14.)
```
```r
# torch$dot works for vectors, not matrices
t1 = torch$tensor(list(
  c(1, 2, 3),
  c(1, 2, 3)
))
t2 = torch$tensor(list(
  c(1, 2),
  c(1, 2),
  c(1, 2)
))

t1$shape
#> torch.Size([2, 3])
t2$shape
#> torch.Size([3, 2])

# RuntimeError: 1D tensors expected, got 2D, 2D tensors
torch$dot(t1, t2)
```
In matrix multiplication, the number of columns of the first matrix must equal the number of rows of the second matrix.
```r
# for matrix multiplication of 2D tensors we use torch$mm()
t1 = torch$tensor(list(
  c(1, 2, 3),
  c(1, 2, 3)
))
t2 = torch$tensor(list(
  c(1, 2),
  c(1, 2),
  c(1, 2)
))

torch$mm(t1, t2)
#> tensor([[ 6., 12.],
#>         [ 6., 12.]])

torch$mm(t2, t1)
#> tensor([[3., 6., 9.],
#>         [3., 6., 9.],
#>         [3., 6., 9.]])
```
```r
# another matrix multiplication of 2D tensors with torch$mm()
t1 = torch$arange(1, 11)$view(c(2L, 5L))
t2 = torch$arange(11, 21)$view(c(5L, 2L))

t1
#> tensor([[ 1.,  2.,  3.,  4.,  5.],
#>         [ 6.,  7.,  8.,  9., 10.]])
t2
#> tensor([[11., 12.],
#>         [13., 14.],
#>         [15., 16.],
#>         [17., 18.],
#>         [19., 20.]])

# result
torch$mm(t1, t2)
#> tensor([[245., 260.],
#>         [620., 660.]])
```
```r
# 1D tensors: matmul behaves like a dot product
t1 = torch$tensor(c(1, 2))
t2 = torch$tensor(c(3, 2))

torch$matmul(t1, t2)
#> tensor(7.)

# 2D tensors: matmul behaves like matrix multiplication
t1 = torch$tensor(list(
  c(1, 2, 3),
  c(1, 2, 3)
))
t2 = torch$tensor(list(
  c(1, 2),
  c(1, 2),
  c(1, 2)
))

torch$matmul(t1, t2)
#> tensor([[ 6., 12.],
#>         [ 6., 12.]])
```
```r
# for batched multiplication of 3D tensors we use torch$matmul()
t1 = torch$arange(1, 13)$view(c(2L, 2L, 3L))   # a batch of two 2x3 matrices
t2 = torch$arange(0, 18)$view(c(2L, 3L, 3L))   # a batch of two 3x3 matrices

t1
#> tensor([[[ 1.,  2.,  3.],
#>          [ 4.,  5.,  6.]],
#>
#>         [[ 7.,  8.,  9.],
#>          [10., 11., 12.]]])
t2
#> tensor([[[ 0.,  1.,  2.],
#>          [ 3.,  4.,  5.],
#>          [ 6.,  7.,  8.]],
#>
#>         [[ 9., 10., 11.],
#>          [12., 13., 14.],
#>          [15., 16., 17.]]])

message("result")
#> result
torch$matmul(t1, t2)
#> tensor([[[ 24.,  30.,  36.],
#>          [ 51.,  66.,  81.]],
#>
#>         [[294., 318., 342.],
#>          [402., 435., 468.]]])
```
```r
t1 = torch$arange(1, 13)$view(c(3L, 2L, 2L))   # a batch of three 2x2 matrices
t2 = torch$arange(0, 12)$view(c(3L, 2L, 2L))   # a batch of three 2x2 matrices

t1
#> tensor([[[ 1.,  2.],
#>          [ 3.,  4.]],
#>
#>         [[ 5.,  6.],
#>          [ 7.,  8.]],
#>
#>         [[ 9., 10.],
#>          [11., 12.]]])
t2
#> tensor([[[ 0.,  1.],
#>          [ 2.,  3.]],
#>
#>         [[ 4.,  5.],
#>          [ 6.,  7.]],
#>
#>         [[ 8.,  9.],
#>          [10., 11.]]])

message("result")
#> result
torch$matmul(t1, t2)
#> tensor([[[  4.,   7.],
#>          [  8.,  15.]],
#>
#>         [[ 56.,  67.],
#>          [ 76.,  91.]],
#>
#>         [[172., 191.],
#>          [208., 231.]]])
```
```r
m1 = torch$ones(3L, 5L)
m2 = torch$ones(3L, 5L)
v1 = torch$ones(3L)

# cross product, size 3x5
(r = torch$cross(m1, m2))
#> tensor([[0., 0., 0., 0., 0.],
#>         [0., 0., 0., 0., 0.],
#>         [0., 0., 0., 0., 0.]])
```
numpy has been made available as a module inside rTorch. We can call functions from numpy by referring to it as np$any_function. Examples:
```r
# a 2D numpy array
syn0 <- np$random$rand(3L, 5L)
syn0
#>           [,1]      [,2]      [,3]       [,4]       [,5]
#> [1,] 0.1524218 0.7769113 0.6153864 0.00220404 0.78412198
#> [2,] 0.4959399 0.7230621 0.9840282 0.64843544 0.06556167
#> [3,] 0.5931231 0.6513373 0.4399219 0.57722973 0.94843503

# a numpy array of zeros
syn1 <- np$zeros(c(5L, 10L))
syn1
#>      [,1] [,2] [,3] [,4] [,5] [,6] [,7] [,8] [,9] [,10]
#> [1,]    0    0    0    0    0    0    0    0    0     0
#> [2,]    0    0    0    0    0    0    0    0    0     0
#> [3,]    0    0    0    0    0    0    0    0    0     0
#> [4,]    0    0    0    0    0    0    0    0    0     0
#> [5,]    0    0    0    0    0    0    0    0    0     0

# add a scalar to a numpy array
syn1 = syn1 + 0.1
syn1
#>      [,1] [,2] [,3] [,4] [,5] [,6] [,7] [,8] [,9] [,10]
#> [1,]  0.1  0.1  0.1  0.1  0.1  0.1  0.1  0.1  0.1   0.1
#> [2,]  0.1  0.1  0.1  0.1  0.1  0.1  0.1  0.1  0.1   0.1
#> [3,]  0.1  0.1  0.1  0.1  0.1  0.1  0.1  0.1  0.1   0.1
#> [4,]  0.1  0.1  0.1  0.1  0.1  0.1  0.1  0.1  0.1   0.1
#> [5,]  0.1  0.1  0.1  0.1  0.1  0.1  0.1  0.1  0.1   0.1
```
```r
# in numpy a multidimensional array needs to be defined with a tuple;
# from R we use a vector to refer to a tuple in Python
l1 <- np$ones(c(5L, 5L))
l1
#>      [,1] [,2] [,3] [,4] [,5]
#> [1,]    1    1    1    1    1
#> [2,]    1    1    1    1    1
#> [3,]    1    1    1    1    1
#> [4,]    1    1    1    1    1
#> [5,]    1    1    1    1    1

# matrix-matrix multiplication: (3x5) x (5x10) = (3x10)
np$dot(syn0, syn1)
#>           [,1]      [,2]      [,3]      [,4]      [,5]      [,6]      [,7]
#> [1,] 0.2331045 0.2331045 0.2331045 0.2331045 0.2331045 0.2331045 0.2331045
#> [2,] 0.2917027 0.2917027 0.2917027 0.2917027 0.2917027 0.2917027 0.2917027
#> [3,] 0.3210047 0.3210047 0.3210047 0.3210047 0.3210047 0.3210047 0.3210047
#>           [,8]      [,9]     [,10]
#> [1,] 0.2331045 0.2331045 0.2331045
#> [2,] 0.2917027 0.2917027 0.2917027
#> [3,] 0.3210047 0.3210047 0.3210047
```
```r
# build a numpy array from three R vectors
X <- np$array(rbind(c(1, 2, 3), c(4, 5, 6), c(7, 8, 9)))
X
#>      [,1] [,2] [,3]
#> [1,]    1    2    3
#> [2,]    4    5    6
#> [3,]    7    8    9

# transpose the array
np$transpose(X)
#>      [,1] [,2] [,3]
#> [1,]    1    4    7
#> [2,]    2    5    8
#> [3,]    3    6    9
```
With newer PyTorch versions we should work with copies of NumPy arrays. Minor changes in recent versions of PyTorch prevent the direct use of a NumPy array; you will get this warning:
```
sys:1: UserWarning: The given NumPy array is not writeable, and PyTorch does
not support non-writeable tensors. This means you can write to the underlying
(supposedly non-writeable) NumPy array using the tensor. You may want to copy
the array to protect its data or make it writeable before converting it to a
tensor. This type of warning will be suppressed for the rest of this program.
```
For instance, this code will produce the warning:
```r
# as_tensor: modifying the tensor modifies the numpy object as well
a = np$array(list(1, 2, 3))
t = torch$as_tensor(a)
print(t)

torch$tensor(list(1, 2, 3))
t[1L]$fill_(-1)
print(a)
```
while this other one, with some extra code, will not:
```r
a = np$array(list(1, 2, 3))
a_copy = r_to_py(a)$copy()   # we make a copy of the numpy array first

t = torch$as_tensor(a_copy)
print(t)
#> tensor([1., 2., 3.], dtype=torch.float64)

torch$tensor(list(1, 2, 3))
#> tensor([1., 2., 3.])

t[1L]$fill_(-1)
#> tensor(-1., dtype=torch.float64)

print(a)
#> [1] 1 2 3
```
To make it easier to copy an object in rTorch, we implemented the function make_copy(), which makes a safe copy regardless of whether the object is a torch, numpy, or R type object.
```r
a = np$array(list(1, 2, 3, 4, 5))

a_copy <- make_copy(a)
t <- torch$as_tensor(a_copy)
t
#> tensor([1., 2., 3., 4., 5.], dtype=torch.float64)
```
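Since the description above says make_copy() handles torch, numpy, and R objects alike, the other two cases could look like the following minimal sketch (outputs omitted; the exact classes returned depend on your setup):

```r
# a torch tensor: the copy should be independent of the original
tt <- torch$ones(3L)
tt_copy <- make_copy(tt)

# a plain R vector: copied as an R object
rv <- c(1, 2, 3)
rv_copy <- make_copy(rv)
```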
```r
# convert a numpy array to a tensor
np_a = np$array(c(c(3, 4), c(3, 6)))
t_a = torch$from_numpy(r_to_py(np_a)$copy())
print(t_a)
#> tensor([3., 4., 3., 6.], dtype=torch.float64)

# a random 1D tensor
np_arr <- np$random$rand(5L)
ft1 <- torch$FloatTensor(r_to_py(np_arr)$copy())   # make a copy of the numpy array
ft1
#> tensor([0.9408, 0.8752, 0.5924, 0.7329, 0.6719])

# tensor as a float of 64-bits
np_copy <- r_to_py(np$random$rand(5L))$copy()      # make a copy of the numpy array
ft2 <- torch$as_tensor(np_copy, dtype = torch$float64)
ft2
#> tensor([0.0462, 0.5851, 0.4886, 0.0725, 0.8959], dtype=torch.float64)
```
This is a very common operation in machine learning:
```r
# convert a tensor to a numpy array
a = torch$rand(5L, 4L)
b = a$numpy()
print(b)
#>           [,1]      [,2]         [,3]      [,4]
#> [1,] 0.8167720 0.8075168 0.2668687105 0.7421414
#> [2,] 0.6829966 0.5185235 0.0005332828 0.9414444
#> [3,] 0.2030416 0.7496545 0.0358363986 0.3475423
#> [4,] 0.7263991 0.6163300 0.2169904113 0.9363614
#> [5,] 0.4336911 0.5996053 0.2127178907 0.8461853

# convert a tensor to a 16-bit float
ft2_dbl <- torch$as_tensor(ft2, dtype = torch$float16)
ft2_dbl
#> tensor([0.0462, 0.5850, 0.4885, 0.0724, 0.8960], dtype=torch.float16)
```
Create a tensor of size (5 x 7) with uninitialized memory:
```r
a <- torch$FloatTensor(5L, 7L)
print(a)
#> tensor([[1.4013e-45, 1.4013e-45, 1.4013e-45, 1.4013e-45, 1.4013e-45, 1.4013e-45,
#>          1.4013e-45],
#>         [1.4013e-45, 1.4013e-45, 1.4013e-45, 1.4013e-45, 1.4013e-45, 1.4013e-45,
#>          1.4013e-45],
#>         [1.4013e-45, 1.4013e-45, 1.4013e-45, 1.4013e-45, 1.4013e-45, 1.4013e-45,
#>          1.4013e-45],
#>         [1.4013e-45, 1.4013e-45, 1.4013e-45, 1.4013e-45, 1.4013e-45, 1.4013e-45,
#>          1.4013e-45],
#>         [1.4013e-45, 1.4013e-45, 0.0000e+00, 1.4013e-45, 1.4013e-45, 1.4013e-45,
#>          1.4013e-45]])
```
```r
# using arange to create a tensor; it starts at 0
v = torch$arange(9L)
(v = v$view(3L, 3L))
#> tensor([[0, 1, 2],
#>         [3, 4, 5],
#>         [6, 7, 8]])
```
Initialize a tensor with values drawn from a normal distribution (mean = 0, variance = 1):
```r
a <- torch$randn(5L, 7L)
print(a)
#> tensor([[-0.9889,  0.6058,  0.5492, -0.0291,  1.3752, -0.2528, -1.8089],
#>         [ 0.6621, -0.4370,  1.5590,  0.8149,  0.4004, -0.5299,  0.9275],
#>         [ 0.0880,  0.5931,  0.2601,  1.6336,  1.0154,  1.2189, -1.6278],
#>         [ 1.2171,  0.1377, -0.2377,  0.0792, -0.2885,  0.6316,  1.7481],
#>         [-0.7538,  0.6162,  1.3023, -1.5574,  0.1196, -1.1652,  1.5082]])

print(a$size())
#> torch.Size([5, 7])
```
```r
library(rTorch)

# 3x5 matrix uniformly distributed between 0 and 1
mat0 <- torch$FloatTensor(3L, 5L)$uniform_(0L, 1L)

# fill a 3x5 matrix with 0.1
mat1 <- torch$FloatTensor(3L, 5L)$uniform_(0.1, 0.1)

# a vector with all ones
mat2 <- torch$FloatTensor(5L)$uniform_(1, 1)

mat0
#> tensor([[0.9587, 0.3368, 0.3534, 0.0709, 0.6827],
#>         [0.5521, 0.6874, 0.6756, 0.3705, 0.9120],
#>         [0.4415, 0.9895, 0.4699, 0.5890, 0.0901]])
mat1
#> tensor([[0.1000, 0.1000, 0.1000, 0.1000, 0.1000],
#>         [0.1000, 0.1000, 0.1000, 0.1000, 0.1000],
#>         [0.1000, 0.1000, 0.1000, 0.1000, 0.1000]])
```
```r
Binomial <- torch$distributions$binomial$Binomial

m = Binomial(100, torch$tensor(list(0, .2, .8, 1)))
(x = m$sample())
#> tensor([  0.,  29.,  80., 100.])

m = Binomial(torch$tensor(list(list(5.), list(10.))),
             torch$tensor(list(0.5, 0.8)))
(x = m$sample())
#> tensor([[0., 4.],
#>         [2., 8.]])
```
```r
Exponential <- torch$distributions$exponential$Exponential

m = Exponential(torch$tensor(list(1.0)))
m$sample()   # Exponential distributed with rate = 1
#> tensor([0.4214])
```
```r
Weibull <- torch$distributions$weibull$Weibull

m = Weibull(torch$tensor(list(1.0)), torch$tensor(list(1.0)))
m$sample()   # sample from a Weibull distribution with scale = 1, concentration = 1
#> tensor([0.0745])
```
Only floating-point types are supported as the default type.
```r
# default data type
torch$tensor(list(1.2, 3))$dtype   # default for floating point is torch.float32
#> torch.float32

# change the default data type to float64
torch$set_default_dtype(torch$float64)
torch$tensor(list(1.2, 3))$dtype   # a new floating point tensor
#> torch.float64

torch$set_default_dtype(torch$double)
torch$tensor(list(1.2, 3))$dtype
#> torch.float64
```
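Note that the default dtype is a global setting, so later examples that expect float32 tensors may behave differently after this change. A minimal sketch to restore the usual PyTorch default (the printed dtype assumes a standard installation):

```r
# restore the default floating-point type to float32
torch$set_default_dtype(torch$float32)
torch$tensor(list(1.2, 3))$dtype
#> torch.float32
```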
```r
x = torch$randn(2L, 3L)   # size 2x3
y = x$view(6L)            # resize x to size 6
z = x$view(-1L, 2L)       # size 3x2
print(y)
#> tensor([ 0.8073, -0.7656,  1.0641,  0.3801,  0.5983,  0.7950])
print(z)
#> tensor([[ 0.8073, -0.7656],
#>         [ 1.0641,  0.3801],
#>         [ 0.5983,  0.7950]])

# 0 1 2
# 3 4 5
# 6 7 8
v = torch$arange(9L)
(v = v$view(3L, 3L))
#> tensor([[0, 1, 2],
#>         [3, 4, 5],
#>         [6, 7, 8]])
```
```r
# concatenate tensors
x = torch$randn(2L, 3L)
print(x)
#> tensor([[-0.6563,  1.5943, -0.0617],
#>         [ 0.5502,  1.6150, -2.0000]])

# concatenate tensors along dim = 0
torch$cat(list(x, x, x), 0L)
#> tensor([[-0.6563,  1.5943, -0.0617],
#>         [ 0.5502,  1.6150, -2.0000],
#>         [-0.6563,  1.5943, -0.0617],
#>         [ 0.5502,  1.6150, -2.0000],
#>         [-0.6563,  1.5943, -0.0617],
#>         [ 0.5502,  1.6150, -2.0000]])

# concatenate tensors along dim = 1
torch$cat(list(x, x, x), 1L)
#> tensor([[-0.6563,  1.5943, -0.0617, -0.6563,  1.5943, -0.0617, -0.6563,  1.5943,
#>          -0.0617],
#>         [ 0.5502,  1.6150, -2.0000,  0.5502,  1.6150, -2.0000,  0.5502,  1.6150,
#>          -2.0000]])
```
```r
# ----- Reshape tensors -----
img <- torch$ones(3L, 28L, 28L)
print(img$size())
#> torch.Size([3, 28, 28])

img_chunks <- torch$chunk(img, chunks = 3L, dim = 0L)
print(length(img_chunks))
#> [1] 3

# 1st chunk member
img_chunk_1 <- img_chunks[[1]]
print(img_chunk_1$size())
#> torch.Size([1, 28, 28])
print(img_chunk_1$sum())
#> tensor(784.)

# 2nd chunk member
img_chunk_1 <- img_chunks[[2]]
print(img_chunk_1$size())
#> torch.Size([1, 28, 28])
print(img_chunk_1$sum())
#> tensor(784.)

# index_select: get layer 1
indices = torch$tensor(c(0L))
img2 <- torch$index_select(img, dim = 0L, index = indices)
print(img2$size())
#> torch.Size([1, 28, 28])
print(img2$sum())
#> tensor(784.)

# index_select: get layer 2
indices = torch$tensor(c(1L))
img2 <- torch$index_select(img, dim = 0L, index = indices)
print(img2$size())
#> torch.Size([1, 28, 28])
print(img2$sum())
#> tensor(784.)

# index_select: get layer 3
indices = torch$tensor(c(2L))
img2 <- torch$index_select(img, dim = 0L, index = indices)
print(img2$size())
#> torch.Size([1, 28, 28])
print(img2$sum())
#> tensor(784.)
```
```r
# identity matrix
eye = torch$eye(3L)   # create an identity 3x3 tensor
print(eye)
#> tensor([[1., 0., 0.],
#>         [0., 1., 0.],
#>         [0., 0., 1.]])

(v = torch$ones(10L))   # a tensor of size 10 containing all ones
#> tensor([1., 1., 1., 1., 1., 1., 1., 1., 1., 1.])

(v = torch$ones(2L, 1L, 2L, 1L))   # size 2x1x2x1
#> tensor([[[[1.],
#>           [1.]]],
#>
#>
#>         [[[1.],
#>           [1.]]]])

v = torch$ones_like(eye)   # a tensor with the same shape as eye, filled with 1s
v
#> tensor([[1., 1., 1.],
#>         [1., 1., 1.],
#>         [1., 1., 1.]])

(z = torch$zeros(10L))   # a tensor of size 10 containing all zeros
#> tensor([0., 0., 0., 0., 0., 0., 0., 0., 0., 0.])
```
```r
# a tensor filled with ones
(v = torch$ones(3L, 3L))
#> tensor([[1., 1., 1.],
#>         [1., 1., 1.],
#>         [1., 1., 1.]])

# change two rows of the tensor
# we are using a 1-based index
v[2L, ]$fill_(2L)   # fill row 2 with 2s
#> tensor([2., 2., 2.])
v[3L, ]$fill_(3L)   # fill row 3 with 3s
#> tensor([3., 3., 3.])

print(v)
#> tensor([[1., 1., 1.],
#>         [2., 2., 2.],
#>         [3., 3., 3.]])
```
```r
# initialize a tensor with a range of values
(v = torch$arange(10L))   # like Python's range(10), but creating a tensor
#> tensor([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])

(v = torch$arange(0L, 10L, step = 1L))   # size 10; like range(0, 10, 1)
#> tensor([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])

u <- torch$arange(0, 10, step = 0.5)
u
#> tensor([0.0000, 0.5000, 1.0000, 1.5000, 2.0000, 2.5000, 3.0000, 3.5000, 4.0000,
#>         4.5000, 5.0000, 5.5000, 6.0000, 6.5000, 7.0000, 7.5000, 8.0000, 8.5000,
#>         9.0000, 9.5000])
```
```r
# range of values with increments, including the end value
start <- 0
end   <- 10
step  <- 0.25

w <- torch$arange(start, end + step, step)
w
#> tensor([ 0.0000,  0.2500,  0.5000,  0.7500,  1.0000,  1.2500,  1.5000,  1.7500,
#>          2.0000,  2.2500,  2.5000,  2.7500,  3.0000,  3.2500,  3.5000,  3.7500,
#>          4.0000,  4.2500,  4.5000,  4.7500,  5.0000,  5.2500,  5.5000,  5.7500,
#>          6.0000,  6.2500,  6.5000,  6.7500,  7.0000,  7.2500,  7.5000,  7.7500,
#>          8.0000,  8.2500,  8.5000,  8.7500,  9.0000,  9.2500,  9.5000,  9.7500,
#>         10.0000])
```
```r
# initialize a linear- or log-scale tensor

# create a tensor with 10 linear points for (1, 10), inclusive
(v = torch$linspace(1L, 10L, steps = 10L))
#> tensor([ 1.,  2.,  3.,  4.,  5.,  6.,  7.,  8.,  9., 10.])

# size 5: 1.0e-10, 1.0e-05, 1.0e+00, 1.0e+05, 1.0e+10
(v = torch$logspace(start = -10L, end = 10L, steps = 5L))
#> tensor([1.0000e-10, 1.0000e-05, 1.0000e+00, 1.0000e+05, 1.0000e+10])
```
```r
a = torch$rand(5L, 4L)
print(class(a))
#> [1] "torch.Tensor"          "torch._C._TensorBase"  "python.builtin.object"

# when converting the tensor to a numpy array, R converts it automatically
b = a$numpy()
print(class(b))
#> [1] "matrix"

a$fill_(3.5)
#> tensor([[3.5000, 3.5000, 3.5000, 3.5000],
#>         [3.5000, 3.5000, 3.5000, 3.5000],
#>         [3.5000, 3.5000, 3.5000, 3.5000],
#>         [3.5000, 3.5000, 3.5000, 3.5000],
#>         [3.5000, 3.5000, 3.5000, 3.5000]])
```
```r
# a has now been filled with the value 3.5

# add a scalar to a tensor;
# the scalar 4.0 is auto-converted to a tensor
b <- a$add(4.0)

# a is still filled with 3.5;
# a new tensor b is returned with values 3.5 + 4.0 = 7.5
print(a)
#> tensor([[3.5000, 3.5000, 3.5000, 3.5000],
#>         [3.5000, 3.5000, 3.5000, 3.5000],
#>         [3.5000, 3.5000, 3.5000, 3.5000],
#>         [3.5000, 3.5000, 3.5000, 3.5000],
#>         [3.5000, 3.5000, 3.5000, 3.5000]])
print(b)
#> tensor([[7.5000, 7.5000, 7.5000, 7.5000],
#>         [7.5000, 7.5000, 7.5000, 7.5000],
#>         [7.5000, 7.5000, 7.5000, 7.5000],
#>         [7.5000, 7.5000, 7.5000, 7.5000],
#>         [7.5000, 7.5000, 7.5000, 7.5000]])
```
```r
# this will throw an error because we don't have an assignment function yet
a[1, 1] <- 7.7
print(a)
# Error in a[1, 1] <- 7.7 : object of type 'environment' is not subsettable

# this is the right way to assign a value to a tensor element
a[1, 1]$fill_(7.7)
#> tensor(7.7000)

# we can see that the first element has been changed
a
#> tensor([[7.7000, 3.5000, 3.5000, 3.5000],
#>         [3.5000, 3.5000, 3.5000, 3.5000],
#>         [3.5000, 3.5000, 3.5000, 3.5000],
#>         [3.5000, 3.5000, 3.5000, 3.5000],
#>         [3.5000, 3.5000, 3.5000, 3.5000]])
```
Some operations, like narrow, do not have in-place versions, and hence .narrow_ does not exist. Similarly, some operations, like fill_, do not have an out-of-place version, so .fill does not exist.
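To illustrate the difference, here is a minimal sketch (outputs omitted; they follow from the shared storage): narrow() returns a new view without touching the original, while fill_() mutates its tensor in place.

```r
x <- torch$ones(3L, 3L)

# out-of-place: narrow(dim, start, length) returns a 3x2 view of
# columns 0-1 (0-based); x itself is unchanged
y <- x$narrow(1L, 0L, 2L)

# in-place: fill_ overwrites x; since y is a view into the same
# storage, y now shows 9s as well
x$fill_(9)
print(y)
```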
```r
# R 1-based indexing; the same element as a[[0L, 3L]] in 0-based indexing
a[1, 4]
#> tensor(3.5000)
```
```r
# replace an element at position 0, 0
(new_tensor = torch$Tensor(list(list(1, 2), list(3, 4))))
#> tensor([[1., 2.],
#>         [3., 4.]])

# first row, first column
print(new_tensor[1L, 1L])
#> tensor(1.)

# change row 1, col 1 to the value 5
new_tensor[1L, 1L]$fill_(5)
#> tensor(5.)

# which is the same as doing this
new_tensor[1, 1]$fill_(5)
#> tensor(5.)
```
Notice that the element was changed in place because of fill_.
```r
print(new_tensor)   # tensor([[5., 2.], [3., 4.]])
#> tensor([[5., 2.],
#>         [3., 4.]])

# access the element at position (1, 0), 0-based index
print(new_tensor[2L, 1L])   # tensor(3.)
#> tensor(3.)

# convert it to a scalar value
print(new_tensor[2L, 1L]$item())   # 3
#> [1] 3

# which is the same as
print(new_tensor[2, 1])
#> tensor(3.)

# and the scalar
print(new_tensor[2, 1]$item())
#> [1] 3
```
```r
# select indices
x = torch$randn(3L, 4L)
print(x)
#> tensor([[-2.4324,  0.5563,  1.3308, -0.6363],
#>         [ 1.1925,  2.4744, -0.0463, -2.2281],
#>         [-1.5476,  1.1377,  0.3645, -0.8908]])

# extract the first and third rows
# select indices, dim = 0
indices = torch$tensor(list(0L, 2L))
torch$index_select(x, 0L, indices)
#> tensor([[-2.4324,  0.5563,  1.3308, -0.6363],
#>         [-1.5476,  1.1377,  0.3645, -0.8908]])

# extract the first and third columns
# select indices, dim = 1
torch$index_select(x, 1L, indices)
#> tensor([[-2.4324,  1.3308],
#>         [ 1.1925, -0.0463],
#>         [-1.5476,  0.3645]])
```
```r
# take by flat (0-based) indices; the input is treated as flattened
src = torch$tensor(list(list(4, 3, 5),
                        list(6, 7, 8)))
print(src)
#> tensor([[4., 3., 5.],
#>         [6., 7., 8.]])

print(torch$take(src, torch$tensor(list(0L, 2L, 5L))))
#> tensor([4., 5., 8.])
```
```r
# two dimensions: 3x3
x <- torch$arange(9L)
x <- x$view(c(3L, 3L))
t <- torch$transpose(x, 0L, 1L)

x   # original tensor
#> tensor([[0, 1, 2],
#>         [3, 4, 5],
#>         [6, 7, 8]])

t   # transposed
#> tensor([[0, 3, 6],
#>         [1, 4, 7],
#>         [2, 5, 8]])
```
```r
# three dimensions: 1x2x3
x <- torch$ones(c(1L, 2L, 3L))
t <- torch$transpose(x, 1L, 0L)

print(x)   # original tensor
#> tensor([[[1., 1., 1.],
#>          [1., 1., 1.]]])
print(t)   # transposed
#> tensor([[[1., 1., 1.]],
#>
#>         [[1., 1., 1.]]])

print(x$shape)   # original tensor
#> torch.Size([1, 2, 3])
print(t$shape)   # transposed
#> torch.Size([2, 1, 3])
```
```r
x   <- torch$tensor(list(list(list(1, 2)), list(list(3, 4)), list(list(5, 6))))
xs  <- torch$as_tensor(x$shape)
xp  <- x$permute(c(1L, 2L, 0L))
xps <- torch$as_tensor(xp$shape)

print(x)    # original tensor
#> tensor([[[1., 2.]],
#>
#>         [[3., 4.]],
#>
#>         [[5., 6.]]])

print(xp)   # permuted tensor
#> tensor([[[1., 3., 5.],
#>          [2., 4., 6.]]])

print(xs)   # shape of the original tensor
#> tensor([3, 1, 2])
print(xps)  # shape of the permuted tensor
#> tensor([1, 2, 3])
```
```r
torch$manual_seed(1234)
#> <torch._C.Generator>

x <- torch$randn(10L, 480L, 640L, 3L)
x[1:3, 1:2, 1:3, 1:2]
#> tensor([[[[-0.0883,  0.3420],
#>           [ 1.0051, -0.1117],
#>           [-0.0982, -0.3511]],
#>
#>          [[-0.1465,  0.3960],
#>           [-1.6878,  0.5720],
#>           [ 0.9426,  2.1187]]],
#>
#>
#>         [[[ 0.8107,  0.9289],
#>           [ 0.4210, -1.5109],
#>           [-1.8483, -0.4636]],
#>
#>          [[-1.8324, -1.9304],
#>           [-2.7020,  0.3491],
#>           [ 0.9180, -1.9872]]],
#>
#>
#>         [[[ 1.6555, -0.3531],
#>           [ 0.4763,  0.8037],
#>           [-0.2171, -0.0839]],
#>
#>          [[-0.0886, -1.3389],
#>           [ 0.7163, -0.9050],
#>           [-0.8144, -1.4922]]]])
```
```r
xs  <- torch$as_tensor(x$size())    # torch$tensor(c(10L, 480L, 640L, 3L))
xp  <- x$permute(0L, 3L, 1L, 2L)    # specify the new order of the dimensions
xps <- torch$as_tensor(xp$size())   # torch$tensor(c(10L, 3L, 480L, 640L))

print(xs)    # original tensor size
#> tensor([ 10, 480, 640,   3])
print(xps)   # permuted tensor size
#> tensor([ 10,   3, 480, 640])

xp[1:3, 1:2, 1:3, 1:2]
#> tensor([[[[-0.0883,  1.0051],
#>           [-0.1465, -1.6878],
#>           [-0.6429,  0.5577]],
#>
#>          [[ 0.3420, -0.1117],
#>           [ 0.3960,  0.5720],
#>           [ 0.3014,  0.7813]]],
#>
#>
#>         [[[ 0.8107,  0.4210],
#>           [-1.8324, -2.7020],
#>           [ 1.1724,  0.4434]],
#>
#>          [[ 0.9289, -1.5109],
#>           [-1.9304,  0.3491],
#>           [ 0.9901, -1.3630]]],
#>
#>
#>         [[[ 1.6555,  0.4763],
#>           [-0.0886,  0.7163],
#>           [-0.7774, -0.6281]],
#>
#>          [[-0.3531,  0.8037],
#>           [-1.3389, -0.9050],
#>           [-0.7920,  1.3634]]]])
```
```r
(m0 = torch$zeros(3L, 5L))
#> tensor([[0., 0., 0., 0., 0.],
#>         [0., 0., 0., 0., 0.],
#>         [0., 0., 0., 0., 0.]])

(m1 = torch$ones(3L, 5L))
#> tensor([[1., 1., 1., 1., 1.],
#>         [1., 1., 1., 1., 1.],
#>         [1., 1., 1., 1., 1.]])

(m2 = torch$eye(3L, 5L))
#> tensor([[1., 0., 0., 0., 0.],
#>         [0., 1., 0., 0., 0.],
#>         [0., 0., 1., 0., 0.]])
```
```r
# is m1 equal to m0?
print(m1 == m0)
#> tensor([[False, False, False, False, False],
#>         [False, False, False, False, False],
#>         [False, False, False, False, False]])

print(as_boolean(m1 == m0))
#> tensor([[False, False, False, False, False],
#>         [False, False, False, False, False],
#>         [False, False, False, False, False]])

# is m1 not equal to itself?
print(m1 != m1)
#> tensor([[False, False, False, False, False],
#>         [False, False, False, False, False],
#>         [False, False, False, False, False]])

# are both equal?
print(m2 == m2)
#> tensor([[True, True, True, True, True],
#>         [True, True, True, True, True],
#>         [True, True, True, True, True]])

print(as_boolean(m2 == m2))
#> tensor([[True, True, True, True, True],
#>         [True, True, True, True, True],
#>         [True, True, True, True, True]])
```
```r
# some elements are equal, others are not
m1 != m2
#> tensor([[False,  True,  True,  True,  True],
#>         [ True, False,  True,  True,  True],
#>         [ True,  True, False,  True,  True]])

# some elements are equal, others are not
m0 != m2
#> tensor([[ True, False, False, False, False],
#>         [False,  True, False, False, False],
#>         [False, False,  True, False, False]])

as_boolean(m0 != m2)
#> tensor([[ True, False, False, False, False],
#>         [False,  True, False, False, False],
#>         [False, False,  True, False, False]])
```
```r
# AND
m1 & m1
#> tensor([[1, 1, 1, 1, 1],
#>         [1, 1, 1, 1, 1],
#>         [1, 1, 1, 1, 1]], dtype=torch.uint8)

as_boolean(m1 & m1)
#> tensor([[True, True, True, True, True],
#>         [True, True, True, True, True],
#>         [True, True, True, True, True]])

# OR
m0 | m2
#> tensor([[1, 0, 0, 0, 0],
#>         [0, 1, 0, 0, 0],
#>         [0, 0, 1, 0, 0]], dtype=torch.uint8)

# OR
m1 | m2
#> tensor([[1, 1, 1, 1, 1],
#>         [1, 1, 1, 1, 1],
#>         [1, 1, 1, 1, 1]], dtype=torch.uint8)

as_boolean(m1 | m2)
#> tensor([[True, True, True, True, True],
#>         [True, True, True, True, True],
#>         [True, True, True, True, True]])
```
```r
# tensor is less than
A <- torch$ones(60000L, 1L, 28L, 28L)
C <- A * 0.5

# is C < A = TRUE
all(torch$lt(C, A))
#> tensor(1, dtype=torch.uint8)
all(C < A)
#> tensor(1, dtype=torch.uint8)

# is A < C = FALSE
all(A < C)
#> tensor(0, dtype=torch.uint8)

# tensor is greater than
A <- torch$ones(60000L, 1L, 28L, 28L)
D <- A * 2.0
all(torch$gt(D, A))
#> tensor(1, dtype=torch.uint8)
all(torch$gt(A, D))
#> tensor(0, dtype=torch.uint8)

# tensor is less than or equal
A1 <- torch$ones(60000L, 1L, 28L, 28L)
all(torch$le(A1, A1))
#> tensor(1, dtype=torch.uint8)
all(A1 <= A1)
#> tensor(1, dtype=torch.uint8)

# tensor is greater than or equal
A0 <- torch$zeros(60000L, 1L, 28L, 28L)
all(torch$ge(A0, A0))
#> tensor(1, dtype=torch.uint8)
all(A0 >= A0)
#> tensor(1, dtype=torch.uint8)

all(A1 >= A0)
#> tensor(1, dtype=torch.uint8)
all(A1 <= A0)
#> tensor(0, dtype=torch.uint8)
```
```r
# we implement this little function
all_as_boolean <- function(x) {
  # convert a tensor of 1s and 0s to a single R logical
  as.logical(torch$all(x)$numpy())
}

all_as_boolean(torch$gt(D, A))
#> [1] TRUE
all_as_boolean(torch$gt(A, D))
#> [1] FALSE
all_as_boolean(A1 <= A1)
#> [1] TRUE
all_as_boolean(A1 >= A0)
#> [1] TRUE
all_as_boolean(A1 <= A0)
#> [1] FALSE
```
```r
# vector of booleans
all_true <- torch$BoolTensor(list(TRUE, TRUE, TRUE, TRUE))
all_true
#> tensor([True, True, True, True])

# logical NOT
# negate the vector with "!"
not_all_true <- !all_true
not_all_true
#> tensor([False, False, False, False])
```
```r
# a diagonal matrix
diag <- torch$eye(5L)
diag <- diag$to(dtype = torch$uint8)   # convert to unsigned integer
diag
#> tensor([[1, 0, 0, 0, 0],
#>         [0, 1, 0, 0, 0],
#>         [0, 0, 1, 0, 0],
#>         [0, 0, 0, 1, 0],
#>         [0, 0, 0, 0, 1]], dtype=torch.uint8)

as_boolean(diag)
#> tensor([[ True, False, False, False, False],
#>         [False,  True, False, False, False],
#>         [False, False,  True, False, False],
#>         [False, False, False,  True, False],
#>         [False, False, False, False,  True]])

# logical NOT
not_diag <- !diag
not_diag
#> tensor([[0, 1, 1, 1, 1],
#>         [1, 0, 1, 1, 1],
#>         [1, 1, 0, 1, 1],
#>         [1, 1, 1, 0, 1],
#>         [1, 1, 1, 1, 0]], dtype=torch.uint8)

# and the negation of the negation
!not_diag
#> tensor([[1, 0, 0, 0, 0],
#>         [0, 1, 0, 0, 0],
#>         [0, 0, 1, 0, 0],
#>         [0, 0, 0, 1, 0],
#>         [0, 0, 0, 0, 1]], dtype=torch.uint8)

as_boolean(!not_diag)
#> tensor([[ True, False, False, False, False],
#>         [False,  True, False, False, False],
#>         [False, False,  True, False, False],
#>         [False, False, False,  True, False],
#>         [False, False, False, False,  True]])
```