Skip to content

Commit 4f80d70

Browse files
AngelEzquerra
authored and metagn committed
Add support for running some key bitops functions on integer Tensors (mratsim#661)
While we already supported most of Nim's std/math features in Arraymancer, we did not yet support any of the std/bitops operators and procedures. These are very useful for implementing some important algorithms, such as Gray coding and others. This commit adds some of the most important std/bitops features. These will soon be used in `impulse` to implement some new algorithms.
1 parent 0d3db8b commit 4f80d70

File tree

3 files changed

+202
-0
lines changed

3 files changed

+202
-0
lines changed

Diff for: src/arraymancer/tensor.nim

+2
Original file line numberDiff line numberDiff line change
@@ -39,6 +39,7 @@ import ./laser/dynamic_stack_arrays,
3939
./tensor/math_functions,
4040
./tensor/aggregate,
4141
./tensor/algorithms,
42+
./tensor/bitops_functions,
4243
./tensor/lapack,
4344
./tensor/optim_ops_fusion,
4445
./tensor/syntactic_sugar,
@@ -67,6 +68,7 @@ export dynamic_stack_arrays,
6768
math_functions,
6869
aggregate,
6970
algorithms,
71+
bitops_functions,
7072
lapack,
7173
optim_ops_fusion,
7274
syntactic_sugar,

Diff for: src/arraymancer/tensor/bitops_functions.nim

+131
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,131 @@
1+
# Copyright 2017 the Arraymancer contributors
2+
#
3+
# Licensed under the Apache License, Version 2.0 (the "License");
4+
# you may not use this file except in compliance with the License.
5+
# You may obtain a copy of the License at
6+
#
7+
# http://www.apache.org/licenses/LICENSE-2.0
8+
#
9+
# Unless required by applicable law or agreed to in writing, software
10+
# distributed under the License is distributed on an "AS IS" BASIS,
11+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12+
# See the License for the specific language governing permissions and
13+
# limitations under the License.
14+
15+
import ./data_structure,
16+
./higher_order_applymap,
17+
./shapeshifting,
18+
./ufunc
19+
import std / bitops
20+
21+
export bitops
22+
23+
proc `shr`*[T1, T2: SomeInteger](t: Tensor[T1], value: T2): Tensor[T1] {.noinit.} =
  ## Broadcasted tensor-value `shr` (i.e. shift right) operator
  ##
  ## Shifts every element of `t` right by `value` bits.
  ## This is similar to numpy's `right_shift` and Matlab's `bitsra`
  ## (or `bitshift` with a negative shift value, since Matlab's
  ## `bitshift` shifts left for positive shift values).
  t.map_inline(x shr value)
29+
30+
proc `shr`*[T1, T2: SomeInteger](value: T1, t: Tensor[T2]): Tensor[T2] {.noinit.} =
  ## Broadcasted value-tensor `shr` (i.e. shift right) operator
  ##
  ## Shifts `value` right by each element of `t` (element-wise).
  ## This is similar to numpy's `right_shift` and Matlab's `bitsra`
  ## (or `bitshift` with a negative shift value, since Matlab's
  ## `bitshift` shifts left for positive shift values).
  t.map_inline(value shr x)
36+
37+
proc `shr`*[T: SomeInteger](t1, t2: Tensor[T]): Tensor[T] {.noinit.} =
  ## Tensor element-wise `shr` (i.e. shift right) broadcasted operator
  ##
  ## The two input tensors are broadcast to a common shape before the
  ## element-wise shift is applied.
  ## This is similar to numpy's `right_shift` and Matlab's `bitsra`
  ## (or `bitshift` with a negative shift value, since Matlab's
  ## `bitshift` shifts left for positive shift values).
  let (tmp1, tmp2) = broadcast2(t1, t2)
  result = map2_inline(tmp1, tmp2, x shr y)
44+
45+
proc `shl`*[T1, T2: SomeInteger](t: Tensor[T1], value: T2): Tensor[T1] {.noinit.} =
  ## Broadcasted tensor-value `shl` (i.e. shift left) operator
  ##
  ## Shifts every element of `t` left by `value` bits.
  ## This is similar to numpy's `left_shift` and Matlab's `bitsll`
  ## (or `bitshift` with a positive shift value).
  t.map_inline(x shl value)
51+
52+
proc `shl`*[T1, T2: SomeInteger](value: T1, t: Tensor[T2]): Tensor[T2] {.noinit.} =
  ## Broadcasted value-tensor `shl` (i.e. shift left) operator
  ##
  ## Shifts `value` left by each element of `t` (element-wise).
  ## This is similar to numpy's `left_shift` and Matlab's `bitsll`
  ## (or `bitshift` with a positive shift value).
  t.map_inline(value shl x)
58+
59+
proc `shl`*[T: SomeInteger](t1, t2: Tensor[T]): Tensor[T] {.noinit.} =
  ## Tensor element-wise `shl` (i.e. shift left) broadcasted operator
  ##
  ## The two input tensors are broadcast to a common shape before the
  ## element-wise shift is applied.
  ## This is similar to numpy's `left_shift` and Matlab's `bitsll`
  ## (or `bitshift` with a positive shift value).
  let (tmp1, tmp2) = broadcast2(t1, t2)
  result = map2_inline(tmp1, tmp2, x shl y)
66+
67+
# `makeUniversal` lifts the scalar std/bitops `bitnot` into an exported
# element-wise proc over tensors; `docSuffix` is appended to the
# generated documentation.
makeUniversal(bitnot,
  docSuffix="""Element-wise `bitnot` procedure

This is similar to numpy's `bitwise_not` and Matlab's `bitnot`.""")
71+
72+
proc bitand*[T](t: Tensor[T], value: T): Tensor[T] {.noinit.} =
  ## Broadcasted tensor-value `bitand` procedure
  ##
  ## Computes the bitwise AND of every element of `t` with `value`.
  ## This is similar to numpy's `bitwise_and` and Matlab's `bitand`.
  result = map_inline(t, bitand(x, value))
77+
78+
proc bitand*[T](value: T, t: Tensor[T]): Tensor[T] {.noinit.} =
  ## Broadcasted value-tensor `bitand` procedure
  ##
  ## Computes the bitwise AND of `value` with every element of `t`.
  ## This is similar to numpy's `bitwise_and` and Matlab's `bitand`.
  result = map_inline(t, bitand(value, x))
83+
84+
proc bitand*[T](t1, t2: Tensor[T]): Tensor[T] {.noinit.} =
  ## Tensor element-wise `bitand` procedure
  ##
  ## Both tensors are broadcast to a common shape before the
  ## element-wise bitwise AND is applied.
  ## This is similar to numpy's `bitwise_and` and Matlab's `bitand`.
  let (b1, b2) = broadcast2(t1, t2)
  result = map2_inline(b1, b2, bitand(x, y))
90+
91+
92+
proc bitor*[T](t: Tensor[T], value: T): Tensor[T] {.noinit.} =
  ## Broadcasted tensor-value `bitor` procedure
  ##
  ## Computes the bitwise OR of every element of `t` with `value`.
  ## This is similar to numpy's `bitwise_or` and Matlab's `bitor`.
  result = map_inline(t, bitor(x, value))
97+
98+
proc bitor*[T](value: T, t: Tensor[T]): Tensor[T] {.noinit.} =
  ## Broadcasted value-tensor `bitor` procedure
  ##
  ## Computes the bitwise OR of `value` with every element of `t`.
  ## This is similar to numpy's `bitwise_or` and Matlab's `bitor`.
  result = map_inline(t, bitor(value, x))
103+
104+
proc bitor*[T](t1, t2: Tensor[T]): Tensor[T] {.noinit.} =
  ## Tensor element-wise `bitor` procedure
  ##
  ## Both tensors are broadcast to a common shape before the
  ## element-wise bitwise OR is applied.
  ## This is similar to numpy's `bitwise_or` and Matlab's `bitor`.
  let (tmp1, tmp2) = broadcast2(t1, t2)
  # Assign explicitly to `result` for consistency with the sibling
  # element-wise procs in this module (the original relied on the
  # implicit last-expression result).
  result = map2_inline(tmp1, tmp2, bitor(x, y))
110+
111+
proc bitxor*[T](t: Tensor[T], value: T): Tensor[T] {.noinit.} =
  ## Broadcasted tensor-value `bitxor` procedure
  ##
  ## Computes the bitwise XOR of every element of `t` with `value`.
  ## This is similar to numpy's `bitwise_xor` and Matlab's `bitxor`.
  result = map_inline(t, bitxor(x, value))
116+
117+
proc bitxor*[T](value: T, t: Tensor[T]): Tensor[T] {.noinit.} =
  ## Broadcasted value-tensor `bitxor` procedure
  ##
  ## Computes the bitwise XOR of `value` with every element of `t`.
  ## This is similar to numpy's `bitwise_xor` and Matlab's `bitxor`.
  result = map_inline(t, bitxor(value, x))
122+
123+
proc bitxor*[T](t1, t2: Tensor[T]): Tensor[T] {.noinit.} =
  ## Tensor element-wise `bitxor` procedure
  ##
  ## Both tensors are broadcast to a common shape before the
  ## element-wise bitwise XOR is applied.
  ## This is similar to numpy's `bitwise_xor` and Matlab's `bitxor`.
  let (tmp1, tmp2) = broadcast2(t1, t2)
  # Assign explicitly to `result` for consistency with the sibling
  # element-wise procs in this module (the original relied on the
  # implicit last-expression result).
  result = map2_inline(tmp1, tmp2, bitxor(x, y))
129+
130+
# Lift std/bitops `reverseBits` (which reverses the bit order of an
# integer) into an exported element-wise proc over tensors.
makeUniversal(reverseBits,
  docSuffix="Element-wise `reverseBits` procedure")

Diff for: tests/tensor/test_bitops_functions.nim

+69
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,69 @@
1+
# Copyright 2017 the Arraymancer contributors
2+
#
3+
# Licensed under the Apache License, Version 2.0 (the "License");
4+
# you may not use this file except in compliance with the License.
5+
# You may obtain a copy of the License at
6+
#
7+
# http://www.apache.org/licenses/LICENSE-2.0
8+
#
9+
# Unless required by applicable law or agreed to in writing, software
10+
# distributed under the License is distributed on an "AS IS" BASIS,
11+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12+
# See the License for the specific language governing permissions and
13+
# limitations under the License.
14+
15+
import ../../src/arraymancer
16+
import std / unittest
17+
18+
proc main() =
  ## Test suite for the element-wise tensor bitops functions.
  suite "Bitops functions":
    test "bitnot":
      let t = [0, 1, 57, 1022, -100].toTensor
      let expected = [-1, -2, -58, -1023, 99].toTensor
      check: t.bitnot == expected

    test "shr":
      let t1 = [0, 1, 57, 1022, -100].toTensor
      let t2 = [0, 1, 2, 3, 4].toTensor
      check: t1 shr 3 == [0, 0, 7, 127, -13].toTensor
      check: 1024 shr t2 == [1024, 512, 256, 128, 64].toTensor
      check: t1 shr t2 == [0, 0, 14, 127, -7].toTensor

    test "shl":
      let t1 = [0, 1, 57, 1022, -100].toTensor
      let t2 = [0, 1, 2, 3, 4].toTensor
      check: t1 shl 3 == [0, 8, 456, 8176, -800].toTensor
      check: 3 shl t2 == [3, 6, 12, 24, 48].toTensor
      check: t1 shl t2 == [0, 2, 228, 8176, -1600].toTensor

    test "bitand":
      let t1 = [0, 1, 57, 1022, -100].toTensor
      let t2 = [0, 2, 7, 15, 11].toTensor
      check: bitand(t1, 0b010_110_101) == [0, 1, 49, 180, 148].toTensor
      check: bitand(t1, 0b010_110_101) == bitand(0b010_110_101, t1)
      check: bitand(t1, t2) == [0, 0, 1, 14, 8].toTensor
      # Commutativity check: the original compared bitand(t1, t2) with
      # itself, which is trivially true; swap the arguments instead.
      check: bitand(t1, t2) == bitand(t2, t1)

    test "bitor":
      let t1 = [0, 1, 57, 1022, -100].toTensor
      let t2 = [0, 2, 7, 15, 11].toTensor
      check: bitor(t1, 0b010_110_101) == [181, 181, 189, 1023, -67].toTensor
      check: bitor(t1, 0b010_110_101) == bitor(0b010_110_101, t1)
      check: bitor(t1, t2) == [0, 3, 63, 1023, -97].toTensor
      # Commutativity check (original was a tautology, see "bitand").
      check: bitor(t1, t2) == bitor(t2, t1)

    test "bitxor":
      let t1 = [0, 1, 57, 1022, -100].toTensor
      let t2 = [0, 2, 7, 15, 11].toTensor
      check: bitxor(t1, 0b010_110_101) == [181, 180, 140, 843, -215].toTensor
      check: bitxor(t1, 0b010_110_101) == bitxor(0b010_110_101, t1)
      check: bitxor(t1, t2) == [0, 3, 62, 1009, -105].toTensor
      # Commutativity check (original was a tautology, see "bitand").
      check: bitxor(t1, t2) == bitxor(t2, t1)

    test "reverse_bits":
      # uint16 keeps the expected bit-reversed values small and exact.
      let t = [0, 1, 57, 1022].toTensor(uint16)
      let expected = [0, 32768, 39936, 32704].toTensor(uint16)
      check: t.reverse_bits == expected

main()
GC_fullCollect()

0 commit comments

Comments
 (0)