
Commit 1a67c67

Revert "TensorBearer API and cleaner integrator steps (#107)"
This reverts commit dcff6d6.
1 parent dcff6d6 commit 1a67c67

19 files changed, +520 -660 lines changed

src/simulated_bifurcation/core/ising.py

Lines changed: 18 additions & 15 deletions
@@ -24,13 +24,13 @@
 from numpy import ndarray
 
 from ..optimizer import SimulatedBifurcationEngine, SimulatedBifurcationOptimizer
-from .tensor_bearer import TensorBearer
+from .utils import safe_get_device, safe_get_dtype
 
 # Workaround because `Self` type is only available in Python >= 3.11
 SelfIsing = TypeVar("SelfIsing", bound="Ising")
 
 
-class Ising(TensorBearer):
+class Ising(object):
     """
     Internal implementation of the Ising model.
 
@@ -92,10 +92,13 @@ def __init__(
         dtype: Optional[torch.dtype] = None,
         device: Optional[Union[str, torch.device]] = None,
     ) -> None:
-        super().__init__(dtype=dtype, device=device)
-        J = self._safe_get_tensor(J)
-        if h is not None:
-            h = self._safe_get_tensor(h)
+        self._dtype = safe_get_dtype(dtype)
+        self._device = safe_get_device(device)
+
+        if isinstance(J, ndarray):
+            J = torch.from_numpy(J)
+        if isinstance(h, ndarray):
+            h = torch.from_numpy(h)
 
         if J.ndim != 2:
             raise ValueError(
@@ -107,25 +110,27 @@ def __init__(
                 f"Expected J to be square, but got {rows} rows and {cols} columns."
             )
 
-        self._J = J
+        self._J = J.to(dtype=self._dtype, device=self._device)
         self._dimension = rows
 
         if h is None:
-            self._h = torch.zeros(self._dimension, dtype=self.dtype, device=self.device)
+            self._h = torch.zeros(
+                self._dimension, dtype=self._dtype, device=self._device
+            )
         elif h.shape != (self._dimension,):
             raise ValueError(
                 f"Expected the shape of h to be {self._dimension}, but got {tuple(h.shape)}."
             )
         else:
-            self._h = h
+            self._h = h.to(dtype=self._dtype, device=self._device)
 
         self._has_linear_term = not torch.equal(
             self._h,
-            torch.zeros(self._dimension, dtype=self.dtype, device=self.device),
+            torch.zeros(self._dimension, dtype=self._dtype, device=self._device),
         )
 
     def __neg__(self) -> SelfIsing:
-        return self.__class__(-self._J, -self._h, self.dtype, self.device)
+        return self.__class__(-self._J, -self._h, self._dtype, self._device)
 
     def as_simulated_bifurcation_tensor(self) -> torch.Tensor:
         """
@@ -174,8 +179,8 @@ def as_simulated_bifurcation_tensor(self) -> torch.Tensor:
         if self._has_linear_term:
             sb_tensor = torch.zeros(
                 (self._dimension + 1, self._dimension + 1),
-                dtype=self.dtype,
-                device=self.device,
+                dtype=self._dtype,
+                device=self._device,
             )
             sb_tensor[: self._dimension, : self._dimension] = symmetrical_J
             sb_tensor[: self._dimension, self._dimension] = -self._h
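For orientation, the block above folds the linear term h into an (n+1) x (n+1) matrix by adding one auxiliary spin. A standalone sketch of that layout in plain torch, with made-up values (the real method uses the symmetrized J and may fill entries not visible in this hunk):

import torch

J = torch.tensor([[0.0, 1.0], [1.0, 0.0]])  # toy quadratic coefficients
h = torch.tensor([0.5, -0.5])               # toy linear term
n = J.shape[0]

sb_tensor = torch.zeros((n + 1, n + 1))
sb_tensor[:n, :n] = J   # quadratic block, as in the hunk above
sb_tensor[:n, n] = -h   # linear term stored in the extra column
print(sb_tensor)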
@@ -346,8 +351,6 @@ def minimize(
             verbose,
             sampling_period,
             convergence_threshold,
-            self.dtype,
-            self.device,
         )
         tensor = self.as_simulated_bifurcation_tensor()
         spins = optimizer.run_integrator(tensor, early_stopping)
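Taken together, the reverted Ising normalizes its inputs itself: numpy arrays go through torch.from_numpy, and dtype/device come from the safe_get_dtype and safe_get_device helpers rather than a TensorBearer base class. A minimal construction sketch, illustrative only (it assumes the package is importable as simulated_bifurcation and that h defaults to None, which this hunk does not show):

import numpy as np
import torch
from simulated_bifurcation.core.ising import Ising

J = np.array([[0.0, 2.0], [2.0, 0.0]])          # numpy input is converted with torch.from_numpy
ising = Ising(J, dtype=torch.float64, device="cpu")
print(ising.as_simulated_bifurcation_tensor())  # dtype and device follow the constructor arguments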

src/simulated_bifurcation/core/quadratic_polynomial.py

Lines changed: 69 additions & 49 deletions
@@ -28,11 +28,20 @@
 from sympy import Poly
 
 from .ising import Ising
-from .tensor_bearer import TensorBearer
+from .utils import safe_get_device, safe_get_dtype
 from .variable import Variable
 
+INTEGER_REGEX = re.compile("^int[1-9][0-9]*$")
+DOMAIN_ERROR = ValueError(
+    f'Input type must be one of "spin" or "binary", or be a string starting'
+    f'with "int" and be followed by a positive integer.\n'
+    f"More formally, it should match the following regular expression.\n"
+    f"{INTEGER_REGEX}\n"
+    f'Examples: "int7", "int42", ...'
+)
 
-class QuadraticPolynomial(TensorBearer):
+
+class QuadraticPolynomial(object):
     """
     Internal implementation of a multivariate quadratic polynomial.
 
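The restored module-level DOMAIN_ERROR documents the accepted domain strings. A quick, self-contained illustration of what INTEGER_REGEX matches (not part of the diff; "spin" and "binary" are accepted separately from the regex):

import re

INTEGER_REGEX = re.compile("^int[1-9][0-9]*$")

for domain in ["int4", "int42", "int0", "integer", "spin"]:
    print(domain, bool(INTEGER_REGEX.match(domain)))
# "int4" and "int42" match; "int0", "integer" and "spin" do not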
@@ -125,7 +134,7 @@ class QuadraticPolynomial(TensorBearer):
     Maximize this polynomial over {0, 1, ..., 14, 15} x {0, 1, ..., 14, 15}
     (outputs are located on the GPU)
 
-    >>> best_vector, best_value = poly.maximize(domain="int4")
+    >>> best_vector, best_value = poly.maximize(domain="int4")
     >>> best_vector
     tensor([ 0., 15.], device='cuda:0')
     >>> best_value
@@ -148,7 +157,8 @@ def __init__(
         dtype: Optional[torch.dtype] = None,
         device: Optional[Union[str, torch.device]] = None,
     ):
-        super().__init__(dtype=dtype, device=device)
+        self._dtype = safe_get_dtype(dtype)
+        self._device = safe_get_device(device)
         self.sb_result = None
 
         if len(polynomial_data) == 1 and isinstance(polynomial_data[0], Poly):
@@ -159,17 +169,17 @@ def __init__(
             )
             dimension = len(polynomial.gens)
             self._quadratic_coefficients = torch.zeros(
-                dimension, dimension, dtype=self.dtype, device=self.device
+                dimension, dimension, dtype=self._dtype, device=self._device
             )
             self._linear_coefficients = torch.zeros(
-                dimension, dtype=self.dtype, device=self.device
+                dimension, dtype=self._dtype, device=self._device
             )
-            self._bias = torch.tensor(0.0, dtype=self.dtype, device=self.device)
+            self._bias = torch.tensor(0.0, dtype=self._dtype, device=self._device)
             for monom, coeff in polynomial.terms():
                 coeff = float(coeff)
                 if sum(monom) == 0:
                     self._bias = torch.tensor(
-                        coeff, dtype=self.dtype, device=self.device
+                        coeff, dtype=self._dtype, device=self._device
                     )
                 elif sum(monom) == 1:
                     self._linear_coefficients[monom.index(1)] = coeff
@@ -186,58 +196,67 @@ def __init__(
             self._quadratic_coefficients = None
             self._linear_coefficients = None
             self._bias = None
-            for polynomial_data_element in polynomial_data:
-                # noinspection PyTypeChecker
-                tensor_like = self._safe_get_tensor(polynomial_data_element)
-                if tensor_like.ndim == 0:
-                    attribute_to_set = "_bias"
-                elif tensor_like.ndim == 1:
-                    attribute_to_set = "_linear_coefficients"
-                elif tensor_like.ndim == 2:
-                    attribute_to_set = "_quadratic_coefficients"
-                    rows, cols = tensor_like.shape
-                    if rows != cols:
+            for tensor_like in polynomial_data:
+                if isinstance(tensor_like, np.ndarray):
+                    tensor_like = torch.from_numpy(tensor_like)
+                elif isinstance(tensor_like, (int, float)):
+                    tensor_like = torch.tensor(
+                        tensor_like, dtype=self._dtype, device=self._device
+                    )
+                if isinstance(tensor_like, torch.Tensor):
+                    if tensor_like.ndim == 0:
+                        attribute_to_set = "_bias"
+                    elif tensor_like.ndim == 1:
+                        attribute_to_set = "_linear_coefficients"
+                    elif tensor_like.ndim == 2:
+                        attribute_to_set = "_quadratic_coefficients"
+                        rows, cols = tensor_like.shape
+                        if rows != cols:
+                            raise ValueError(
+                                "Provided quadratic coefficients tensor is not square."
+                            )
+                    else:
                         raise ValueError(
-                            "Provided quadratic coefficients tensor is not square."
+                            f"Expected a tensor with at most 2 dimensions, got {tensor_like.ndim}."
+                        )
+                    if getattr(self, attribute_to_set) is not None:
+                        raise ValueError(
+                            f"Providing two tensors for the same degree is ambiguous. Got at least two tensors for degree {tensor_like.ndim}."
+                        )
+                    else:
+                        if tensor_like.ndim > 0:
+                            if dimension is None:
+                                dimension = tensor_like.shape[0]
+                            elif dimension != tensor_like.shape[0]:
+                                raise ValueError(
+                                    f"Inconsistant shape among provided tensors. Expected {dimension} but got {tensor_like.shape[0]}."
+                                )
+                        setattr(
+                            self,
+                            attribute_to_set,
+                            tensor_like.to(dtype=self._dtype, device=self._device),
                         )
                 else:
                     raise ValueError(
-                        f"Expected a tensor with at most 2 dimensions, got {tensor_like.ndim}."
-                    )
-                if getattr(self, attribute_to_set) is not None:
-                    raise ValueError(
-                        f"Providing two tensors for the same degree is ambiguous. Got at least two tensors for degree {tensor_like.ndim}."
-                    )
-                else:
-                    if tensor_like.ndim > 0:
-                        if dimension is None:
-                            dimension = tensor_like.shape[0]
-                        elif dimension != tensor_like.shape[0]:
-                            raise ValueError(
-                                f"Inconsistent shape among provided tensors. Expected {dimension} but got {tensor_like.shape[0]}."
-                            )
-                    setattr(
-                        self,
-                        attribute_to_set,
-                        tensor_like,
+                        f"Unsupported coefficient tensor type: {type(tensor_like)}. Expected a torch.Tensor or a numpy.ndarray."
                     )
             if self._quadratic_coefficients is None:
                 self._quadratic_coefficients = torch.zeros(
-                    dimension, dimension, dtype=self.dtype, device=self.device
+                    dimension, dimension, dtype=self._dtype, device=self._device
                 )
             if self._linear_coefficients is None:
                 self._linear_coefficients = torch.zeros(
-                    dimension, dtype=self.dtype, device=self.device
+                    dimension, dtype=self._dtype, device=self._device
                 )
             if self._bias is None:
-                self._bias = torch.tensor(0.0, dtype=self.dtype, device=self.device)
+                self._bias = torch.tensor(0.0, dtype=self._dtype, device=self._device)
 
         self._dimension = self._quadratic_coefficients.shape[0]
 
     def __call__(self, value: Union[torch.Tensor, np.ndarray]) -> torch.Tensor:
         if not isinstance(value, torch.Tensor):
             try:
-                value = torch.tensor(value, dtype=self.dtype, device=self.device)
+                value = torch.tensor(value, dtype=self._dtype, device=self._device)
             except Exception as err:
                 raise TypeError("Input value cannot be cast to Tensor.") from err
 
@@ -349,12 +368,12 @@ def to_ising(self, domain: Union[str, List[str]]) -> Ising:
         """
         variables = self.__get_variables(domain=domain)
        spin_identity_vector = QuadraticPolynomial.__spin_identity_vector(
-            variables=variables, dtype=self.dtype, device=self.device
+            variables=variables, dtype=self._dtype, device=self._device
         )
         spin_weighted_integer_to_binary_matrix = (
             spin_identity_vector + 1
         ) * QuadraticPolynomial.__integer_to_binary_matrix(
-            variables=variables, dtype=self.dtype, device=self.device
+            variables=variables, dtype=self._dtype, device=self._device
         )
         symmetric_quadratic_tensor = (
             self._quadratic_coefficients + self._quadratic_coefficients.t()
@@ -377,7 +396,7 @@ def to_ising(self, domain: Union[str, List[str]]) -> Ising:
             -1,
         )
         torch.diag(J)[...] = 0
-        return Ising(J, h, self.dtype, self.device)
+        return Ising(J, h, self._dtype, self._device)
 
     def convert_spins(
         self, optimized_spins: torch.Tensor, domain: Union[str, List[str]]
@@ -422,12 +441,12 @@ def convert_spins(
         """
         variables = self.__get_variables(domain=domain)
         spin_identity_vector = QuadraticPolynomial.__spin_identity_vector(
-            variables=variables, dtype=self.dtype, device=self.device
+            variables=variables, dtype=self._dtype, device=self._device
         )
         spin_weighted_integer_to_binary_matrix = (
             spin_identity_vector + 1
         ) * QuadraticPolynomial.__integer_to_binary_matrix(
-            variables=variables, dtype=self.dtype, device=self.device
+            variables=variables, dtype=self._dtype, device=self._device
         )
         return (
             None
@@ -562,8 +581,9 @@ def __optimize(
             convergence_threshold=convergence_threshold,
             timeout=timeout,
         )
-        self.sb_result = self._cast_tensor(self.convert_spins(optimized_spins, domain))
-
+        self.sb_result = self.convert_spins(optimized_spins, domain).to(
+            dtype=self._dtype, device=self._device
+        )
         result = self.sb_result.t()
         evaluation = self(result)
         if best_only:
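As a rough orientation for the constructor and __call__ hunks above, a usage sketch (illustrative only: it assumes dtype and device are keyword arguments and that the package is importable; the evaluation logic itself is not shown in this diff):

import torch
from simulated_bifurcation.core.quadratic_polynomial import QuadraticPolynomial

quadratic = torch.tensor([[1.0, -2.0], [0.0, 3.0]])  # 2-D tensor sets the quadratic coefficients
linear = torch.tensor([0.5, -1.0])                   # 1-D tensor sets the linear coefficients
poly = QuadraticPolynomial(quadratic, linear, dtype=torch.float32)

print(poly([1.0, 1.0]))  # non-tensor input is cast with the polynomial's dtype and device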

src/simulated_bifurcation/core/tensor_bearer.py

Lines changed: 0 additions & 57 deletions
This file was deleted.
src/simulated_bifurcation/core/utils.py

Lines changed: 17 additions & 0 deletions
@@ -0,0 +1,17 @@
+from typing import Optional, Union
+
+import torch
+
+
+def safe_get_dtype(dtype: Optional[torch.dtype]) -> torch.dtype:
+    if dtype is None:
+        return torch.float32
+    elif dtype == torch.float32 or dtype == torch.float64:
+        return dtype
+    raise ValueError(
+        "The Simulated Bifurcation algorithm can only run with a torch.float32 or a torch.float64 dtype."
+    )
+
+
+def safe_get_device(device: Optional[Union[str, torch.device]]) -> torch.device:
+    return torch.get_default_device() if device is None else torch.device(device)
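A short usage sketch for the two helpers above (illustrative; the module path simulated_bifurcation.core.utils is inferred from the from .utils imports earlier in this commit):

import torch
from simulated_bifurcation.core.utils import safe_get_device, safe_get_dtype

print(safe_get_dtype(None))           # defaults to torch.float32
print(safe_get_dtype(torch.float64))  # float64 passes through unchanged
print(safe_get_device("cpu"))         # strings are converted to torch.device
# safe_get_dtype(torch.int32) raises ValueError: only float32 and float64 are accepted

Compared with the TensorBearer base class removed by this revert, dtype and device handling now lives in these two module-level helpers, and each class stores the results on its own _dtype and _device attributes.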
