From 889298e278a7280f2bff1cda5c47b2b2fb816aad Mon Sep 17 00:00:00 2001
From: erik
Date: Mon, 27 Jan 2025 21:36:53 +0000
Subject: [PATCH 01/40] feat: Move the package to use poetry for installation.

---
 .bumpversion.cfg |  8 ++++++++
 pyproject.toml   | 25 +++++++++++++++++++++++++
 2 files changed, 33 insertions(+)
 create mode 100644 .bumpversion.cfg
 create mode 100644 pyproject.toml

diff --git a/.bumpversion.cfg b/.bumpversion.cfg
new file mode 100644
index 0000000..1541736
--- /dev/null
+++ b/.bumpversion.cfg
@@ -0,0 +1,8 @@
+[bumpversion]
+current_version = 0.0.1
+commit = False
+tag = False
+
+[bumpversion:file:pyproject.toml]
+search = version = "{current_version}"
+replace = version = "{new_version}"

diff --git a/pyproject.toml b/pyproject.toml
new file mode 100644
index 0000000..a5f2c0f
--- /dev/null
+++ b/pyproject.toml
@@ -0,0 +1,25 @@
+[tool.poetry]
+name = "tangled-adjudicate"
+version = "0.0.1"
+description = "Tangled adjudicators"
+authors = ["Geordie Rose <geordie@snowdropquantum.com>"]
+license = "MIT"
+homepage = "https://www.snowdropquantum.com/"
+packages = [
+    { include = "tangled_adjudicate" },
+    { include = "tests" },
+]
+
+[tool.poetry.dependencies]
+python = "^3.8" # You may want to adjust this based on your needs
+dwave-ocean-sdk = "*"
+matplotlib = "*"
+gdown = "*"
+
+[tool.poetry.group.dev.dependencies]
+# Add development dependencies here if needed
+# pytest = "^7.0.0"
+
+[build-system]
+requires = ["poetry-core>=1.0.0"]
+build-backend = "poetry.core.masonry.api"

From 80abf92edf36df8aca4c303c4600b8dc906135cb Mon Sep 17 00:00:00 2001
From: erik
Date: Mon, 27 Jan 2025 22:20:28 +0000
Subject: [PATCH 02/40] refactor: Convert to base class and subclasses to
 implement different adjudicators. Precursor to including in game service.

---
 pyproject.toml                                |   2 +
 setup.py                                      |  13 -
 tangled_adjudicate/__init__.py                |   5 +
 tangled_adjudicate/adjudicators/adjudicate.py |   2 +-
 .../adjudicators/adjudicator.py               | 135 +++++++++
 .../adjudicators/lookup_table.py              | 117 ++++++++
 .../adjudicators/quantum_annealing.py         | 257 ++++++++++++++++++
 .../adjudicators/schrodinger.py               | 103 +++++++
 .../adjudicators/simulated_annealing.py       | 113 ++++++++
 9 files changed, 733 insertions(+), 14 deletions(-)
 delete mode 100644 setup.py
 create mode 100644 tangled_adjudicate/adjudicators/adjudicator.py
 create mode 100644 tangled_adjudicate/adjudicators/lookup_table.py
 create mode 100644 tangled_adjudicate/adjudicators/quantum_annealing.py
 create mode 100644 tangled_adjudicate/adjudicators/schrodinger.py
 create mode 100644 tangled_adjudicate/adjudicators/simulated_annealing.py

diff --git a/pyproject.toml b/pyproject.toml
index a5f2c0f..a4b3e92 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -13,6 +13,7 @@ packages = [
 [tool.poetry.dependencies]
 python = "^3.8" # You may want to adjust this based on your needs
 dwave-ocean-sdk = "*"
+dwave-neal = ">=0.6.0"
 matplotlib = "*"
 gdown = "*"
 
@@ -23,3 +24,4 @@ gdown = "*"
 [build-system]
 requires = ["poetry-core>=1.0.0"]
 build-backend = "poetry.core.masonry.api"
+

diff --git a/setup.py b/setup.py
deleted file mode 100644
index c32a1d1..0000000
--- a/setup.py
+++ /dev/null
@@ -1,13 +0,0 @@
-from setuptools import setup
-
-setup(
-    name='tangled-adjudicate',
-    version='0.0.1',
-    packages=['tests', 'tangled_adjudicate', 'tangled_adjudicate.utils', 'tangled_adjudicate.schrodinger', 'tangled_adjudicate.adjudicators'],
-    url='https://www.snowdropquantum.com/',
-    license='MIT',
-    author='Geordie Rose',
-    author_email='geordie@snowdropquantum.com',
-    description='Tangled adjudicators',
-    install_requires=['dwave-ocean-sdk', 'matplotlib', 'gdown']
-)

diff --git a/tangled_adjudicate/__init__.py b/tangled_adjudicate/__init__.py
index e69de29..2fea5db 100644
--- a/tangled_adjudicate/__init__.py
+++ b/tangled_adjudicate/__init__.py
@@ -0,0 +1,5 @@
+from .adjudicators.adjudicator import Adjudicator, GameState, AdjudicationResult
+from .adjudicators.lookup_table import LookupTableAdjudicator
+from .adjudicators.schrodinger import SchrodingerEquationAdjudicator
+from .adjudicators.simulated_annealing import SimulatedAnnealingAdjudicator
+from .adjudicators.quantum_annealing import QuantumAnnealingAdjudicator

diff --git a/tangled_adjudicate/adjudicators/adjudicate.py b/tangled_adjudicate/adjudicators/adjudicate.py
index e90fa53..1f04785 100644
--- a/tangled_adjudicate/adjudicators/adjudicate.py
+++ b/tangled_adjudicate/adjudicators/adjudicate.py
@@ -19,7 +19,7 @@
 from dwave.system.testing import MockDWaveSampler
 
 
-class Adjudicator(object):
+class old_Adjudicator(object):
     def __init__(self, params):
         self.params = params
         self.results_dict = None

diff --git a/tangled_adjudicate/adjudicators/adjudicator.py b/tangled_adjudicate/adjudicators/adjudicator.py
new file mode 100644
index 0000000..6985089
--- /dev/null
+++ b/tangled_adjudicate/adjudicators/adjudicator.py
@@ -0,0 +1,135 @@
+from abc import ABC, abstractmethod
+from typing import Any, TypedDict, List, Tuple, Optional, Dict, Union, Set
+import numpy as np
+import numpy.typing as npt
+
+class GameState(TypedDict):
+    num_nodes: int
+    edges: List[Tuple[int, int, int]]  # (node1, node2, weight)
+    player1_id: str
+    player2_id: str
+    turn_count: int
+    current_player_index: int
+    player1_node: Optional[int]
+    player2_node: Optional[int]
+
+class AdjudicationResult(TypedDict):
+    game_state: GameState
+    adjudicator: str
+    winner: Optional[str]  # 'red', 'blue', 'draw', or None
+    score: Optional[float]
+    influence_vector: Optional[npt.NDArray[np.float64]]
+    correlation_matrix: Optional[npt.NDArray[np.float64]]
+    parameters: Dict[str, Union[str, int, float, bool]]
+
+class IsingModel(TypedDict):
+    h: Dict[int, float]  # Local fields
+    j: Dict[Tuple[int, int], float]  # Coupling strengths
+
+class Adjudicator(ABC):
+    """Base interface for game state adjudication implementations."""
+
+    def __init__(self) -> None:
+        """Initialize base adjudicator."""
+        self._parameters: Dict[str, Any] = {}
+
+    @abstractmethod
+    def setup(self, **kwargs) -> None:
+        """Optional setup method for implementation-specific initialization."""
+        pass
+
+    @abstractmethod
+    def adjudicate(self, game_state: GameState) -> AdjudicationResult:
+        """Adjudicate the given game state."""
+        pass
+
+    def _validate_game_state(self, game_state: GameState) -> None:
+        """Validate the game state structure and contents."""
+        required_keys = {
+            'num_nodes', 'edges', 'player1_id', 'player2_id',
+            'turn_count', 'current_player_index', 'player1_node', 'player2_node'
+        }
+
+        if not all(key in game_state for key in required_keys):
+            missing_keys = required_keys - set(game_state.keys())
+            raise ValueError(f"Game state missing required keys: {missing_keys}")
+
+        if game_state['num_nodes'] < 1:
+            raise ValueError("Number of nodes must be positive")
+
+        for edge in game_state['edges']:
+            if len(edge) != 3:
+                raise ValueError(f"Invalid edge format: {edge}")
+            if not (0 <= edge[0] < game_state['num_nodes'] and
+                    0 <= edge[1] < game_state['num_nodes']):
+                raise ValueError(f"Edge vertices out of range: {edge}")
+
+    def _game_state_to_ising(self, game_state: GameState) -> IsingModel:
+        """Convert game
state to Ising model parameters. + + Args: + game_state: The current game state + + Returns: + IsingModel containing h (local fields) and j (coupling strengths) + """ + h = {i: 0.0 for i in range(game_state['num_nodes'])} + j = {} + + for edge in game_state['edges']: + v1, v2, weight = edge + if v1 > v2: + v1, v2 = v2, v1 + j[(v1, v2)] = float(weight) + + return IsingModel(h=h, j=j) + + def _find_isolated_vertices(self, game_state: GameState) -> Set[int]: + """Find vertices with no connections in the graph. + + Args: + game_state: The current game state + + Returns: + Set of isolated vertex indices + """ + connected_vertices = set() + for edge in game_state['edges']: + connected_vertices.add(edge[0]) + connected_vertices.add(edge[1]) + + all_vertices = set(range(game_state['num_nodes'])) + return all_vertices - connected_vertices + + def _compute_winner_score_and_influence( + self, + game_state: GameState, + correlation_matrix: npt.NDArray[np.float64], + epsilon: float = 1e-6 + ) -> Tuple[Optional[str], Optional[float], npt.NDArray[np.float64]]: + """Compute winner, score and influence from correlation matrix.""" + if not isinstance(correlation_matrix, np.ndarray): + raise ValueError("Correlation matrix must be a numpy array") + + if correlation_matrix.shape[0] != correlation_matrix.shape[1]: + raise ValueError("Correlation matrix must be square") + + if correlation_matrix.shape[0] != game_state['num_nodes']: + raise ValueError("Correlation matrix size must match number of nodes") + + influence_vector = np.sum(correlation_matrix, axis=0) + + if game_state['player1_node'] is None or game_state['player2_node'] is None: + return None, None, influence_vector + + score = (influence_vector[game_state['player1_node']] - + influence_vector[game_state['player2_node']]) + + if score > epsilon: + winner = 'red' + elif score < -epsilon: + winner = 'blue' + else: + winner = 'draw' + + return winner, score, influence_vector \ No newline at end of file diff --git a/tangled_adjudicate/adjudicators/lookup_table.py b/tangled_adjudicate/adjudicators/lookup_table.py new file mode 100644 index 0000000..994c3f3 --- /dev/null +++ b/tangled_adjudicate/adjudicators/lookup_table.py @@ -0,0 +1,117 @@ +import os +import pickle +from typing import Dict, Optional +import numpy as np + +from ..utils.utilities import ( + convert_erik_game_state_to_my_game_state, + get_tso, + build_results_dict +) +from .adjudicator import Adjudicator, GameState, AdjudicationResult + +class LookupTableAdjudicator(Adjudicator): + """Adjudicator implementation using pre-computed lookup tables.""" + + def __init__(self) -> None: + """Initialize the lookup table adjudicator.""" + super().__init__() + self.data_dir: Optional[str] = None + self.results_dict: Optional[Dict[str, str]] = None + + def setup(self, **kwargs) -> None: + """Configure lookup table parameters. + + Args: + data_dir: Directory containing lookup table data files + + Raises: + ValueError: If parameters are invalid or data directory doesn't exist + """ + if 'data_dir' in kwargs: + if not isinstance(kwargs['data_dir'], str): + raise ValueError("data_dir must be a string") + if not os.path.isdir(kwargs['data_dir']): + raise ValueError(f"Directory not found: {kwargs['data_dir']}") + self.data_dir = kwargs['data_dir'] + + self._parameters = {'data_dir': self.data_dir} + + def _load_lookup_table(self, num_nodes: int) -> None: + """Load the appropriate lookup table for the given graph size. 
+ + Args: + num_nodes: Number of nodes in the graph + + Raises: + ValueError: If lookup table is not available for this graph size + RuntimeError: If lookup table file cannot be loaded + """ + if num_nodes not in [3, 4]: + raise ValueError( + "Lookup table only available for complete graphs with 3 or 4 vertices" + ) + + if not self.data_dir: + raise RuntimeError("Data directory not set. Call setup() first.") + + graph_number = num_nodes - 1 # Convert from num_nodes to graph_number + file_path = os.path.join( + self.data_dir, + f'graph_{graph_number}_terminal_state_outcomes.pkl' + ) + + # Generate lookup table if it doesn't exist + if not os.path.exists(file_path): + get_tso(graph_number, file_path) + + try: + with open(file_path, 'rb') as fp: + results = pickle.load(fp) + self.results_dict = build_results_dict(results) + except Exception as e: + raise RuntimeError(f"Failed to load lookup table: {str(e)}") + + def adjudicate(self, game_state: GameState) -> AdjudicationResult: + """Adjudicate the game state using the lookup table. + + Args: + game_state: The current game state + + Returns: + AdjudicationResult containing the adjudication details + + Raises: + ValueError: If the game state is invalid or unsupported + RuntimeError: If lookup table is not loaded + """ + self._validate_game_state(game_state) + + # Load lookup table if needed + if (self.results_dict is None or + len(next(iter(self.results_dict.keys()))) != game_state['num_nodes']): + self._load_lookup_table(game_state['num_nodes']) + + if not self.results_dict: + raise RuntimeError("Failed to load lookup table") + + # Convert game state to lookup format + lookup_state = convert_erik_game_state_to_my_game_state(game_state) + + try: + winner = self.results_dict[str(lookup_state)] + except KeyError: + raise ValueError( + f"Game state not found in lookup table: {lookup_state}" + ) + + return AdjudicationResult( + game_state=game_state, + adjudicator='lookup_table', + winner=winner, + score=None, # Lookup table doesn't provide scores + influence_vector=None, + correlation_matrix=None, + parameters=self._parameters + ) + \ No newline at end of file diff --git a/tangled_adjudicate/adjudicators/quantum_annealing.py b/tangled_adjudicate/adjudicators/quantum_annealing.py new file mode 100644 index 0000000..0728021 --- /dev/null +++ b/tangled_adjudicate/adjudicators/quantum_annealing.py @@ -0,0 +1,257 @@ +from typing import Dict, Any, List, Optional +import numpy as np +from dataclasses import dataclass +from dwave.system import DWaveSampler, FixedEmbeddingComposite +from dwave.system.testing import MockDWaveSampler + +from ..utils.find_graph_automorphisms import get_automorphisms +from ..utils.find_hardware_embeddings import get_embeddings +from .adjudicator import Adjudicator, GameState, AdjudicationResult + +@dataclass +class QAParameters: + """Parameters for quantum annealing.""" + num_reads: int = 1000 + anneal_time: float = 5.0 # ns + num_chip_runs: int = 1 + use_gauge_transform: bool = True + use_shim: bool = False + shim_iterations: int = 1 + alpha_phi: float = 0.1 + use_mock: bool = False + solver_name: Optional[str] = None + graph_number: Optional[int] = None + +class QuantumAnnealingAdjudicator(Adjudicator): + """Adjudicator implementation using D-Wave quantum annealing.""" + + def __init__(self) -> None: + """Initialize the quantum annealing adjudicator.""" + super().__init__() + self.params = QAParameters() + self.embeddings: List[List[int]] = [] + self.automorphisms: List[Dict[int, int]] = [] + self.sampler: 
Optional[FixedEmbeddingComposite] = None + + def setup(self, **kwargs) -> None: + """Configure quantum annealing parameters and initialize D-Wave connection. + + Args: + num_reads: Number of annealing reads per run + anneal_time: Annealing time in nanoseconds + num_chip_runs: Number of separate chip programming runs + use_gauge_transform: Whether to apply gauge transformations + use_shim: Whether to use shimming process + shim_iterations: Number of shimming iterations if shimming is used + alpha_phi: Learning rate for flux bias offsets + use_mock: Whether to use mock D-Wave sampler (for testing) + solver_name: Name of D-Wave solver to use + graph_number: Graph number for embedding lookup + + Raises: + ValueError: If parameters are invalid + RuntimeError: If D-Wave connection fails + """ + # Update parameters from kwargs + for key, value in kwargs.items(): + if hasattr(self.params, key): + setattr(self.params, key, value) + else: + raise ValueError(f"Unknown parameter: {key}") + + # Validate parameters + if self.params.num_reads <= 0: + raise ValueError("num_reads must be positive") + if self.params.anneal_time <= 0: + raise ValueError("anneal_time must be positive") + if self.params.num_chip_runs <= 0: + raise ValueError("num_chip_runs must be positive") + if self.params.shim_iterations <= 0: + raise ValueError("shim_iterations must be positive") + if self.params.alpha_phi <= 0 or self.params.alpha_phi > 1: + raise ValueError("alpha_phi must be in (0, 1]") + + # Get graph-specific data if graph number provided + if self.params.graph_number is not None: + self.automorphisms = get_automorphisms(self.params.graph_number) + self.embeddings = get_embeddings( + self.params.graph_number, + self.params.solver_name + ) + + # Initialize sampler + try: + if self.params.use_mock: + base_sampler = MockDWaveSampler( + topology_type='zephyr', + topology_shape=[6, 4] + ) + else: + base_sampler = DWaveSampler(solver=self.params.solver_name) + + # Store for later use in adjudicate + self._base_sampler = base_sampler + + except Exception as e: + raise RuntimeError(f"Failed to initialize D-Wave sampler: {str(e)}") + + # Store parameters + self._parameters = self.params.__dict__ + + def _apply_gauge_transform( + self, + samples: np.ndarray, + flip_indices: List[int] + ) -> np.ndarray: + """Apply gauge transformation to samples. + + Args: + samples: Sample array to transform + flip_indices: Indices where spins should be flipped + + Returns: + Transformed sample array + """ + samples = samples.copy() + samples[:, flip_indices] = -samples[:, flip_indices] + return samples + + def _process_embedding( + self, + game_state: GameState, + automorphism: Dict[int, int] + ) -> Dict[int, List[int]]: + """Process embedding with given automorphism. + + Args: + game_state: Current game state + automorphism: Graph automorphism to apply + + Returns: + Processed embedding mapping + """ + inverted_automorphism = {v: k for k, v in automorphism.items()} + num_vertices = game_state['num_nodes'] + + embedding_map = {} + for embedding_idx, embedding in enumerate(self.embeddings): + for vertex in range(num_vertices): + logical_idx = num_vertices * embedding_idx + vertex + physical_qubit = embedding[inverted_automorphism[vertex]] + embedding_map[logical_idx] = [physical_qubit] + + return embedding_map + + def adjudicate(self, game_state: GameState) -> AdjudicationResult: + """Adjudicate the game state using quantum annealing. 
+ + Args: + game_state: The current game state + + Returns: + AdjudicationResult containing the adjudication details + + Raises: + ValueError: If the game state is invalid + RuntimeError: If quantum annealing fails + """ + if not self._base_sampler: + raise RuntimeError("Sampler not initialized. Call setup() first.") + + self._validate_game_state(game_state) + + num_vertices = game_state['num_nodes'] + num_embeddings = len(self.embeddings) + total_samples = np.zeros((1, num_vertices)) # Initial array for stacking + + # Process each chip run + for _ in range(self.params.num_chip_runs): + # Select random automorphism + automorphism = np.random.choice(self.automorphisms) + embedding_map = self._process_embedding(game_state, automorphism) + + # Create sampler with fixed embedding + sampler = FixedEmbeddingComposite( + self._base_sampler, + embedding=embedding_map + ) + + # Get Ising model + ising_model = self._game_state_to_ising(game_state) + + # Set up sampling parameters + sample_kwargs = { + 'num_reads': self.params.num_reads, + 'answer_mode': 'raw' + } + + if not self.params.use_mock: + sample_kwargs.update({ + 'annealing_time': self.params.anneal_time / 1000, + 'auto_scale': False + }) + + # Perform sampling + response = sampler.sample_ising( + ising_model['h'], + ising_model['j'], + **sample_kwargs + ) + + # Process samples + samples = np.array(response.record.sample) + + # Apply gauge transform if enabled + if self.params.use_gauge_transform: + flip_indices = np.random.choice( + [0, 1], + size=samples.shape[1], + p=[0.5, 0.5] + ).nonzero()[0] + samples = self._apply_gauge_transform(samples, flip_indices) + + # Stack samples for all embeddings + processed_samples = samples[:, :num_vertices] + for k in range(1, num_embeddings): + processed_samples = np.vstack(( + processed_samples, + samples[:, k*num_vertices:(k+1)*num_vertices] + )) + + total_samples = np.vstack((total_samples, processed_samples)) + + # Remove initial zero row + total_samples = np.delete(total_samples, 0, axis=0) + + # Handle isolated vertices + isolated_vertices = self._find_isolated_vertices(game_state) + if isolated_vertices: + random_samples = np.random.choice( + [1, -1], + size=(total_samples.shape[0], len(isolated_vertices)) + ) + for i, vertex in enumerate(isolated_vertices): + total_samples[:, vertex] = random_samples[:, i] + + # Calculate correlation matrix + sample_count = (self.params.num_reads * num_embeddings * + self.params.num_chip_runs) + correlation_matrix = ( + np.einsum('si,sj->ij', total_samples, total_samples) / sample_count - + np.eye(num_vertices) + ) + + # Compute results + winner, score, influence_vector = self._compute_winner_score_and_influence( + game_state, correlation_matrix + ) + + return AdjudicationResult( + game_state=game_state, + adjudicator='quantum_annealing', + winner=winner, + score=score, + influence_vector=influence_vector, + correlation_matrix=correlation_matrix, + parameters=self._parameters + ) \ No newline at end of file diff --git a/tangled_adjudicate/adjudicators/schrodinger.py b/tangled_adjudicate/adjudicators/schrodinger.py new file mode 100644 index 0000000..ff3506f --- /dev/null +++ b/tangled_adjudicate/adjudicators/schrodinger.py @@ -0,0 +1,103 @@ +from typing import Dict, Any +import numpy as np +from ..schrodinger.schrodinger_functions import evolve_schrodinger + +from .adjudicator import Adjudicator, GameState, AdjudicationResult + +class SchrodingerEquationAdjudicator(Adjudicator): + """Adjudicator implementation using Schrödinger equation evolution.""" + + def 
__init__(self) -> None: + """Initialize the adjudicator with default values.""" + super().__init__() + self.anneal_time: float = 5.0 # ns + self.s_min: float = 0.001 + self.s_max: float = 0.999 + + def setup(self, **kwargs) -> None: + """Configure the Schrödinger equation parameters. + + Args: + anneal_time: Annealing time in nanoseconds (default: 5.0) + s_min: Minimum annealing parameter (default: 0.001) + s_max: Maximum annealing parameter (default: 0.999) + + Raises: + ValueError: If parameters are invalid + """ + if 'anneal_time' in kwargs: + if not isinstance(kwargs['anneal_time'], (int, float)) or kwargs['anneal_time'] <= 0: + raise ValueError("anneal_time must be a positive number") + self.anneal_time = float(kwargs['anneal_time']) + + if 's_min' in kwargs: + if not isinstance(kwargs['s_min'], (int, float)) or not 0 <= kwargs['s_min'] < 1: + raise ValueError("s_min must be in [0, 1)") + self.s_min = float(kwargs['s_min']) + + if 's_max' in kwargs: + if not isinstance(kwargs['s_max'], (int, float)) or not 0 < kwargs['s_max'] <= 1: + raise ValueError("s_max must be in (0, 1]") + self.s_max = float(kwargs['s_max']) + + if self.s_min >= self.s_max: + raise ValueError("s_min must be less than s_max") + + self._parameters = { + 'anneal_time': self.anneal_time, + 's_min': self.s_min, + 's_max': self.s_max + } + + def adjudicate(self, game_state: GameState) -> AdjudicationResult: + """Adjudicate the game state using Schrödinger equation evolution. + + Args: + game_state: The current game state + + Returns: + AdjudicationResult containing the adjudication details + + Raises: + ValueError: If the game state is invalid + """ + self._validate_game_state(game_state) + + # Convert game state to Ising model + ising_model = self._game_state_to_ising(game_state) + + # Evolve Schrödinger equation + correlation_matrix = evolve_schrodinger( + ising_model['h'], + ising_model['j'], + s_min=self.s_min, + s_max=self.s_max, + tf=self.anneal_time, + n_qubits=game_state['num_nodes'] + ) + + # Make symmetric (evolve_schrodinger returns upper triangular) + correlation_matrix = correlation_matrix + correlation_matrix.T + + # Handle isolated vertices + isolated_vertices = self._find_isolated_vertices(game_state) + if isolated_vertices: + for vertex in isolated_vertices: + correlation_matrix[:, vertex] = 0 + correlation_matrix[vertex, :] = 0 + + # Compute results + winner, score, influence_vector = self._compute_winner_score_and_influence( + game_state, correlation_matrix + ) + + return AdjudicationResult( + game_state=game_state, + adjudicator='schrodinger_equation', + winner=winner, + score=score, + influence_vector=influence_vector, + correlation_matrix=correlation_matrix, + parameters=self._parameters + ) + \ No newline at end of file diff --git a/tangled_adjudicate/adjudicators/simulated_annealing.py b/tangled_adjudicate/adjudicators/simulated_annealing.py new file mode 100644 index 0000000..73250a3 --- /dev/null +++ b/tangled_adjudicate/adjudicators/simulated_annealing.py @@ -0,0 +1,113 @@ +from typing import Dict, Any +import neal +import numpy as np + +from .adjudicator import Adjudicator, GameState, AdjudicationResult + +class SimulatedAnnealingAdjudicator(Adjudicator): + """Adjudicator implementation using simulated annealing.""" + + def __init__(self) -> None: + """Initialize the adjudicator with default values.""" + super().__init__() + self.num_reads: int = 1000 + self.num_sweeps: int = 16 + self.beta_max: float = 3.0 + + def setup(self, **kwargs) -> None: + """Configure the simulated annealing 
parameters. + + Args: + num_reads: Number of annealing reads (default: 1000) + num_sweeps: Number of sweeps per read (default: 16) + beta_max: Maximum inverse temperature (default: 3.0) + + Raises: + ValueError: If parameters are invalid + """ + if 'num_reads' in kwargs: + if not isinstance(kwargs['num_reads'], int) or kwargs['num_reads'] <= 0: + raise ValueError("num_reads must be a positive integer") + self.num_reads = kwargs['num_reads'] + + if 'num_sweeps' in kwargs: + if not isinstance(kwargs['num_sweeps'], int) or kwargs['num_sweeps'] <= 0: + raise ValueError("num_sweeps must be a positive integer") + self.num_sweeps = kwargs['num_sweeps'] + + if 'beta_max' in kwargs: + if not isinstance(kwargs['beta_max'], (int, float)) or kwargs['beta_max'] <= 0: + raise ValueError("beta_max must be a positive number") + self.beta_max = float(kwargs['beta_max']) + + self._parameters = { + 'num_reads': self.num_reads, + 'num_sweeps': self.num_sweeps, + 'beta_max': self.beta_max + } + + def adjudicate(self, game_state: GameState) -> AdjudicationResult: + """Adjudicate the game state using simulated annealing. + + Args: + game_state: The current game state + + Returns: + AdjudicationResult containing the adjudication details + + Raises: + ValueError: If the game state is invalid + """ + self._validate_game_state(game_state) + + # Convert game state to Ising model + ising_model = self._game_state_to_ising(game_state) + sampler = neal.SimulatedAnnealingSampler() + + # Calculate beta range based on coupling strengths + beta_range = [ + 1 / np.sqrt(np.sum([Jij ** 2 for Jij in ising_model['j'].values()]) + 0.001), + self.beta_max + ] + + # Perform simulated annealing + response = sampler.sample_ising( + ising_model['h'], + ising_model['j'], + beta_range=beta_range, + num_reads=self.num_reads, + num_sweeps=self.num_sweeps, + randomize_order=True + ) + + # Calculate correlation matrix + samples = np.array(response.record.sample, dtype=float) + correlation_matrix = ( + np.einsum('si,sj->ij', samples, samples) / self.num_reads - + np.eye(game_state['num_nodes']) + ) + + # Handle isolated vertices + isolated_vertices = self._find_isolated_vertices(game_state) + if isolated_vertices: + samples = np.random.choice([1, -1], size=(self.num_reads, len(isolated_vertices))) + for i, vertex in enumerate(isolated_vertices): + correlation_matrix[:, vertex] = np.mean(samples[:, i]) + correlation_matrix[vertex, :] = np.mean(samples[:, i]) + correlation_matrix[vertex, vertex] = 0 + + # Compute results + winner, score, influence_vector = self._compute_winner_score_and_influence( + game_state, correlation_matrix + ) + + return AdjudicationResult( + game_state=game_state, + adjudicator='simulated_annealing', + winner=winner, + score=score, + influence_vector=influence_vector, + correlation_matrix=correlation_matrix, + parameters=self._parameters + ) + \ No newline at end of file From 2b6a68f51f52c5bb4f271f7400395db309a0b7be Mon Sep 17 00:00:00 2001 From: Geordie Rose Date: Wed, 29 Jan 2025 11:15:08 -0800 Subject: [PATCH 03/40] changed get_embeddings to have data_dir as input parameter --- tangled_adjudicate/utils/find_hardware_embeddings.py | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/tangled_adjudicate/utils/find_hardware_embeddings.py b/tangled_adjudicate/utils/find_hardware_embeddings.py index 5f29aa3..b27eea9 100644 --- a/tangled_adjudicate/utils/find_hardware_embeddings.py +++ b/tangled_adjudicate/utils/find_hardware_embeddings.py @@ -150,7 +150,7 @@ def raster_embedding_search(hardware_graph, 
subgraph, raster_breadth=2, delete_u return embmat -def get_embeddings(source_graph_number, qc_solver_to_use): +def get_embeddings(source_graph_number, qc_solver_to_use, data_dir): # generates multiple parallel embeddings into hardware for your graph # the smaller the graph, the longer this takes -- e.g. source_graph_number == 1 takes about 4 minutes # @@ -166,9 +166,7 @@ def get_embeddings(source_graph_number, qc_solver_to_use): file_name = ('embeddings_graph_number_' + str(source_graph_number) + '_raster_breadth_' + str(raster_breadth) + '_gridsize_' + str(grid_size) + '_qc_' + qc_solver_to_use + '.pkl') - data_dir = os.path.join(os.getcwd(), '..', 'data') # checks to see if /data exists; if not, creates it - - if not os.path.isdir(data_dir): + if not os.path.isdir(data_dir): # checks to see if /data exists; if not, creates it os.mkdir(data_dir) file_path = os.path.join(data_dir, file_name) From 3bdd1b8676a855ab7bbf860db8ad15066255e1c1 Mon Sep 17 00:00:00 2001 From: Geordie Rose Date: Wed, 29 Jan 2025 11:21:43 -0800 Subject: [PATCH 04/40] added self.j_map to base Adjudicator __init__ and use it in _game_state_to_ising to get correct J values --- tangled_adjudicate/adjudicators/adjudicator.py | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/tangled_adjudicate/adjudicators/adjudicator.py b/tangled_adjudicate/adjudicators/adjudicator.py index 6985089..1556ddd 100644 --- a/tangled_adjudicate/adjudicators/adjudicator.py +++ b/tangled_adjudicate/adjudicators/adjudicator.py @@ -32,6 +32,10 @@ class Adjudicator(ABC): def __init__(self) -> None: """Initialize base adjudicator.""" self._parameters: Dict[str, Any] = {} + self.j_map = {0: 0.0, # edge (i, j) uncolored , J_ij=0 + 1: 0.0, # edge (i, j) colored gray, J_ij=0 + 2: -1.0, # edge (i, j) colored green, FM coupling, J_ij=-1.0 + 3: 1.0} # edge (i, j) colored purple, AFM coupling, J_ij=+1.0 @abstractmethod def setup(self, **kwargs) -> None: @@ -77,10 +81,10 @@ def _game_state_to_ising(self, game_state: GameState) -> IsingModel: j = {} for edge in game_state['edges']: - v1, v2, weight = edge + v1, v2, edge_label = edge if v1 > v2: v1, v2 = v2, v1 - j[(v1, v2)] = float(weight) + j[(v1, v2)] = float(self.j_map[edge_label]) return IsingModel(h=h, j=j) From 71d861a0e54926ee6afb14ea9d17789308503545 Mon Sep 17 00:00:00 2001 From: Geordie Rose Date: Wed, 29 Jan 2025 11:46:43 -0800 Subject: [PATCH 05/40] changed to compare old and new; quantum_annealing not added yet --- .../utils/how_to_adjudicate_states.py | 108 +++++++++++++++--- 1 file changed, 90 insertions(+), 18 deletions(-) diff --git a/tangled_adjudicate/utils/how_to_adjudicate_states.py b/tangled_adjudicate/utils/how_to_adjudicate_states.py index 942671b..da5d02a 100644 --- a/tangled_adjudicate/utils/how_to_adjudicate_states.py +++ b/tangled_adjudicate/utils/how_to_adjudicate_states.py @@ -6,7 +6,13 @@ import time import numpy as np -from tangled_adjudicate.adjudicators.adjudicate import Adjudicator +from tangled_adjudicate.adjudicators.adjudicate import old_Adjudicator + +from tangled_adjudicate.adjudicators.simulated_annealing import SimulatedAnnealingAdjudicator +from tangled_adjudicate.adjudicators.quantum_annealing import QuantumAnnealingAdjudicator +from tangled_adjudicate.adjudicators.lookup_table import LookupTableAdjudicator +from tangled_adjudicate.adjudicators.schrodinger import SchrodingerEquationAdjudicator + from tangled_adjudicate.utils.parameters import Params from tangled_adjudicate.utils.game_graph_properties import GraphProperties from 
tangled_adjudicate.utils.generate_terminal_states import convert_state_string_to_game_state @@ -17,14 +23,18 @@ def main(): # there are two example_game_state dictionaries provided, which are terminal states in graph_number 2 and 3 # respectively, that are of the sort that are closest to the draw line at score = +- 1/2 - # solver_list = ['simulated_annealing', 'schrodinger_equation', 'quantum_annealing', 'look_up'] - solver_list = ['simulated_annealing', 'lookup_table'] + # solver_list = ['simulated_annealing', 'schrodinger_equation', 'quantum_annealing', 'lookup_table'] + solver_list = ['simulated_annealing', 'schrodinger_equation', 'lookup_table'] precision_digits = 4 # just to clean up print output np.set_printoptions(suppress=True) # remove scientific notation params = Params() - adjudicator = Adjudicator(params) + old_adjudicator = old_Adjudicator(params) + + args = {'data_dir': os.path.join(os.getcwd(), '..', 'data'), + 'graph_number': params.GRAPH_NUMBER, + 'solver_name': params.QC_SOLVER_TO_USE} example_game_state = None @@ -49,35 +59,97 @@ def main(): print('this introduction only has included game states for graphs 2 and 3. If you want a different' 'graph please add a new example_game_state here!') + # if 'simulated_annealing' in solver_list: + # sa_adjudicator = SimulatedAnnealingAdjudicator() + # sa_adjudicator.setup() + # start = time.time() + # new_sa_results = sa_adjudicator.adjudicate(example_game_state) + # print('elapsed time for simulated_annealing was', round(time.time() - start, precision_digits), 'seconds.') + # + # if 'quantum_annealing' in solver_list: + # qa_adjudicator = QuantumAnnealingAdjudicator() + # qa_adjudicator.setup(**args) + # new_qa_results = qa_adjudicator.adjudicate(example_game_state) + # + # if 'lookup_table' in solver_list: + # lt_adjudicator = LookupTableAdjudicator() + # lt_adjudicator.setup(**args) + # new_lt_results = lt_adjudicator.adjudicate(example_game_state) + # + # if 'schrodinger_equation' in solver_list: + # se_adjudicator = SchrodingerEquationAdjudicator() + # se_adjudicator.setup() + # new_se_results = se_adjudicator.adjudicate(example_game_state) + for solver_to_use in solver_list: start = time.time() # equivalent to e.g. 
results = adjudicator.simulated_annealing(example_game_state) - results = getattr(adjudicator, solver_to_use)(example_game_state) + old_results = getattr(old_adjudicator, solver_to_use)(example_game_state) + + print('elapsed time for old', solver_to_use, 'was', round(time.time() - start, precision_digits), 'seconds.') + + start = time.time() - print('elapsed time for', solver_to_use, 'was', round(time.time() - start, precision_digits), 'seconds.') + adjudicator = None - if results['correlation_matrix'] is None: - print('correlation matrix:', None) + if solver_to_use == 'simulated_annealing': + adjudicator = SimulatedAnnealingAdjudicator() else: - print('correlation matrix:') - print(np.round(results['correlation_matrix'], precision_digits)) + if solver_to_use == 'quantum_annealing': + adjudicator = QuantumAnnealingAdjudicator() + else: + if solver_to_use == 'lookup_table': + adjudicator = LookupTableAdjudicator() + else: + if solver_to_use == 'schrodinger_equation': + adjudicator = SchrodingerEquationAdjudicator() + + adjudicator.setup(**args) + new_results = adjudicator.adjudicate(example_game_state) + + print('elapsed time for new', solver_to_use, 'was', round(time.time() - start, precision_digits), 'seconds.') + + if old_results['correlation_matrix'] is None: + print('old correlation matrix:', None) + else: + print('old correlation matrix:') + print(np.round(old_results['correlation_matrix'], precision_digits)) + + if new_results['correlation_matrix'] is None: + print('new correlation matrix:', None) + else: + print('new correlation matrix:') + print(np.round(new_results['correlation_matrix'], precision_digits)) + + print('old winner:', old_results['winner']) + print('new winner:', new_results['winner']) - print('winner:', results['winner']) + if old_results['score'] is None: + print('old score:', old_results['score']) + else: + print('old score:', round(old_results['score'], precision_digits)) + + if new_results['score'] is None: + print('new score:', new_results['score']) + else: + print('new score:', round(new_results['score'], precision_digits)) - if results['score'] is None: - print('score:', results['score']) + if old_results['influence_vector'] is None: + print('old influence vector:', None) else: - print('score:', round(results['score'], precision_digits)) + print('old influence vector:', [round(old_results['influence_vector'][k], precision_digits) + for k in range(len(old_results['influence_vector']))]) - if results['influence_vector'] is None: - print('influence vector:', None) + if new_results['influence_vector'] is None: + print('new influence vector:', None) else: - print('influence vector:', [round(results['influence_vector'][k], precision_digits) - for k in range(len(results['influence_vector']))]) + print('new influence vector:', [round(new_results['influence_vector'][k], precision_digits) + for k in range(len(new_results['influence_vector']))]) print() + print() if __name__ == "__main__": From 07c0fa55ef379f7c38f73e614783bfd27d8ca76d Mon Sep 17 00:00:00 2001 From: Geordie Rose Date: Wed, 29 Jan 2025 13:02:33 -0800 Subject: [PATCH 06/40] added 'data_dir' kwarg requirement, and requirement to always load/compute automorphisms & embeddings --- .../adjudicators/quantum_annealing.py | 34 +++++++++++++------ 1 file changed, 24 insertions(+), 10 deletions(-) diff --git a/tangled_adjudicate/adjudicators/quantum_annealing.py b/tangled_adjudicate/adjudicators/quantum_annealing.py index 0728021..1d58b5a 100644 --- a/tangled_adjudicate/adjudicators/quantum_annealing.py +++ 
b/tangled_adjudicate/adjudicators/quantum_annealing.py @@ -1,3 +1,4 @@ +import os from typing import Dict, Any, List, Optional import numpy as np from dataclasses import dataclass @@ -8,6 +9,7 @@ from ..utils.find_hardware_embeddings import get_embeddings from .adjudicator import Adjudicator, GameState, AdjudicationResult + @dataclass class QAParameters: """Parameters for quantum annealing.""" @@ -21,6 +23,8 @@ class QAParameters: use_mock: bool = False solver_name: Optional[str] = None graph_number: Optional[int] = None + data_dir: Optional[str] = None + class QuantumAnnealingAdjudicator(Adjudicator): """Adjudicator implementation using D-Wave quantum annealing.""" @@ -70,13 +74,23 @@ def setup(self, **kwargs) -> None: raise ValueError("shim_iterations must be positive") if self.params.alpha_phi <= 0 or self.params.alpha_phi > 1: raise ValueError("alpha_phi must be in (0, 1]") - - # Get graph-specific data if graph number provided - if self.params.graph_number is not None: - self.automorphisms = get_automorphisms(self.params.graph_number) - self.embeddings = get_embeddings( - self.params.graph_number, - self.params.solver_name + + # load directory for automorphisms & embeddings + if 'data_dir' in kwargs: + if not isinstance(kwargs['data_dir'], str): + raise ValueError("data_dir must be a string") + if not os.path.isdir(kwargs['data_dir']): + raise ValueError(f"Directory not found: {kwargs['data_dir']}") + self.params.data_dir = kwargs['data_dir'] + + self._parameters = {'data_dir': self.params.data_dir} + + # we need these so always compute / load in + self.automorphisms = get_automorphisms(self.params.graph_number, self.params.data_dir) + self.embeddings = get_embeddings( + self.params.graph_number, + self.params.solver_name, + self.params.data_dir ) # Initialize sampler @@ -234,8 +248,8 @@ def adjudicate(self, game_state: GameState) -> AdjudicationResult: total_samples[:, vertex] = random_samples[:, i] # Calculate correlation matrix - sample_count = (self.params.num_reads * num_embeddings * - self.params.num_chip_runs) + sample_count = (self.params.num_reads * num_embeddings * self.params.num_chip_runs) + correlation_matrix = ( np.einsum('si,sj->ij', total_samples, total_samples) / sample_count - np.eye(num_vertices) @@ -254,4 +268,4 @@ def adjudicate(self, game_state: GameState) -> AdjudicationResult: influence_vector=influence_vector, correlation_matrix=correlation_matrix, parameters=self._parameters - ) \ No newline at end of file + ) From 86047af3a4b1bf0108ea87d1406649a16fdb0710 Mon Sep 17 00:00:00 2001 From: Geordie Rose Date: Wed, 29 Jan 2025 13:17:35 -0800 Subject: [PATCH 07/40] PEP --- tangled_adjudicate/adjudicators/adjudicator.py | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/tangled_adjudicate/adjudicators/adjudicator.py b/tangled_adjudicate/adjudicators/adjudicator.py index 1556ddd..87379f4 100644 --- a/tangled_adjudicate/adjudicators/adjudicator.py +++ b/tangled_adjudicate/adjudicators/adjudicator.py @@ -3,6 +3,7 @@ import numpy as np import numpy.typing as npt + class GameState(TypedDict): num_nodes: int edges: List[Tuple[int, int, int]] # (node1, node2, weight) @@ -13,6 +14,7 @@ class GameState(TypedDict): player1_node: Optional[int] player2_node: Optional[int] + class AdjudicationResult(TypedDict): game_state: GameState adjudicator: str @@ -22,10 +24,12 @@ class AdjudicationResult(TypedDict): correlation_matrix: Optional[npt.NDArray[np.float64]] parameters: Dict[str, Union[str, int, float, bool]] + class IsingModel(TypedDict): h: Dict[int, float] 
# Local fields j: Dict[Tuple[int, int], float] # Coupling strengths + class Adjudicator(ABC): """Base interface for game state adjudication implementations.""" @@ -136,4 +140,4 @@ def _compute_winner_score_and_influence( else: winner = 'draw' - return winner, score, influence_vector \ No newline at end of file + return winner, score, influence_vector From d5b363d622e30c4404bbbe19ed183e1b2e00e76d Mon Sep 17 00:00:00 2001 From: Geordie Rose Date: Wed, 29 Jan 2025 15:32:31 -0800 Subject: [PATCH 08/40] temporarily set USE_QC and USE_MOCK_DWAVE_SAMPLER to True for testing --- tangled_adjudicate/utils/parameters.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/tangled_adjudicate/utils/parameters.py b/tangled_adjudicate/utils/parameters.py index dce9240..2361743 100644 --- a/tangled_adjudicate/utils/parameters.py +++ b/tangled_adjudicate/utils/parameters.py @@ -19,8 +19,8 @@ def __init__(self): # The defaults here are no shimming, no gauge transforms, only use M=1 automorphism, and collect a lot of # samples (N=1000) - self.USE_QC = False # set to False if you just want to use e.g. simulated annealer - self.USE_MOCK_DWAVE_SAMPLER = False # set to True if you want a software version of the hardware (doesn't sample like the HW tho so don't trust it, just for debugging) + self.USE_QC = True # set to False if you just want to use e.g. simulated annealer + self.USE_MOCK_DWAVE_SAMPLER = True # set to True if you want a software version of the hardware (doesn't sample like the HW tho so don't trust it, just for debugging) self.QC_SOLVER_TO_USE = 'Advantage2_prototype2.6' # modify if you want to use a different QC self.NUMBER_OF_CHIP_RUNS = 1 # this is M From 89392c3f64e3a5cc8c21fcc3c20f0a529190d95f Mon Sep 17 00:00:00 2001 From: Geordie Rose Date: Wed, 29 Jan 2025 15:35:08 -0800 Subject: [PATCH 09/40] added self.data_dir to __init__, changed variable names to be the same as the new thing --- tangled_adjudicate/adjudicators/adjudicate.py | 57 ++++++++++--------- 1 file changed, 29 insertions(+), 28 deletions(-) diff --git a/tangled_adjudicate/adjudicators/adjudicate.py b/tangled_adjudicate/adjudicators/adjudicate.py index 1f04785..73f5263 100644 --- a/tangled_adjudicate/adjudicators/adjudicate.py +++ b/tangled_adjudicate/adjudicators/adjudicate.py @@ -23,9 +23,10 @@ class old_Adjudicator(object): def __init__(self, params): self.params = params self.results_dict = None + self.data_dir = os.path.join(os.getcwd(), '..', 'data') if self.params.USE_QC: # if using QC, get embeddings and automorphisms - self.automorphisms = get_automorphisms(self.params.GRAPH_NUMBER) - self.embeddings = get_embeddings(self.params.GRAPH_NUMBER, self.params.QC_SOLVER_TO_USE) + self.automorphisms = get_automorphisms(self.params.GRAPH_NUMBER, self.data_dir) + self.embeddings = get_embeddings(self.params.GRAPH_NUMBER, self.params.QC_SOLVER_TO_USE, self.data_dir) def compute_winner_score_and_influence_from_correlation_matrix(self, game_state, correlation_matrix): # correlation_matrix is assumed to be symmetric matrix with zeros on diagonal (so that self-correlation of @@ -134,10 +135,10 @@ def schrodinger_equation(self, game_state): def quantum_annealing(self, game_state): - number_of_embeddings = len(self.embeddings) # e.g. P=343 - number_of_problem_variables = game_state['num_nodes'] # e.g. 3 + num_vertices = game_state['num_nodes'] # e.g. 3 + num_embeddings = len(self.embeddings) # e.g. 
P=343 + total_samples = np.zeros((1, num_vertices)) # 0th layer to get vstack going, remove at the end - samples = np.zeros((1, number_of_problem_variables)) # 0th layer to get vstack going, remove at the end shim_stats = None all_samples = None indices_of_flips = None @@ -180,14 +181,14 @@ def quantum_annealing(self, game_state): # this finds any isolated vertices that may be in the graph -- we will replace the samples returned for these # at the end with true 50/50 statistics, so we don't have to worry about them - isolated_vertices = find_isolated_vertices(number_of_problem_variables, base_jay) + isolated_vertices = find_isolated_vertices(num_vertices, base_jay) # We now enter a loop where each pass through the loop programs the chip to specific values of h and J but # now for the entire chip. We do this by first selecting one automorphism and embedding it in multiple # parallel ways across the entire chip, and then optionally applying a gauge transform across all the qubits # used. This latter process chooses different random gauges for each of the embedded instances. - for chip_run_idx in range(self.params.NUMBER_OF_CHIP_RUNS): + for _ in range(self.params.NUMBER_OF_CHIP_RUNS): # ******************************************************************* # Step 1: Randomly select an automorphism and embed it multiple times @@ -198,9 +199,9 @@ def quantum_annealing(self, game_state): permuted_embedding = [] - for each_embedding in self.embeddings[:number_of_embeddings]: # each_embedding is like [1093, 1098, 136]; 343 of these for three-vertex graph + for each_embedding in self.embeddings[:num_embeddings]: # each_embedding is like [1093, 1098, 136]; 343 of these for three-vertex graph this_embedding = [] - for each_vertex in range(number_of_problem_variables): # each_vertex ranges from 0 to 2 + for each_vertex in range(num_vertices): # each_vertex ranges from 0 to 2 this_embedding.append(each_embedding[inverted_automorphism_to_use[each_vertex]]) permuted_embedding.append(this_embedding) @@ -209,9 +210,9 @@ def quantum_annealing(self, game_state): embedding_to_use = {} - for embedding_idx in range(number_of_embeddings): - for each_vertex in range(number_of_problem_variables): # up to 0..1037 - embedding_to_use[number_of_problem_variables * embedding_idx + each_vertex] = \ + for embedding_idx in range(num_embeddings): + for each_vertex in range(num_vertices): # up to 0..1037 + embedding_to_use[num_vertices * embedding_idx + each_vertex] = \ [permuted_embedding[embedding_idx][each_vertex]] # ***************************************************************************************************** @@ -226,17 +227,17 @@ def quantum_annealing(self, game_state): full_h = {} full_j = {} - for embedding_idx in range(number_of_embeddings): - for each_vertex in range(number_of_problem_variables): - full_h[number_of_problem_variables * embedding_idx + each_vertex] = 0 + for embedding_idx in range(num_embeddings): + for each_vertex in range(num_vertices): + full_h[num_vertices * embedding_idx + each_vertex] = 0 for k, v in base_jay.items(): edge_under_automorph = (min(automorphism_to_use[k[0]], automorphism_to_use[k[1]]), max(automorphism_to_use[k[0]], automorphism_to_use[k[1]])) full_j[edge_under_automorph] = v - for j in range(1, number_of_embeddings): - full_j[(edge_under_automorph[0] + number_of_problem_variables * j, - edge_under_automorph[1] + number_of_problem_variables * j)] = v + for j in range(1, num_embeddings): + full_j[(edge_under_automorph[0] + num_vertices * j, + edge_under_automorph[1] + 
num_vertices * j)] = v # ************************************************************************** # Step 3: Choose random gauge, modify h, J parameters for full chip using it @@ -311,11 +312,11 @@ def quantum_annealing(self, game_state): # *********************************** # this should make a big fat stack of the results in BLUE variable ordering - all_samples_processed_blue = all_samples[:, range(number_of_problem_variables)] - for k in range(1, number_of_embeddings): + all_samples_processed_blue = all_samples[:, range(num_vertices)] + for k in range(1, num_embeddings): all_samples_processed_blue = np.vstack((all_samples_processed_blue, - all_samples[:, range(number_of_problem_variables * k, - number_of_problem_variables * (k + 1))])) + all_samples[:, range(num_vertices * k, + num_vertices * (k + 1))])) # ********************************************************************** # Step 9: Reorder columns to make them BLACK order instead of BLUE order @@ -327,23 +328,23 @@ def quantum_annealing(self, game_state): # Step 10: Add new samples to the stack, all in BLACK order # ********************************************************* - samples = np.vstack((samples, all_samples_processed_black)) + total_samples = np.vstack((total_samples, all_samples_processed_black)) # *************************************************************** # Step 11: Post process samples stack to extract return variables # *************************************************************** - samples = np.delete(samples, (0), axis=0) # delete first row of zeros + total_samples = np.delete(total_samples, (0), axis=0) # delete first row of zeros # replace columns where there are disconnected variables with truly random samples for idx in isolated_vertices: - samples[:, idx] = np.random.choice([1, -1], size=samples.shape[0]) + total_samples[:, idx] = np.random.choice([1, -1], size=total_samples.shape[0]) - sample_count = self.params.NUM_READS_QC * number_of_embeddings * self.params.NUMBER_OF_CHIP_RUNS + sample_count = self.params.NUM_READS_QC * num_embeddings * self.params.NUMBER_OF_CHIP_RUNS # this is a full matrix with zeros on the diagonal that uses all the samples correlation_matrix = \ - (np.einsum('si,sj->ij', samples, samples) / sample_count - + (np.einsum('si,sj->ij', total_samples, total_samples) / sample_count - np.eye(int(game_state['num_nodes']))) winner, score_difference, influence_vector = ( @@ -383,4 +384,4 @@ def lookup_table(self, game_state): 'winner': winner, 'score': None, 'influence_vector': None, 'correlation_matrix': None, 'parameters': self.params} - return return_dictionary \ No newline at end of file + return return_dictionary From ee97af56c0abbc78450f412df86c827aa0720fef Mon Sep 17 00:00:00 2001 From: Geordie Rose Date: Wed, 29 Jan 2025 15:35:35 -0800 Subject: [PATCH 10/40] just quantum_annealing to test --- tangled_adjudicate/utils/how_to_adjudicate_states.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tangled_adjudicate/utils/how_to_adjudicate_states.py b/tangled_adjudicate/utils/how_to_adjudicate_states.py index da5d02a..f209542 100644 --- a/tangled_adjudicate/utils/how_to_adjudicate_states.py +++ b/tangled_adjudicate/utils/how_to_adjudicate_states.py @@ -24,7 +24,7 @@ def main(): # respectively, that are of the sort that are closest to the draw line at score = +- 1/2 # solver_list = ['simulated_annealing', 'schrodinger_equation', 'quantum_annealing', 'lookup_table'] - solver_list = ['simulated_annealing', 'schrodinger_equation', 'lookup_table'] + solver_list = 
['quantum_annealing'] precision_digits = 4 # just to clean up print output np.set_printoptions(suppress=True) # remove scientific notation From 1469ff949eeefd74d348a75de6037c5775be7b2b Mon Sep 17 00:00:00 2001 From: Geordie Rose Date: Wed, 29 Jan 2025 15:36:28 -0800 Subject: [PATCH 11/40] Work in Progress -- going through carefully --- .../adjudicators/quantum_annealing.py | 57 +++++++++++-------- 1 file changed, 34 insertions(+), 23 deletions(-) diff --git a/tangled_adjudicate/adjudicators/quantum_annealing.py b/tangled_adjudicate/adjudicators/quantum_annealing.py index 1d58b5a..5406f94 100644 --- a/tangled_adjudicate/adjudicators/quantum_annealing.py +++ b/tangled_adjudicate/adjudicators/quantum_annealing.py @@ -20,7 +20,7 @@ class QAParameters: use_shim: bool = False shim_iterations: int = 1 alpha_phi: float = 0.1 - use_mock: bool = False + use_mock: bool = True solver_name: Optional[str] = None graph_number: Optional[int] = None data_dir: Optional[str] = None @@ -35,7 +35,8 @@ def __init__(self) -> None: self.params = QAParameters() self.embeddings: List[List[int]] = [] self.automorphisms: List[Dict[int, int]] = [] - self.sampler: Optional[FixedEmbeddingComposite] = None + self.shim_stats: Dict[str] = {} + # self.sampler: Optional[FixedEmbeddingComposite] = None def setup(self, **kwargs) -> None: """Configure quantum annealing parameters and initialize D-Wave connection. @@ -83,23 +84,16 @@ def setup(self, **kwargs) -> None: raise ValueError(f"Directory not found: {kwargs['data_dir']}") self.params.data_dir = kwargs['data_dir'] - self._parameters = {'data_dir': self.params.data_dir} + # self._parameters = {'data_dir': self.params.data_dir} # we need these so always compute / load in self.automorphisms = get_automorphisms(self.params.graph_number, self.params.data_dir) - self.embeddings = get_embeddings( - self.params.graph_number, - self.params.solver_name, - self.params.data_dir - ) + self.embeddings = get_embeddings(self.params.graph_number, self.params.solver_name, self.params.data_dir) # Initialize sampler try: if self.params.use_mock: - base_sampler = MockDWaveSampler( - topology_type='zephyr', - topology_shape=[6, 4] - ) + base_sampler = MockDWaveSampler(topology_type='zephyr', topology_shape=[6, 4]) else: base_sampler = DWaveSampler(solver=self.params.solver_name) @@ -108,7 +102,13 @@ def setup(self, **kwargs) -> None: except Exception as e: raise RuntimeError(f"Failed to initialize D-Wave sampler: {str(e)}") - + + # initialize shim_stats if required + if self.params.use_shim: + self.shim_stats = {'qubit_magnetizations': [], + 'average_absolute_value_of_magnetization': [], + 'all_flux_bias_offsets': []} + # Store parameters self._parameters = self.params.__dict__ @@ -177,7 +177,13 @@ def adjudicate(self, game_state: GameState) -> AdjudicationResult: num_vertices = game_state['num_nodes'] num_embeddings = len(self.embeddings) total_samples = np.zeros((1, num_vertices)) # Initial array for stacking - + + all_samples = None + indices_of_flips = None + + if self.params.use_mock and self.params.use_shim: + print('D-Wave mock sampler is not set up to use the shimming process, turn shim off if using mock!') + # Process each chip run for _ in range(self.params.num_chip_runs): # Select random automorphism @@ -185,36 +191,41 @@ def adjudicate(self, game_state: GameState) -> AdjudicationResult: embedding_map = self._process_embedding(game_state, automorphism) # Create sampler with fixed embedding - sampler = FixedEmbeddingComposite( - self._base_sampler, - embedding=embedding_map - ) + 
sampler = FixedEmbeddingComposite(self._base_sampler, embedding=embedding_map) # Get Ising model ising_model = self._game_state_to_ising(game_state) # Set up sampling parameters - sample_kwargs = { + sampler_kwargs = { 'num_reads': self.params.num_reads, 'answer_mode': 'raw' } if not self.params.use_mock: - sample_kwargs.update({ + sampler_kwargs.update({ + 'fast_anneal': True, 'annealing_time': self.params.anneal_time / 1000, 'auto_scale': False }) - + + if self.params.use_shim: + sampler_kwargs.update({'readout_thermalization': 100., + 'auto_scale': False, + 'flux_drift_compensation': True, + 'flux_biases': [0] * base_sampler.properties['num_qubits']}) + # Perform sampling response = sampler.sample_ising( ising_model['h'], ising_model['j'], - **sample_kwargs + **sampler_kwargs ) # Process samples samples = np.array(response.record.sample) - + + # todo this is in the wrong order # Apply gauge transform if enabled if self.params.use_gauge_transform: flip_indices = np.random.choice( From d9a0f5beca4667e37a9bb0de6d9e2c4600ef4b35 Mon Sep 17 00:00:00 2001 From: Geordie Rose Date: Thu, 30 Jan 2025 10:32:05 -0800 Subject: [PATCH 12/40] chnaged variable names to match erik --- tangled_adjudicate/adjudicators/adjudicate.py | 18 +++++++++--------- 1 file changed, 9 insertions(+), 9 deletions(-) diff --git a/tangled_adjudicate/adjudicators/adjudicate.py b/tangled_adjudicate/adjudicators/adjudicate.py index 73f5263..3aec01f 100644 --- a/tangled_adjudicate/adjudicators/adjudicate.py +++ b/tangled_adjudicate/adjudicators/adjudicate.py @@ -194,8 +194,8 @@ def quantum_annealing(self, game_state): # Step 1: Randomly select an automorphism and embed it multiple times # ******************************************************************* - automorphism_to_use = random.choice(self.automorphisms) # eg {0:0, 1:2, 2:1} - inverted_automorphism_to_use = {v: k for k, v in automorphism_to_use.items()} # swaps key <-> values + automorphism = random.choice(self.automorphisms) # eg {0:0, 1:2, 2:1} + inverted_automorphism_to_use = {v: k for k, v in automorphism.items()} # swaps key <-> values permuted_embedding = [] @@ -208,11 +208,11 @@ def quantum_annealing(self, game_state): # given that permuted_embedding looks like [[1229, 1235, 563], [872, 242, 866], ...] 
# this next part converts into the format {0: [1229], 1: [1235], 2: [563], 3: [872], 4: [242], 5: [866]} - embedding_to_use = {} + embedding_map = {} for embedding_idx in range(num_embeddings): for each_vertex in range(num_vertices): # up to 0..1037 - embedding_to_use[num_vertices * embedding_idx + each_vertex] = \ + embedding_map[num_vertices * embedding_idx + each_vertex] = \ [permuted_embedding[embedding_idx][each_vertex]] # ***************************************************************************************************** @@ -232,8 +232,8 @@ def quantum_annealing(self, game_state): full_h[num_vertices * embedding_idx + each_vertex] = 0 for k, v in base_jay.items(): - edge_under_automorph = (min(automorphism_to_use[k[0]], automorphism_to_use[k[1]]), - max(automorphism_to_use[k[0]], automorphism_to_use[k[1]])) + edge_under_automorph = (min(automorphism[k[0]], automorphism[k[1]]), + max(automorphism[k[0]], automorphism[k[1]])) full_j[edge_under_automorph] = v for j in range(1, num_embeddings): full_j[(edge_under_automorph[0] + num_vertices * j, @@ -260,7 +260,7 @@ def quantum_annealing(self, game_state): sampler_kwargs.update({'h': full_h, 'J': full_j}) - sampler = FixedEmbeddingComposite(base_sampler, embedding=embedding_to_use) # applies the embedding + sampler = FixedEmbeddingComposite(base_sampler, embedding=embedding_map) # applies the embedding # ************************************************************************* # Step 5: Optionally start shimming process in the BLUE with RED STAR basis @@ -286,7 +286,7 @@ def quantum_annealing(self, game_state): shim_stats['average_absolute_value_of_magnetization'].append(np.sum([abs(k) for k in magnetization])/len(magnetization)) qubit_magnetization = [0] * base_sampler.properties['num_qubits'] - for k, v in embedding_to_use.items(): + for k, v in embedding_map.items(): qubit_magnetization[v[0]] = magnetization[k] # check shim_stats['qubit_magnetizations'].append(qubit_magnetization) @@ -322,7 +322,7 @@ def quantum_annealing(self, game_state): # Step 9: Reorder columns to make them BLACK order instead of BLUE order # ********************************************************************** - all_samples_processed_black = all_samples_processed_blue[:, [automorphism_to_use[i] for i in range(all_samples_processed_blue.shape[1])]] + all_samples_processed_black = all_samples_processed_blue[:, [automorphism[i] for i in range(all_samples_processed_blue.shape[1])]] # ********************************************************* # Step 10: Add new samples to the stack, all in BLACK order From f7d765f5d14048b05eecadc793814160f9f7013e Mon Sep 17 00:00:00 2001 From: Geordie Rose Date: Thu, 30 Jan 2025 10:32:19 -0800 Subject: [PATCH 13/40] WIP --- .../adjudicators/quantum_annealing.py | 234 +++++++++++++++++- 1 file changed, 222 insertions(+), 12 deletions(-) diff --git a/tangled_adjudicate/adjudicators/quantum_annealing.py b/tangled_adjudicate/adjudicators/quantum_annealing.py index 5406f94..4577321 100644 --- a/tangled_adjudicate/adjudicators/quantum_annealing.py +++ b/tangled_adjudicate/adjudicators/quantum_annealing.py @@ -133,26 +133,41 @@ def _apply_gauge_transform( def _process_embedding( self, game_state: GameState, - automorphism: Dict[int, int] + automorphism: Dict[int, int], + num_embeddings: int ) -> Dict[int, List[int]]: """Process embedding with given automorphism. 
Args: game_state: Current game state automorphism: Graph automorphism to apply + num_embeddings: number of embeddings to use; default is all of them Returns: Processed embedding mapping """ - inverted_automorphism = {v: k for k, v in automorphism.items()} + num_vertices = game_state['num_nodes'] - + + inverted_automorphism_to_use = {v: k for k, v in automorphism.items()} # swaps key <-> values + + permuted_embedding = [] + + for each_embedding in self.embeddings[:num_embeddings]: # each_embedding is like [1093, 1098, 136]; 343 of these for three-vertex graph + this_embedding = [] + for each_vertex in range(num_vertices): # each_vertex ranges from 0 to 2 + this_embedding.append(each_embedding[inverted_automorphism_to_use[each_vertex]]) + permuted_embedding.append(this_embedding) + + # given that permuted_embedding looks like [[1229, 1235, 563], [872, 242, 866], ...] + # this next part converts into the format {0: [1229], 1: [1235], 2: [563], 3: [872], 4: [242], 5: [866]} + embedding_map = {} - for embedding_idx, embedding in enumerate(self.embeddings): - for vertex in range(num_vertices): - logical_idx = num_vertices * embedding_idx + vertex - physical_qubit = embedding[inverted_automorphism[vertex]] - embedding_map[logical_idx] = [physical_qubit] + + for embedding_idx in range(num_embeddings): + for each_vertex in range(num_vertices): # up to 0..1037 + embedding_map[num_vertices * embedding_idx + each_vertex] = \ + [permuted_embedding[embedding_idx][each_vertex]] return embedding_map @@ -181,15 +196,210 @@ def adjudicate(self, game_state: GameState) -> AdjudicationResult: all_samples = None indices_of_flips = None + # set up sampler kwargs if self.params.use_mock and self.params.use_shim: print('D-Wave mock sampler is not set up to use the shimming process, turn shim off if using mock!') - # Process each chip run + sampler_kwargs = { + 'num_reads': self.params.num_reads, + 'answer_mode': 'raw' + } + + if self.params.use_mock: + base_sampler = MockDWaveSampler(topology_type='zephyr', topology_shape=[6, 4]) + else: + base_sampler = DWaveSampler(solver=self.params.QC_SOLVER_TO_USE) + sampler_kwargs.update({ + 'fast_anneal': True, + 'annealing_time': self.params.anneal_time / 1000, + 'auto_scale': False + }) + + if self.params.use_shim: + shim_stats = {'qubit_magnetizations': [], + 'average_absolute_value_of_magnetization': [], + 'all_flux_bias_offsets': []} + sampler_kwargs.update({'readout_thermalization': 100., + 'auto_scale': False, + 'flux_drift_compensation': True, + 'flux_biases': [0] * base_sampler.properties['num_qubits']}) + shim_iterations = self.params.shim_iterations + else: + shim_iterations = 1 # if we don't shim, just run through shim step only once + + # ********************************************************** + # Step 0: convert game_state to the desired base Ising model + # ********************************************************** + + # for tangled, h_j=0 for all vertices j in the game graph, and J_ij is one of +1, -1, or 0 for all vertex + # pairs i,j. I named the "base" values (the actual problem defined on the game graph we are asked to solve) + # base_ising_model. 
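        # As a concrete illustration of what that helper returns (the label -> coupling
        # mapping below is an assumption for illustration only; the real mapping lives in
        # _game_state_to_ising): for the graph-2 triangle example used in this repo, with
        # every played edge taken to be ferromagnetic at J = -1, one would get
        #
        #     base_ising_model == {'h': {0: 0.0, 1: 0.0, 2: 0.0},
        #                          'j': {(0, 1): -1.0, (0, 2): -1.0, (1, 2): -1.0}}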
+ + base_ising_model = self._game_state_to_ising(game_state) + + # this finds any isolated vertices that may be in the graph -- we will replace the samples returned for these + # at the end with true 50/50 statistics, so we don't have to worry about them + + isolated_vertices = self._find_isolated_vertices(game_state) + + # We now enter a loop where each pass through the loop programs the chip to specific values of h and J but + # now for the entire chip. We do this by first selecting one automorphism and embedding it in multiple + # parallel ways across the entire chip, and then optionally applying a gauge transform across all the qubits + # used. This latter process chooses different random gauges for each of the embedded instances. + for _ in range(self.params.num_chip_runs): - # Select random automorphism + + # ******************************************************************* + # Step 1: Randomly select an automorphism and embed it multiple times + # ******************************************************************* + automorphism = np.random.choice(self.automorphisms) - embedding_map = self._process_embedding(game_state, automorphism) - + embedding_map = self._process_embedding(game_state, automorphism, num_embeddings) + + # ***************************************************************************************************** + # Step 2: Set h, J parameters for full chip using parallel embeddings of a randomly chosen automorphism + # ***************************************************************************************************** + + # compute full_h and full_j which are h, jay values for the entire chip assuming the above automorphism + # I am calling the problem definition and variable ordering before the automorphism the BLACK or BASE + # situation. After the automorphism the problem definition and variable labels change -- I'm calling the + # situation after the automorphism has been applied the BLUE situation. + + full_h = {} + full_j = {} + + for embedding_idx in range(num_embeddings): + for each_vertex in range(num_vertices): + full_h[num_vertices * embedding_idx + each_vertex] = 0 + + for k, v in base_ising_model['j'].items(): # is this correct? + edge_under_automorph = (min(automorphism[k[0]], automorphism[k[1]]), + max(automorphism[k[0]], automorphism[k[1]])) + full_j[edge_under_automorph] = v + for j in range(1, num_embeddings): + full_j[(edge_under_automorph[0] + num_vertices * j, + edge_under_automorph[1] + num_vertices * j)] = v + + # ************************************************************************** + # Step 3: Choose random gauge, modify h, J parameters for full chip using it + # ************************************************************************** + + # next we optionally apply a random gauge transformation. I call the situation after the gauge + # transformation has been applied the BLUE with RED STAR situation. 
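            # A short sketch of the spin-reversal ("gauge") idea used below, written with
            # hypothetical names: draw g_i in {-1, +1} for each logical variable, send
            # J_ij -> g_i * g_j * J_ij (h_i -> g_i * h_i is trivial here because every h_i
            # is 0), sample the transformed problem, then map each returned sample back via
            # s_i -> g_i * s_i -- which is what flipping the columns listed in
            # indices_of_flips does in Step 7. The energies are unchanged, so the statistics
            # still describe the original problem while qubit-level biases are randomized:
            #
            #     g = {i: random.choice([-1, 1]) for i in full_h}
            #     gauged_j = {(i, j): g[i] * g[j] * v for (i, j), v in full_j.items()}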
+ + if self.params.use_gauge_transform: + flip_map = [random.choice([-1, 1]) for _ in full_h] # random list of +1, -1 values of len # qubits + indices_of_flips = [i for i, x in enumerate(flip_map) if x == -1] # the indices of the -1 values + + for edge_key, j_val in full_j.items(): # for each edge and associated J value + full_j[edge_key] = j_val * flip_map[edge_key[0]] * flip_map[edge_key[1]] # Jij -> J_ij g_i g_j + + # ***************************************** + # Step 4: Choose sampler and its parameters + # ***************************************** + + sampler_kwargs.update({'h': full_h, + 'J': full_j}) + + sampler = FixedEmbeddingComposite(base_sampler, embedding=embedding_map) # applies the embedding + + # ************************************************************************* + # Step 5: Optionally start shimming process in the BLUE with RED STAR basis + # ************************************************************************* + + # all of this in the BLUE with RED STAR basis, ie post automorph, post gauge transform + for shim_iteration_idx in range(shim_iterations): + + # ************************************** + # Step 6: Generate samples from hardware + # ************************************** + + ss = sampler.sample_ising(**sampler_kwargs) + all_samples = ss.record.sample + + if self.params.use_shim: + + # ************************************************************* + # Step 6a: Compute average values of each qubit == magnetization + # ************************************************************* + + magnetization = np.sum(all_samples, axis=0)/self.params.NUM_READS_QC # BLUE with RED STAR label ordering + shim_stats['average_absolute_value_of_magnetization'].append(np.sum([abs(k) for k in magnetization])/len(magnetization)) + + qubit_magnetization = [0] * base_sampler.properties['num_qubits'] + for k, v in embedding_map.items(): + qubit_magnetization[v[0]] = magnetization[k] # check + + shim_stats['qubit_magnetizations'].append(qubit_magnetization) + + # ************************************** + # Step 6b: Adjust flux bias offset terms + # ************************************** + + for k in range(base_sampler.properties['num_qubits']): + sampler_kwargs['flux_biases'][k] -= self.params.ALPHA_PHI * qubit_magnetization[k] + + shim_stats['all_flux_bias_offsets'].append(sampler_kwargs['flux_biases']) + + # ***************************************************************************************************** + # Step 7: Reverse gauge transform, from BLUE with RED STAR to just BLUE, after shimming process is done + # ***************************************************************************************************** + + if self.params.use_gauge_transform: + all_samples[:, indices_of_flips] = -all_samples[:, indices_of_flips] + + # *********************************** + # Step 8: Stack samples in BLUE order + # *********************************** + + # this should make a big fat stack of the results in BLUE variable ordering + all_samples_processed_blue = all_samples[:, range(num_vertices)] + for k in range(1, num_embeddings): + all_samples_processed_blue = np.vstack((all_samples_processed_blue, + all_samples[:, range(num_vertices * k, + num_vertices * (k + 1))])) + + # ********************************************************************** + # Step 9: Reorder columns to make them BLACK order instead of BLUE order + # ********************************************************************** + + all_samples_processed_black = all_samples_processed_blue[:, [automorphism[i] for i in 
range(all_samples_processed_blue.shape[1])]] + + # ********************************************************* + # Step 10: Add new samples to the stack, all in BLACK order + # ********************************************************* + + total_samples = np.vstack((total_samples, all_samples_processed_black)) + + # *************************************************************** + # Step 11: Post process samples stack to extract return variables + # *************************************************************** + + total_samples = np.delete(total_samples, (0), axis=0) # delete first row of zeros + + # replace columns where there are disconnected variables with truly random samples + for idx in isolated_vertices: + total_samples[:, idx] = np.random.choice([1, -1], size=total_samples.shape[0]) + + sample_count = self.params.NUM_READS_QC * num_embeddings * self.params.NUMBER_OF_CHIP_RUNS + + # this is a full matrix with zeros on the diagonal that uses all the samples + correlation_matrix = \ + (np.einsum('si,sj->ij', total_samples, total_samples) / sample_count - + np.eye(int(game_state['num_nodes']))) + + winner, score_difference, influence_vector = ( + self.compute_winner_score_and_influence_from_correlation_matrix(game_state, correlation_matrix)) + + # todo make this compatible with output of erik's version + return_dictionary = {'game_state': game_state, 'adjudicator': 'quantum_annealing', + 'winner': winner, 'score': score_difference, 'influence_vector': influence_vector, + 'correlation_matrix': correlation_matrix, 'parameters': self.params} + + return return_dictionary + + + # Create sampler with fixed embedding sampler = FixedEmbeddingComposite(self._base_sampler, embedding=embedding_map) From 5ef74981445b0bde327baf8bf90e705bf8fb34e1 Mon Sep 17 00:00:00 2001 From: Geordie Rose Date: Fri, 31 Jan 2025 12:22:17 -0800 Subject: [PATCH 14/40] fixed epsilon value --- tangled_adjudicate/adjudicators/adjudicator.py | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/tangled_adjudicate/adjudicators/adjudicator.py b/tangled_adjudicate/adjudicators/adjudicator.py index 87379f4..7757d1f 100644 --- a/tangled_adjudicate/adjudicators/adjudicator.py +++ b/tangled_adjudicate/adjudicators/adjudicator.py @@ -113,7 +113,7 @@ def _compute_winner_score_and_influence( self, game_state: GameState, correlation_matrix: npt.NDArray[np.float64], - epsilon: float = 1e-6 + epsilon: float = 0.5 ) -> Tuple[Optional[str], Optional[float], npt.NDArray[np.float64]]: """Compute winner, score and influence from correlation matrix.""" if not isinstance(correlation_matrix, np.ndarray): @@ -130,8 +130,7 @@ def _compute_winner_score_and_influence( if game_state['player1_node'] is None or game_state['player2_node'] is None: return None, None, influence_vector - score = (influence_vector[game_state['player1_node']] - - influence_vector[game_state['player2_node']]) + score = influence_vector[game_state['player1_node']] - influence_vector[game_state['player2_node']] if score > epsilon: winner = 'red' From 67423c9953a0b98eb0db9062718a639209ff9782 Mon Sep 17 00:00:00 2001 From: Geordie Rose Date: Fri, 31 Jan 2025 12:28:19 -0800 Subject: [PATCH 15/40] comment and PEP changes --- tangled_adjudicate/adjudicators/adjudicator.py | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/tangled_adjudicate/adjudicators/adjudicator.py b/tangled_adjudicate/adjudicators/adjudicator.py index 7757d1f..0063b8f 100644 --- a/tangled_adjudicate/adjudicators/adjudicator.py +++ 
b/tangled_adjudicate/adjudicators/adjudicator.py @@ -6,7 +6,7 @@ class GameState(TypedDict): num_nodes: int - edges: List[Tuple[int, int, int]] # (node1, node2, weight) + edges: List[Tuple[int, int, int]] # (node1, node2, edge_label=0,1,2,3) player1_id: str player2_id: str turn_count: int @@ -68,8 +68,7 @@ def _validate_game_state(self, game_state: GameState) -> None: for edge in game_state['edges']: if len(edge) != 3: raise ValueError(f"Invalid edge format: {edge}") - if not (0 <= edge[0] < game_state['num_nodes'] and - 0 <= edge[1] < game_state['num_nodes']): + if not (0 <= edge[0] < game_state['num_nodes'] and 0 <= edge[1] < game_state['num_nodes']): raise ValueError(f"Edge vertices out of range: {edge}") def _game_state_to_ising(self, game_state: GameState) -> IsingModel: From 9c2c30f905758eb58422ff6252e789849c3441ee Mon Sep 17 00:00:00 2001 From: Geordie Rose Date: Fri, 31 Jan 2025 12:29:59 -0800 Subject: [PATCH 16/40] PEP --- tangled_adjudicate/adjudicators/lookup_table.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/tangled_adjudicate/adjudicators/lookup_table.py b/tangled_adjudicate/adjudicators/lookup_table.py index 994c3f3..de2a49b 100644 --- a/tangled_adjudicate/adjudicators/lookup_table.py +++ b/tangled_adjudicate/adjudicators/lookup_table.py @@ -10,6 +10,7 @@ ) from .adjudicator import Adjudicator, GameState, AdjudicationResult + class LookupTableAdjudicator(Adjudicator): """Adjudicator implementation using pre-computed lookup tables.""" @@ -88,8 +89,7 @@ def adjudicate(self, game_state: GameState) -> AdjudicationResult: self._validate_game_state(game_state) # Load lookup table if needed - if (self.results_dict is None or - len(next(iter(self.results_dict.keys()))) != game_state['num_nodes']): + if (self.results_dict is None or len(next(iter(self.results_dict.keys()))) != game_state['num_nodes']): self._load_lookup_table(game_state['num_nodes']) if not self.results_dict: From 54e37e2c8d095eaa894155c63db1ab6dfa28ebd0 Mon Sep 17 00:00:00 2001 From: Geordie Rose Date: Fri, 31 Jan 2025 12:32:14 -0800 Subject: [PATCH 17/40] reverted to the original code but with the new wrappings --- .../adjudicators/quantum_annealing.py | 158 +++--------------- 1 file changed, 21 insertions(+), 137 deletions(-) diff --git a/tangled_adjudicate/adjudicators/quantum_annealing.py b/tangled_adjudicate/adjudicators/quantum_annealing.py index 4577321..552d9b6 100644 --- a/tangled_adjudicate/adjudicators/quantum_annealing.py +++ b/tangled_adjudicate/adjudicators/quantum_annealing.py @@ -16,12 +16,12 @@ class QAParameters: num_reads: int = 1000 anneal_time: float = 5.0 # ns num_chip_runs: int = 1 - use_gauge_transform: bool = True + use_gauge_transform: bool = False use_shim: bool = False shim_iterations: int = 1 alpha_phi: float = 0.1 use_mock: bool = True - solver_name: Optional[str] = None + solver_name: str = 'Advantage2_prototype2.6' graph_number: Optional[int] = None data_dir: Optional[str] = None @@ -36,8 +36,7 @@ def __init__(self) -> None: self.embeddings: List[List[int]] = [] self.automorphisms: List[Dict[int, int]] = [] self.shim_stats: Dict[str] = {} - # self.sampler: Optional[FixedEmbeddingComposite] = None - + def setup(self, **kwargs) -> None: """Configure quantum annealing parameters and initialize D-Wave connection. 
@@ -84,8 +83,6 @@ def setup(self, **kwargs) -> None: raise ValueError(f"Directory not found: {kwargs['data_dir']}") self.params.data_dir = kwargs['data_dir'] - # self._parameters = {'data_dir': self.params.data_dir} - # we need these so always compute / load in self.automorphisms = get_automorphisms(self.params.graph_number, self.params.data_dir) self.embeddings = get_embeddings(self.params.graph_number, self.params.solver_name, self.params.data_dir) @@ -111,25 +108,7 @@ def setup(self, **kwargs) -> None: # Store parameters self._parameters = self.params.__dict__ - - def _apply_gauge_transform( - self, - samples: np.ndarray, - flip_indices: List[int] - ) -> np.ndarray: - """Apply gauge transformation to samples. - - Args: - samples: Sample array to transform - flip_indices: Indices where spins should be flipped - - Returns: - Transformed sample array - """ - samples = samples.copy() - samples[:, flip_indices] = -samples[:, flip_indices] - return samples - + def _process_embedding( self, game_state: GameState, @@ -184,6 +163,7 @@ def adjudicate(self, game_state: GameState) -> AdjudicationResult: ValueError: If the game state is invalid RuntimeError: If quantum annealing fails """ + if not self._base_sampler: raise RuntimeError("Sampler not initialized. Call setup() first.") @@ -200,15 +180,12 @@ def adjudicate(self, game_state: GameState) -> AdjudicationResult: if self.params.use_mock and self.params.use_shim: print('D-Wave mock sampler is not set up to use the shimming process, turn shim off if using mock!') - sampler_kwargs = { + sampler_kwargs = { 'num_reads': self.params.num_reads, 'answer_mode': 'raw' } - if self.params.use_mock: - base_sampler = MockDWaveSampler(topology_type='zephyr', topology_shape=[6, 4]) - else: - base_sampler = DWaveSampler(solver=self.params.QC_SOLVER_TO_USE) + if not self.params.use_mock: sampler_kwargs.update({ 'fast_anneal': True, 'annealing_time': self.params.anneal_time / 1000, @@ -216,9 +193,6 @@ def adjudicate(self, game_state: GameState) -> AdjudicationResult: }) if self.params.use_shim: - shim_stats = {'qubit_magnetizations': [], - 'average_absolute_value_of_magnetization': [], - 'all_flux_bias_offsets': []} sampler_kwargs.update({'readout_thermalization': 100., 'auto_scale': False, 'flux_drift_compensation': True, @@ -272,7 +246,7 @@ def adjudicate(self, game_state: GameState) -> AdjudicationResult: for each_vertex in range(num_vertices): full_h[num_vertices * embedding_idx + each_vertex] = 0 - for k, v in base_ising_model['j'].items(): # is this correct? + for k, v in base_ising_model['j'].items(): edge_under_automorph = (min(automorphism[k[0]], automorphism[k[1]]), max(automorphism[k[0]], automorphism[k[1]])) full_j[edge_under_automorph] = v @@ -288,7 +262,7 @@ def adjudicate(self, game_state: GameState) -> AdjudicationResult: # transformation has been applied the BLUE with RED STAR situation. 
if self.params.use_gauge_transform: - flip_map = [random.choice([-1, 1]) for _ in full_h] # random list of +1, -1 values of len # qubits + flip_map = [np.random.choice([-1, 1]) for _ in full_h] # random list of +1, -1 values of len # qubits indices_of_flips = [i for i, x in enumerate(flip_map) if x == -1] # the indices of the -1 values for edge_key, j_val in full_j.items(): # for each edge and associated J value @@ -301,7 +275,7 @@ def adjudicate(self, game_state: GameState) -> AdjudicationResult: sampler_kwargs.update({'h': full_h, 'J': full_j}) - sampler = FixedEmbeddingComposite(base_sampler, embedding=embedding_map) # applies the embedding + sampler = FixedEmbeddingComposite(self._base_sampler, embedding=embedding_map) # applies the embedding # ************************************************************************* # Step 5: Optionally start shimming process in the BLUE with RED STAR basis @@ -323,23 +297,23 @@ def adjudicate(self, game_state: GameState) -> AdjudicationResult: # Step 6a: Compute average values of each qubit == magnetization # ************************************************************* - magnetization = np.sum(all_samples, axis=0)/self.params.NUM_READS_QC # BLUE with RED STAR label ordering - shim_stats['average_absolute_value_of_magnetization'].append(np.sum([abs(k) for k in magnetization])/len(magnetization)) + magnetization = np.sum(all_samples, axis=0)/self.params.num_reads # BLUE with RED STAR label ordering + self.shim_stats['average_absolute_value_of_magnetization'].append(np.sum([abs(k) for k in magnetization])/len(magnetization)) - qubit_magnetization = [0] * base_sampler.properties['num_qubits'] + qubit_magnetization = [0] * self._base_sampler.properties['num_qubits'] for k, v in embedding_map.items(): qubit_magnetization[v[0]] = magnetization[k] # check - shim_stats['qubit_magnetizations'].append(qubit_magnetization) + self.shim_stats['qubit_magnetizations'].append(qubit_magnetization) # ************************************** # Step 6b: Adjust flux bias offset terms # ************************************** - for k in range(base_sampler.properties['num_qubits']): - sampler_kwargs['flux_biases'][k] -= self.params.ALPHA_PHI * qubit_magnetization[k] + for k in range(self._base_sampler.properties['num_qubits']): + sampler_kwargs['flux_biases'][k] -= self.params.alpha_phi * qubit_magnetization[k] - shim_stats['all_flux_bias_offsets'].append(sampler_kwargs['flux_biases']) + self.shim_stats['all_flux_bias_offsets'].append(sampler_kwargs['flux_biases']) # ***************************************************************************************************** # Step 7: Reverse gauge transform, from BLUE with RED STAR to just BLUE, after shimming process is done @@ -381,106 +355,16 @@ def adjudicate(self, game_state: GameState) -> AdjudicationResult: for idx in isolated_vertices: total_samples[:, idx] = np.random.choice([1, -1], size=total_samples.shape[0]) - sample_count = self.params.NUM_READS_QC * num_embeddings * self.params.NUMBER_OF_CHIP_RUNS + sample_count = self.params.num_reads * num_embeddings * self.params.num_chip_runs # this is a full matrix with zeros on the diagonal that uses all the samples correlation_matrix = \ (np.einsum('si,sj->ij', total_samples, total_samples) / sample_count - - np.eye(int(game_state['num_nodes']))) - - winner, score_difference, influence_vector = ( - self.compute_winner_score_and_influence_from_correlation_matrix(game_state, correlation_matrix)) - - # todo make this compatible with output of erik's version - return_dictionary = 
{'game_state': game_state, 'adjudicator': 'quantum_annealing', - 'winner': winner, 'score': score_difference, 'influence_vector': influence_vector, - 'correlation_matrix': correlation_matrix, 'parameters': self.params} + np.eye(num_vertices)) - return return_dictionary - - - - # Create sampler with fixed embedding - sampler = FixedEmbeddingComposite(self._base_sampler, embedding=embedding_map) - - # Get Ising model - ising_model = self._game_state_to_ising(game_state) - - # Set up sampling parameters - sampler_kwargs = { - 'num_reads': self.params.num_reads, - 'answer_mode': 'raw' - } - - if not self.params.use_mock: - sampler_kwargs.update({ - 'fast_anneal': True, - 'annealing_time': self.params.anneal_time / 1000, - 'auto_scale': False - }) - - if self.params.use_shim: - sampler_kwargs.update({'readout_thermalization': 100., - 'auto_scale': False, - 'flux_drift_compensation': True, - 'flux_biases': [0] * base_sampler.properties['num_qubits']}) - - # Perform sampling - response = sampler.sample_ising( - ising_model['h'], - ising_model['j'], - **sampler_kwargs - ) - - # Process samples - samples = np.array(response.record.sample) - - # todo this is in the wrong order - # Apply gauge transform if enabled - if self.params.use_gauge_transform: - flip_indices = np.random.choice( - [0, 1], - size=samples.shape[1], - p=[0.5, 0.5] - ).nonzero()[0] - samples = self._apply_gauge_transform(samples, flip_indices) - - # Stack samples for all embeddings - processed_samples = samples[:, :num_vertices] - for k in range(1, num_embeddings): - processed_samples = np.vstack(( - processed_samples, - samples[:, k*num_vertices:(k+1)*num_vertices] - )) - - total_samples = np.vstack((total_samples, processed_samples)) - - # Remove initial zero row - total_samples = np.delete(total_samples, 0, axis=0) - - # Handle isolated vertices - isolated_vertices = self._find_isolated_vertices(game_state) - if isolated_vertices: - random_samples = np.random.choice( - [1, -1], - size=(total_samples.shape[0], len(isolated_vertices)) - ) - for i, vertex in enumerate(isolated_vertices): - total_samples[:, vertex] = random_samples[:, i] - - # Calculate correlation matrix - sample_count = (self.params.num_reads * num_embeddings * self.params.num_chip_runs) - - correlation_matrix = ( - np.einsum('si,sj->ij', total_samples, total_samples) / sample_count - - np.eye(num_vertices) - ) - # Compute results - winner, score, influence_vector = self._compute_winner_score_and_influence( - game_state, correlation_matrix - ) - + winner, score, influence_vector = self._compute_winner_score_and_influence(game_state, correlation_matrix) + return AdjudicationResult( game_state=game_state, adjudicator='quantum_annealing', From b543d6a7dbc40a32c7320f893272fe7e83c77062 Mon Sep 17 00:00:00 2001 From: Geordie Rose Date: Fri, 31 Jan 2025 12:35:04 -0800 Subject: [PATCH 18/40] PEP --- tangled_adjudicate/adjudicators/schrodinger.py | 1 + 1 file changed, 1 insertion(+) diff --git a/tangled_adjudicate/adjudicators/schrodinger.py b/tangled_adjudicate/adjudicators/schrodinger.py index ff3506f..7c0d377 100644 --- a/tangled_adjudicate/adjudicators/schrodinger.py +++ b/tangled_adjudicate/adjudicators/schrodinger.py @@ -4,6 +4,7 @@ from .adjudicator import Adjudicator, GameState, AdjudicationResult + class SchrodingerEquationAdjudicator(Adjudicator): """Adjudicator implementation using Schrödinger equation evolution.""" From 5178d5245efad5df2e155d59fc26fba126c67dfa Mon Sep 17 00:00:00 2001 From: Geordie Rose Date: Fri, 31 Jan 2025 12:39:12 -0800 Subject: [PATCH 
19/40] PEP and removed the remove vertices stuff (not required for SA) --- .../adjudicators/simulated_annealing.py | 25 ++++++------------- 1 file changed, 8 insertions(+), 17 deletions(-) diff --git a/tangled_adjudicate/adjudicators/simulated_annealing.py b/tangled_adjudicate/adjudicators/simulated_annealing.py index 73250a3..e8bb234 100644 --- a/tangled_adjudicate/adjudicators/simulated_annealing.py +++ b/tangled_adjudicate/adjudicators/simulated_annealing.py @@ -4,6 +4,7 @@ from .adjudicator import Adjudicator, GameState, AdjudicationResult + class SimulatedAnnealingAdjudicator(Adjudicator): """Adjudicator implementation using simulated annealing.""" @@ -82,24 +83,14 @@ def adjudicate(self, game_state: GameState) -> AdjudicationResult: # Calculate correlation matrix samples = np.array(response.record.sample, dtype=float) - correlation_matrix = ( - np.einsum('si,sj->ij', samples, samples) / self.num_reads - - np.eye(game_state['num_nodes']) - ) - - # Handle isolated vertices - isolated_vertices = self._find_isolated_vertices(game_state) - if isolated_vertices: - samples = np.random.choice([1, -1], size=(self.num_reads, len(isolated_vertices))) - for i, vertex in enumerate(isolated_vertices): - correlation_matrix[:, vertex] = np.mean(samples[:, i]) - correlation_matrix[vertex, :] = np.mean(samples[:, i]) - correlation_matrix[vertex, vertex] = 0 - + + # creates symmetric matrix with zeros on diagonal (so that self-correlation of one is not counted) -- this is + # the standard for computing influence vector + correlation_matrix = (np.einsum('si,sj->ij', samples, samples) / self.num_reads - + np.eye(game_state['num_nodes'])) + # Compute results - winner, score, influence_vector = self._compute_winner_score_and_influence( - game_state, correlation_matrix - ) + winner, score, influence_vector = self._compute_winner_score_and_influence(game_state, correlation_matrix) return AdjudicationResult( game_state=game_state, From 3a2e57542971306b2678e66e0c3961ec96abf54a Mon Sep 17 00:00:00 2001 From: Geordie Rose Date: Fri, 31 Jan 2025 14:35:47 -0800 Subject: [PATCH 20/40] removed references to old adjudicators --- .../utils/how_to_adjudicate_states.py | 105 ++++-------------- 1 file changed, 23 insertions(+), 82 deletions(-) diff --git a/tangled_adjudicate/utils/how_to_adjudicate_states.py b/tangled_adjudicate/utils/how_to_adjudicate_states.py index f209542..ba07f1d 100644 --- a/tangled_adjudicate/utils/how_to_adjudicate_states.py +++ b/tangled_adjudicate/utils/how_to_adjudicate_states.py @@ -1,45 +1,36 @@ """ how to use provided solvers to adjudicate Tangled terminal states """ -import pickle import sys import os -import ast import time import numpy as np -from tangled_adjudicate.adjudicators.adjudicate import old_Adjudicator - from tangled_adjudicate.adjudicators.simulated_annealing import SimulatedAnnealingAdjudicator from tangled_adjudicate.adjudicators.quantum_annealing import QuantumAnnealingAdjudicator from tangled_adjudicate.adjudicators.lookup_table import LookupTableAdjudicator from tangled_adjudicate.adjudicators.schrodinger import SchrodingerEquationAdjudicator -from tangled_adjudicate.utils.parameters import Params -from tangled_adjudicate.utils.game_graph_properties import GraphProperties -from tangled_adjudicate.utils.generate_terminal_states import convert_state_string_to_game_state - def main(): # this code shows how to use the four different adjudicators # there are two example_game_state dictionaries provided, which are terminal states in graph_number 2 and 3 # respectively, that are 
of the sort that are closest to the draw line at score = +- 1/2 - # solver_list = ['simulated_annealing', 'schrodinger_equation', 'quantum_annealing', 'lookup_table'] - solver_list = ['quantum_annealing'] + # set graph_number + graph_number = 2 + + # choose solvers to use + solver_list = ['simulated_annealing', 'schrodinger_equation', 'quantum_annealing', 'lookup_table'] precision_digits = 4 # just to clean up print output np.set_printoptions(suppress=True) # remove scientific notation - params = Params() - old_adjudicator = old_Adjudicator(params) - args = {'data_dir': os.path.join(os.getcwd(), '..', 'data'), - 'graph_number': params.GRAPH_NUMBER, - 'solver_name': params.QC_SOLVER_TO_USE} + 'graph_number': graph_number} example_game_state = None # draw; score=0; ferromagnetic ring - if params.GRAPH_NUMBER == 2: + if graph_number == 2: example_game_state = {'num_nodes': 3, 'edges': [(0, 1, 2), (0, 2, 2), (1, 2, 2)], 'player1_id': 'player1', 'player2_id': 'player2', 'turn_count': 5, 'current_player_index': 1, 'player1_node': 1, 'player2_node': 2} @@ -47,10 +38,10 @@ def main(): # red wins, score +2/3; this is one of the states closest to the draw line # note that quantum_annealing in this default uses the D-Wave mock software solver and won't give # the right answer as its samples aren't unbiased -- if you want the quantum_annealing solver to - # run on hardware set self.USE_MOCK_DWAVE_SAMPLER = False in /utils/parameters.py and ensure you have + # run on hardware set QAParameters.use_mock = False in /adjudicators/quantum_annealing.py and ensure you have # hardware access and everything is set up - if params.GRAPH_NUMBER == 3: + if graph_number == 3: example_game_state = {'num_nodes': 4, 'edges': [(0, 1, 3), (0, 2, 1), (0, 3, 3), (1, 2, 1), (1, 3, 3), (2, 3, 1)], 'player1_id': 'player1', 'player2_id': 'player2', 'turn_count': 8, @@ -59,39 +50,10 @@ def main(): print('this introduction only has included game states for graphs 2 and 3. If you want a different' 'graph please add a new example_game_state here!') - # if 'simulated_annealing' in solver_list: - # sa_adjudicator = SimulatedAnnealingAdjudicator() - # sa_adjudicator.setup() - # start = time.time() - # new_sa_results = sa_adjudicator.adjudicate(example_game_state) - # print('elapsed time for simulated_annealing was', round(time.time() - start, precision_digits), 'seconds.') - # - # if 'quantum_annealing' in solver_list: - # qa_adjudicator = QuantumAnnealingAdjudicator() - # qa_adjudicator.setup(**args) - # new_qa_results = qa_adjudicator.adjudicate(example_game_state) - # - # if 'lookup_table' in solver_list: - # lt_adjudicator = LookupTableAdjudicator() - # lt_adjudicator.setup(**args) - # new_lt_results = lt_adjudicator.adjudicate(example_game_state) - # - # if 'schrodinger_equation' in solver_list: - # se_adjudicator = SchrodingerEquationAdjudicator() - # se_adjudicator.setup() - # new_se_results = se_adjudicator.adjudicate(example_game_state) - for solver_to_use in solver_list: start = time.time() - # equivalent to e.g. 
results = adjudicator.simulated_annealing(example_game_state) - old_results = getattr(old_adjudicator, solver_to_use)(example_game_state) - - print('elapsed time for old', solver_to_use, 'was', round(time.time() - start, precision_digits), 'seconds.') - - start = time.time() - adjudicator = None if solver_to_use == 'simulated_annealing': @@ -107,49 +69,28 @@ def main(): adjudicator = SchrodingerEquationAdjudicator() adjudicator.setup(**args) - new_results = adjudicator.adjudicate(example_game_state) - - print('elapsed time for new', solver_to_use, 'was', round(time.time() - start, precision_digits), 'seconds.') - - if old_results['correlation_matrix'] is None: - print('old correlation matrix:', None) - else: - print('old correlation matrix:') - print(np.round(old_results['correlation_matrix'], precision_digits)) + results = adjudicator.adjudicate(example_game_state) - if new_results['correlation_matrix'] is None: - print('new correlation matrix:', None) - else: - print('new correlation matrix:') - print(np.round(new_results['correlation_matrix'], precision_digits)) - - print('old winner:', old_results['winner']) - print('new winner:', new_results['winner']) + print('elapsed time for', solver_to_use, 'was', round(time.time() - start, precision_digits), 'seconds.') - if old_results['score'] is None: - print('old score:', old_results['score']) + if results['correlation_matrix'] is None: + print('correlation matrix:', None) else: - print('old score:', round(old_results['score'], precision_digits)) + print('correlation matrix:') + print(np.round(results['correlation_matrix'], precision_digits)) - if new_results['score'] is None: - print('new score:', new_results['score']) - else: - print('new score:', round(new_results['score'], precision_digits)) + print('winner:', results['winner']) - if old_results['influence_vector'] is None: - print('old influence vector:', None) + if results['score'] is None: + print('score:', results['score']) else: - print('old influence vector:', [round(old_results['influence_vector'][k], precision_digits) - for k in range(len(old_results['influence_vector']))]) + print('score:', round(results['score'], precision_digits)) - if new_results['influence_vector'] is None: - print('new influence vector:', None) + if results['influence_vector'] is None: + print('influence vector:', None) else: - print('new influence vector:', [round(new_results['influence_vector'][k], precision_digits) - for k in range(len(new_results['influence_vector']))]) - - print() - print() + print('influence vector:', [round(results['influence_vector'][k], precision_digits) + for k in range(len(results['influence_vector']))]) if __name__ == "__main__": From 53051e2b6c88cfae84e26194e1bc5d5f0fd96995 Mon Sep 17 00:00:00 2001 From: Geordie Rose Date: Fri, 31 Jan 2025 21:09:40 -0800 Subject: [PATCH 21/40] changed a couple variable names and the function name of convert_my_game_state_to_erik_game_state --- tangled_adjudicate/utils/utilities.py | 10 ++++------ 1 file changed, 4 insertions(+), 6 deletions(-) diff --git a/tangled_adjudicate/utils/utilities.py b/tangled_adjudicate/utils/utilities.py index 48ade0a..8a219ca 100644 --- a/tangled_adjudicate/utils/utilities.py +++ b/tangled_adjudicate/utils/utilities.py @@ -85,9 +85,11 @@ def convert_erik_game_state_to_my_game_state(game_state): return my_state -def convert_to_erik_game_state_for_adjudication(my_state, number_of_vertices, list_of_edge_tuples): +def convert_my_game_state_to_erik_game_state(my_state, number_of_vertices, list_of_edge_tuples): 
my_vertices = my_state[:number_of_vertices] + my_edges = my_state[number_of_vertices:] + turn_count = 0 try: @@ -102,16 +104,12 @@ def convert_to_erik_game_state_for_adjudication(my_state, number_of_vertices, li except ValueError: player_2_vertex = -1 - my_edges = my_state[number_of_vertices:] - turn_count += my_edges.count(1) + my_edges.count(2) + my_edges.count(3) # if turn_count is even, it's player 1 (red)'s turn current_player_idx = 1 if turn_count % 2 == 0 else 2 - erik_edges = [] - for k in range(len(list_of_edge_tuples)): - erik_edges.append((list_of_edge_tuples[k][0], list_of_edge_tuples[k][1], my_edges[k])) + erik_edges = [(list_of_edge_tuples[k][0], list_of_edge_tuples[k][1], my_edges[k]) for k in range(len(my_edges))] game_state = {'num_nodes': number_of_vertices, # 'edges': [(0, 1, 3), (0, 2, 1), (0, 3, 3), (1, 2, 1), (1, 3, 3), (2, 3, 1)], From f5208b05299aae650718fd37ac04317fb5305088 Mon Sep 17 00:00:00 2001 From: Geordie Rose Date: Fri, 31 Jan 2025 21:11:05 -0800 Subject: [PATCH 22/40] removed all references to old_adjudicator --- .../utils/adjudicate_all_terminal_states.py | 51 +++++++++++++------ 1 file changed, 36 insertions(+), 15 deletions(-) diff --git a/tangled_adjudicate/utils/adjudicate_all_terminal_states.py b/tangled_adjudicate/utils/adjudicate_all_terminal_states.py index 7157885..ab6bfb7 100644 --- a/tangled_adjudicate/utils/adjudicate_all_terminal_states.py +++ b/tangled_adjudicate/utils/adjudicate_all_terminal_states.py @@ -1,17 +1,19 @@ """ generate and adjudicate all Tangled terminal states for tiny graphs """ - import sys import os import time import pickle import numpy as np -from tangled_adjudicate.adjudicators.adjudicate import Adjudicator +from tangled_adjudicate.adjudicators.simulated_annealing import SimulatedAnnealingAdjudicator +from tangled_adjudicate.adjudicators.quantum_annealing import QuantumAnnealingAdjudicator +from tangled_adjudicate.adjudicators.lookup_table import LookupTableAdjudicator +from tangled_adjudicate.adjudicators.schrodinger import SchrodingerEquationAdjudicator + from tangled_adjudicate.utils.generate_terminal_states import generate_all_tangled_terminal_states -from tangled_adjudicate.utils.parameters import Params -def generate_adjudication_results_for_all_terminal_states(solver_to_use): +def generate_adjudication_results_for_all_terminal_states(graph_number, solver_to_use): # uses up to three different adjudicators provided to evaluate all unique terminal states for tiny graphs # (in the default here, graphs 2 and 3). Note this only works for tiny graphs as the number of terminal states # grows like 3 ** edge_count. @@ -20,19 +22,35 @@ def generate_adjudication_results_for_all_terminal_states(solver_to_use): # that hasn't been called yet, it adds that key and its results. If you call it in a case where there are # already results, it will ask you if you want to overwrite them. - if solver_to_use not in ['simulated_annealing', 'schrodinger_equation', 'quantum_annealing']: + if solver_to_use not in ['simulated_annealing', 'schrodinger_equation', 'quantum_annealing', 'lookup_table']: sys.exit(print('the solver' + solver_to_use + 'is not in the allowed list -- please take a look!')) precision_digits = 4 # just to clean up print output np.set_printoptions(suppress=True) # remove scientific notation - params = Params() # your graph_number will be set here, make sure it's what you want! 
- adjudicator = Adjudicator(params) - game_states = generate_all_tangled_terminal_states(params.GRAPH_NUMBER) + adjudicator = None + + args = {'data_dir': os.path.join(os.getcwd(), '..', 'data'), + 'graph_number': graph_number} + + if solver_to_use == 'simulated_annealing': + adjudicator = SimulatedAnnealingAdjudicator() + else: + if solver_to_use == 'quantum_annealing': + adjudicator = QuantumAnnealingAdjudicator() + else: + if solver_to_use == 'lookup_table': + adjudicator = LookupTableAdjudicator() + else: + if solver_to_use == 'schrodinger_equation': + adjudicator = SchrodingerEquationAdjudicator() + + adjudicator.setup(**args) + + game_states = generate_all_tangled_terminal_states(graph_number) - file_name_prefix = "graph_" + str(params.GRAPH_NUMBER) data_dir = os.path.join(os.getcwd(), '..', 'data') - file_path = os.path.join(data_dir, file_name_prefix + "_terminal_states_adjudication_results.pkl") + file_path = os.path.join(data_dir, "graph_" + str(graph_number) + "_terminal_states_adjudication_results.pkl") if os.path.isfile(file_path): with open(file_path, "rb") as fp: @@ -42,16 +60,18 @@ def generate_adjudication_results_for_all_terminal_states(solver_to_use): # at this point, either we have loaded some adjudication_results from an existing file, or we have a new empty dict if solver_to_use in adjudication_results: # this means we loaded this in already - user_input = input('results already exist, overwrite (y/n)?') + user_input = input('results already exist for ' + solver_to_use + ', overwrite (y/n)?') if user_input.lower() != 'y': - sys.exit(print('exiting!')) + return None # now we proceed to compute and store result print('beginning adjudication using the ' + solver_to_use + ' solver...') start = time.time() adjudication_results[solver_to_use] = {} + for k, v in game_states.items(): - adjudication_results[solver_to_use][k] = getattr(adjudicator, solver_to_use)(v['game_state']) + adjudication_results[solver_to_use][k] = adjudicator.adjudicate(v['game_state']) + print('elapsed time was', round(time.time() - start, precision_digits), 'seconds.') # store it -- this should leave any previously loaded solver results intact @@ -61,10 +81,11 @@ def generate_adjudication_results_for_all_terminal_states(solver_to_use): def main(): - solver_list = ['simulated_annealing', 'schrodinger_equation', 'quantum_annealing'] + graph_number = 2 + solver_list = ['simulated_annealing', 'schrodinger_equation', 'quantum_annealing', 'lookup_table'] for solver_to_use in solver_list: - generate_adjudication_results_for_all_terminal_states(solver_to_use) + generate_adjudication_results_for_all_terminal_states(graph_number, solver_to_use) if __name__ == "__main__": From 50a6df4af1ac5bf7c482873d3518a9edce3a4b4a Mon Sep 17 00:00:00 2001 From: Geordie Rose Date: Fri, 31 Jan 2025 21:11:45 -0800 Subject: [PATCH 23/40] WIP need to integrate bug fixes from tangled-cruft/visualize_and_enumerate_ky --- .../utils/generate_terminal_states.py | 78 +++++++++++-------- 1 file changed, 46 insertions(+), 32 deletions(-) diff --git a/tangled_adjudicate/utils/generate_terminal_states.py b/tangled_adjudicate/utils/generate_terminal_states.py index 931a7b0..cb625a0 100644 --- a/tangled_adjudicate/utils/generate_terminal_states.py +++ b/tangled_adjudicate/utils/generate_terminal_states.py @@ -9,29 +9,44 @@ from tangled_adjudicate.utils.game_graph_properties import GraphProperties from tangled_adjudicate.utils.find_graph_automorphisms import get_automorphisms - - -def convert_state_string_to_game_state(graph, 
terminal_state_string): - - vertex_list = terminal_state_string[:graph.vertex_count] - edge_list = terminal_state_string[graph.vertex_count:] - edges = [(graph.edge_list[k][0], graph.edge_list[k][1], edge_list[k]) for k in range(len(edge_list))] - - turn_count = vertex_list.count(1) + vertex_list.count(2) + len(edge_list) - edge_list.count(0) - - # if turn_count is even, it's red's turn - if not turn_count % 2: - current_player_index = 1 - else: - current_player_index = 2 - - game_state = {'num_nodes': graph.vertex_count, 'edges': edges, - 'player1_id': 'player1', 'player2_id': 'player2', 'turn_count': turn_count, - 'current_player_index': current_player_index, - 'player1_node': vertex_list.index(1), 'player2_node': vertex_list.index(2)} - - return game_state - +from tangled_adjudicate.utils.utilities import convert_my_game_state_to_erik_game_state + + +# def convert_state_string_to_game_state(my_state, number_of_vertices, list_of_edge_tuples): +# +# my_vertices = my_state[:number_of_vertices] +# my_edges = my_state[number_of_vertices:] +# +# turn_count = 0 +# +# try: +# player_1_vertex = my_vertices.index(1) +# turn_count += 1 +# except ValueError: +# player_1_vertex = -1 +# +# try: +# player_2_vertex = my_vertices.index(2) +# turn_count += 1 +# except ValueError: +# player_2_vertex = -1 +# +# turn_count += my_edges.count(1) + my_edges.count(2) + my_edges.count(3) +# +# # if turn_count is even, it's player 1 (red)'s turn +# current_player_idx = 1 if turn_count % 2 == 0 else 2 +# +# erik_edges = [(list_of_edge_tuples[k][0], list_of_edge_tuples[k][1], my_edges[k]) for k in range(len(my_edges))] +# +# game_state = {'num_nodes': number_of_vertices, 'edges': erik_edges, +# 'player1_id': 'player1', 'player2_id': 'player2', 'turn_count': turn_count, +# 'current_player_index': current_player_idx, +# 'player1_node': player_1_vertex, 'player2_node': player_2_vertex} +# +# return game_state + + +# todo change this to the visualize_and_enumerate code def generate_all_tangled_terminal_states(graph_number): # this loads or generates all possible terminal game states for the graph indexed by graph_number and groups them @@ -47,8 +62,7 @@ def generate_all_tangled_terminal_states(graph_number): graph = GraphProperties(graph_number) script_dir = os.path.dirname(os.path.abspath(__file__)) # Get the directory of the current script data_dir = os.path.join(script_dir, '..', 'data') - file_path = os.path.join(data_dir, - "graph_" + str(graph_number) + "_unique_terminal_states.pkl") + file_path = os.path.join(data_dir, "graph_" + str(graph_number) + "_unique_terminal_states.pkl") if os.path.isfile(file_path): # if the file already exists, just load it with open(file_path, "rb") as fp: @@ -129,10 +143,10 @@ def generate_all_tangled_terminal_states(graph_number): game_states = {} - for each in terminal_states: - game_states[str(each)] = {} - game_states[str(each)]['game_state'] = convert_state_string_to_game_state(graph, each) - game_states[str(each)]['automorphisms'] = good_states[str(each)] + for my_game_state in terminal_states: + game_states[str(my_game_state)] = {} + game_states[str(my_game_state)]['game_state'] = convert_my_game_state_to_erik_game_state(my_game_state, graph.vertex_count, graph.edge_list) + game_states[str(my_game_state)]['automorphisms'] = good_states[str(my_game_state)] data_dir = os.path.join(os.getcwd(), '..', 'data') @@ -145,9 +159,9 @@ def generate_all_tangled_terminal_states(graph_number): def main(): # this generates all terminal states for graphs 2 and 3 - for graph_number in 
range(2, 4): - gs = generate_all_tangled_terminal_states(graph_number) - + gs2 = generate_all_tangled_terminal_states(graph_number=2) + gs3 = generate_all_tangled_terminal_states(graph_number=3) + print() if __name__ == "__main__": sys.exit(main()) From 9e15235b12287a61ef13d3f94afebc89372e0943 Mon Sep 17 00:00:00 2001 From: Geordie Rose Date: Sat, 1 Feb 2025 09:59:46 -0800 Subject: [PATCH 24/40] Integrated bug fixes from tangled-cruft/visualize_and_enumerate_k4 --- .../utils/generate_terminal_states.py | 154 ++++++++---------- 1 file changed, 67 insertions(+), 87 deletions(-) diff --git a/tangled_adjudicate/utils/generate_terminal_states.py b/tangled_adjudicate/utils/generate_terminal_states.py index cb625a0..39f8bee 100644 --- a/tangled_adjudicate/utils/generate_terminal_states.py +++ b/tangled_adjudicate/utils/generate_terminal_states.py @@ -12,56 +12,27 @@ from tangled_adjudicate.utils.utilities import convert_my_game_state_to_erik_game_state -# def convert_state_string_to_game_state(my_state, number_of_vertices, list_of_edge_tuples): -# -# my_vertices = my_state[:number_of_vertices] -# my_edges = my_state[number_of_vertices:] -# -# turn_count = 0 -# -# try: -# player_1_vertex = my_vertices.index(1) -# turn_count += 1 -# except ValueError: -# player_1_vertex = -1 -# -# try: -# player_2_vertex = my_vertices.index(2) -# turn_count += 1 -# except ValueError: -# player_2_vertex = -1 -# -# turn_count += my_edges.count(1) + my_edges.count(2) + my_edges.count(3) -# -# # if turn_count is even, it's player 1 (red)'s turn -# current_player_idx = 1 if turn_count % 2 == 0 else 2 -# -# erik_edges = [(list_of_edge_tuples[k][0], list_of_edge_tuples[k][1], my_edges[k]) for k in range(len(my_edges))] -# -# game_state = {'num_nodes': number_of_vertices, 'edges': erik_edges, -# 'player1_id': 'player1', 'player2_id': 'player2', 'turn_count': turn_count, -# 'current_player_index': current_player_idx, -# 'player1_node': player_1_vertex, 'player2_node': player_2_vertex} -# -# return game_state - - -# todo change this to the visualize_and_enumerate code - def generate_all_tangled_terminal_states(graph_number): # this loads or generates all possible terminal game states for the graph indexed by graph_number and groups them # into lists where each member of the list is connected by an automorphism. Running this function requires either - # loading or generating an automorphism file.The dictionary game_states has as its key a string with the canonical + # loading or generating an automorphism file. The dictionary game_states has as its key a string with the canonical # member of each of these, with the further ['automorphisms'] key being a list of all the states that are symmetries # of the canonical key. The key ['game_state'] is the representation of the key as a game_state object. # # Note that this requires enumerating all possible terminal states, the number of which is # (vertex_count choose 2) * 2 * 3**edge_count, which grows exponentially with edge count. You can do this easily # for graph_number 1, 2, 3, 4, but 5 and up get stupidly large. 
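    # A quick worked check of that formula: graph 2 has 3 vertices and 3 edges, so
    # C(3, 2) * 2 * 3**3 = 3 * 2 * 27 = 162 raw terminal states, while graph 3 has
    # 4 vertices and 6 edges, giving C(4, 2) * 2 * 3**6 = 6 * 2 * 729 = 8748; the
    # automorphism grouping below reduces these to the 27 and 405 unique keys noted next.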
+ # + # graph_number 2 should have 27 keys, and each ['automorphisms'] sub-key should have 6 entries + # graph_number 3 should have 405 keys, and each ['automorphisms'] sub-key should have 12-24 entries (the reason + # why there aren't always 24 is that for some of these keys different automorphisms bring you to the same state) graph = GraphProperties(graph_number) + script_dir = os.path.dirname(os.path.abspath(__file__)) # Get the directory of the current script data_dir = os.path.join(script_dir, '..', 'data') + list_of_automorphisms = get_automorphisms(graph_number, data_dir=data_dir) + file_path = os.path.join(data_dir, "graph_" + str(graph_number) + "_unique_terminal_states.pkl") if os.path.isfile(file_path): # if the file already exists, just load it @@ -71,13 +42,11 @@ def generate_all_tangled_terminal_states(graph_number): # add check to make sure you don't ask for something too large print('***************************') user_input = input('There are ' + str(math.comb(graph.vertex_count, 2) * 2 * 3**graph.edge_count) + - ' terminal states -- proceed (y/n)?') + ' total non-unique terminal states -- proceed (y/n)?') if user_input.lower() != 'y': sys.exit(print('exiting...')) print('***************************') - list_of_automorphisms = get_automorphisms(graph_number, data_dir=data_dir) - possible_vertex_states = [] for positions in itertools.permutations(range(graph.vertex_count), 2): lst = [0] * graph.vertex_count @@ -90,65 +59,76 @@ def generate_all_tangled_terminal_states(graph_number): elements = [1, 2, 3] possible_edge_states = list(itertools.product(elements, repeat=graph.edge_count)) + # all_states is a list of lists enumerating ALL the game states all_states = [j + list(k) for k in possible_edge_states for j in possible_vertex_states] - same_group_of_states = {} + # this next part creates a dictionary where the keys are each of the elements of all_states and the values are + # lists of all the states connected to the key by an automorphism. 
Note that different automorphisms can lead + # to the same state, so at some point the list is converted to a set and then back to a list + + all_states_with_symmetries = {} + all_states_no_symmetries = {} + # iterate over all enumerated states for state in all_states: + + # create a list for all the symmetric states + list_of_states_connected_by_symmetry = [] + + # get indices of the red and blue vertices only_vertices = state[:graph.vertex_count] red_vertex_index = only_vertices.index(1) blue_vertex_index = only_vertices.index(2) - same_group_of_states[str(state)] = [] + + # iterate over all automorphisms for automorph in list_of_automorphisms: - new_red_vertex_index = automorph[red_vertex_index] - new_blue_vertex_index = automorph[blue_vertex_index] - transformed_each = [0] * graph.vertex_count - transformed_each[new_red_vertex_index] = 1 - transformed_each[new_blue_vertex_index] = 2 - - edge = np.zeros((graph.vertex_count, graph.vertex_count)) - new_edge = np.zeros((graph.vertex_count, graph.vertex_count)) - cnt = graph.vertex_count - for j in range(graph.vertex_count): - for i in range(j): - edge[i, j] = state[cnt] - cnt += 1 - - cnt = graph.vertex_count - for j in range(graph.vertex_count): - for i in range(j): - if automorph[i] < automorph[j]: - new_edge[i, j] = edge[automorph[i], automorph[j]] - else: - new_edge[i, j] = edge[automorph[j], automorph[i]] - cnt += 1 - - for j in range(graph.vertex_count): - for i in range(j): - transformed_each.append(int(new_edge[i, j])) - same_group_of_states[str(state)].append(transformed_each) - - good_states = {} - cnt = 0 - for k, v in same_group_of_states.items(): - if not cnt % (math.comb(graph.vertex_count, 2) * 2): # 4 choose 2 = 6 * 2 = 12 ..... 3 choose 2 = 3 *2 = 6 math.comb(graph.vertex_count, 2) * 2 - good_states[k] = v - cnt += 1 - - terminal_states = [] - for k, v in good_states.items(): - terminal_states.append(ast.literal_eval(k)) - - print('there are', len(terminal_states), 'unique terminal states. 
Writing to disk ...') + + # initialize the state we want to compute (transforming state under automorph) + state_transformed_under_automorph = [0] * graph.vertex_count + + # write transformed vertices into the transformed state -- this finishes the vertex part + state_transformed_under_automorph[automorph[red_vertex_index]] = 1 + state_transformed_under_automorph[automorph[blue_vertex_index]] = 2 + + # now we want to transform the edges under the automorphism + for edge_idx in range(graph.edge_count): + first_vertex = automorph[graph.edge_list[edge_idx][0]] + second_vertex = automorph[graph.edge_list[edge_idx][1]] + if first_vertex < second_vertex: + transformed_edge = (first_vertex, second_vertex) + else: + transformed_edge = (second_vertex, first_vertex) + + transformed_edge_idx = graph.edge_list.index(transformed_edge) + + state_transformed_under_automorph.append(state[graph.vertex_count + transformed_edge_idx]) + + list_of_states_connected_by_symmetry.append(str(state_transformed_under_automorph)) + + # remove duplicates + all_states_with_symmetries[str(state)] = list(dict.fromkeys(list_of_states_connected_by_symmetry)) + all_states_no_symmetries[str(state)] = list_of_states_connected_by_symmetry + + sorted_all_states_with_symmetries = dict(sorted(all_states_with_symmetries.items())) + + uniques = [] + duplicates = [] + + for k, v in sorted_all_states_with_symmetries.items(): + if k not in duplicates: + uniques.append(k) + for j in range(1, len(v)): + duplicates.append(v[j]) + + unique_terminal_states = [ast.literal_eval(k) for k in uniques] + print('there are', len(unique_terminal_states), 'unique terminal states. Writing to disk ...') game_states = {} - for my_game_state in terminal_states: + for my_game_state in unique_terminal_states: game_states[str(my_game_state)] = {} game_states[str(my_game_state)]['game_state'] = convert_my_game_state_to_erik_game_state(my_game_state, graph.vertex_count, graph.edge_list) - game_states[str(my_game_state)]['automorphisms'] = good_states[str(my_game_state)] - - data_dir = os.path.join(os.getcwd(), '..', 'data') + game_states[str(my_game_state)]['automorphisms'] = all_states_with_symmetries[str(my_game_state)] with open(os.path.join(data_dir, "graph_" + str(graph_number) + "_unique_terminal_states.pkl"), "wb") as fp: pickle.dump(game_states, fp) @@ -161,7 +141,7 @@ def main(): # this generates all terminal states for graphs 2 and 3 gs2 = generate_all_tangled_terminal_states(graph_number=2) gs3 = generate_all_tangled_terminal_states(graph_number=3) - print() + if __name__ == "__main__": sys.exit(main()) From 546331752e9aa5a40c783ac4116a69481f2565be Mon Sep 17 00:00:00 2001 From: Geordie Rose Date: Sat, 1 Feb 2025 10:03:32 -0800 Subject: [PATCH 25/40] fragile and not great but enough to check adjudication results --- .../utils/compare_adjudication_results.py | 26 ++++++++++++------- 1 file changed, 16 insertions(+), 10 deletions(-) diff --git a/tangled_adjudicate/utils/compare_adjudication_results.py b/tangled_adjudicate/utils/compare_adjudication_results.py index 168fe90..9ffe222 100644 --- a/tangled_adjudicate/utils/compare_adjudication_results.py +++ b/tangled_adjudicate/utils/compare_adjudication_results.py @@ -7,10 +7,8 @@ def compare_adjudication_results(graph_number, solvers_to_use): - # solvers_to_use is a list of solvers of length either 2 or 3 comprising 2 or 3 of - # ['schrodinger_equation', 'simulated_annealing', 'quantum_annealing'] - # - # indexing_solvers = {1: 'schrodinger_equation', 2: 'simulated_annealing', 3: 'quantum_annealing'} 
+ # solvers_to_use is a list of solvers of length 2, 3, or 4 comprising 2, 3, or 4 of + # ['schrodinger_equation', 'simulated_annealing', 'quantum_annealing', 'lookup_table'] # load adjudication results obtained from running /utils/adjudicate_all_terminal_states.py data_dir = os.path.join(os.getcwd(), '..', 'data') @@ -42,7 +40,7 @@ def compare_adjudication_results(graph_number, solvers_to_use): for k0, value_dict in adjudication_results.items(): # k will be solver name string if k0 in solvers_to_use: # if we want to add this, add it for k1, v in value_dict.items(): - game_result[k1].append([k0, v['winner'], v['score']]) + game_result[k1].append([k0, v['winner'], v['score']]) # score will be None for lookup_table comparisons = {} for k, v in game_result.items(): # k is game state string @@ -60,7 +58,14 @@ def compare_adjudication_results(graph_number, solvers_to_use): to_plot = [] for k, v in scores.items(): - to_plot.append(v) + if v[0] is not None: + to_plot.append(v) + + if 'lookup_table' in solvers_to_use: + solvers_to_use.remove('lookup_table') + + if len(solvers_to_use) < 2: + print('need at least two of SA, QA, SE to generate score comparisons... lookup_table does not generate scores!') red_text = solvers_to_use[0] + ': red' blue_text = solvers_to_use[1] + ': blue' @@ -93,7 +98,7 @@ def compare_adjudication_results(graph_number, solvers_to_use): if graph_number == 3: if len(to_plot) == 2: - plt.hist(to_plot, range=[-2, 2], bins=400, color=['red', 'blue'], stacked=True) + plt.hist(to_plot, range=[-4, 4], bins=800, color=['red', 'blue'], stacked=True) else: plt.hist(to_plot, range=[-4, 4], bins=800, color=['red', 'blue', 'cyan'], stacked=True) @@ -115,10 +120,11 @@ def compare_adjudication_results(graph_number, solvers_to_use): def main(): - solvers_to_use = ['simulated_annealing', 'schrodinger_equation', 'quantum_annealing'] + solvers_to_use = ['simulated_annealing', 'schrodinger_equation', 'quantum_annealing', 'lookup_table'] + compare_adjudication_results(graph_number=2, solvers_to_use=solvers_to_use) - for graph_number in range(2, 4): - compare_adjudication_results(graph_number=graph_number, solvers_to_use=solvers_to_use) + solvers_to_use = ['simulated_annealing', 'quantum_annealing', 'lookup_table'] + compare_adjudication_results(graph_number=3, solvers_to_use=solvers_to_use) if __name__ == "__main__": From 3ad6120c32de7790a7e84b803895f444a3d37fbe Mon Sep 17 00:00:00 2001 From: Geordie Rose Date: Sat, 1 Feb 2025 10:07:43 -0800 Subject: [PATCH 26/40] moved over to new way of calling adjudicators and checked vs graph 2 and 3 results -- seems all good! 
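For reference, the "new way of calling adjudicators" here is the subclass interface: construct an adjudicator, call setup(), then adjudicate(game_state). Below is a minimal usage sketch (not part of the diff), assuming the SimulatedAnnealingAdjudicator import path used in this series and reusing the example game state from the adjudicator docstrings; the 'winner' and 'score' keys of the returned result are the same ones the comparison script reads.

    from tangled_adjudicate.adjudicators.simulated_annealing import SimulatedAnnealingAdjudicator

    # terminal state on the complete 6-vertex graph, copied from the docstring example
    game_state = {'num_nodes': 6,
                  'edges': [(0, 1, 1), (0, 2, 1), (0, 3, 2), (0, 4, 3), (0, 5, 2), (1, 2, 1),
                            (1, 3, 2), (1, 4, 3), (1, 5, 3), (2, 3, 1), (2, 4, 2), (2, 5, 3),
                            (3, 4, 2), (3, 5, 1), (4, 5, 2)],
                  'player1_id': 'player1', 'player2_id': 'player2', 'turn_count': 17,
                  'current_player_index': 1, 'player1_node': 1, 'player2_node': 3}

    adjudicator = SimulatedAnnealingAdjudicator()
    adjudicator.setup()  # defaults; keyword overrides such as beta_max are validated in setup()
    result = adjudicator.adjudicate(game_state)
    print(result['winner'], result['score'])  # 'red', 'blue' or 'draw', plus the influence-score difference
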
--- tangled_adjudicate/utils/adjudicate_all_terminal_states.py | 3 +++ 1 file changed, 3 insertions(+) diff --git a/tangled_adjudicate/utils/adjudicate_all_terminal_states.py b/tangled_adjudicate/utils/adjudicate_all_terminal_states.py index ab6bfb7..a9c749d 100644 --- a/tangled_adjudicate/utils/adjudicate_all_terminal_states.py +++ b/tangled_adjudicate/utils/adjudicate_all_terminal_states.py @@ -81,6 +81,9 @@ def generate_adjudication_results_for_all_terminal_states(graph_number, solver_t def main(): + # note: generating all schrodinger_equation adjudication results for graph 3 or bigger takes forever + # I spot checked new subclass version and all spot checks were good + graph_number = 2 solver_list = ['simulated_annealing', 'schrodinger_equation', 'quantum_annealing', 'lookup_table'] From 6dfad1498a3139f6c06619fa40035640b9952c36 Mon Sep 17 00:00:00 2001 From: Geordie Rose Date: Sat, 1 Feb 2025 10:11:47 -0800 Subject: [PATCH 27/40] moved Params and MinimalAdjudicationParameters into adjudicate --- tangled_adjudicate/adjudicators/adjudicate.py | 45 +++++++++++++++++-- tangled_adjudicate/utils/parameters.py | 41 ----------------- 2 files changed, 42 insertions(+), 44 deletions(-) delete mode 100644 tangled_adjudicate/utils/parameters.py diff --git a/tangled_adjudicate/adjudicators/adjudicate.py b/tangled_adjudicate/adjudicators/adjudicate.py index 3aec01f..8ec6160 100644 --- a/tangled_adjudicate/adjudicators/adjudicate.py +++ b/tangled_adjudicate/adjudicators/adjudicate.py @@ -1,5 +1,4 @@ -""" Adjudicator class for Tangled game states using Schrödinger Equation, Simulated Annealing, D-Wave hardware, and -Look Up table """ +""" old style -- moved Params and MinimalAdjudicationParameters here -- to be deprecated""" import sys import os import random @@ -12,13 +11,53 @@ convert_erik_game_state_to_my_game_state) from tangled_adjudicate.utils.find_graph_automorphisms import get_automorphisms from tangled_adjudicate.utils.find_hardware_embeddings import get_embeddings -from tangled_adjudicate.utils.parameters import Params from tangled_adjudicate.schrodinger.schrodinger_functions import evolve_schrodinger from dwave.system import DWaveSampler, FixedEmbeddingComposite from dwave.system.testing import MockDWaveSampler +class Params(object): + def __init__(self): + self.GRAPH_NUMBER = 2 # this is the index of the graph to use; defined in /utils/game_graph_properties.py + # just a reminder which are which: + # 1 = 2 vertices + # 2 = 3 vertices in triangle + # 3 = 4 vertices, 6 edges + # 4 = 6 vertices, 1 hexagon, 6 edges + # 5 = 10 vertices, 15 edges, petersen graph + # 6 = 16 vertices, 32 edges, non-planar, tesseract + + self.EPSILON = 0.5 # this is the boundary between a draw and a win + + self.NUM_READS_SA = 1000 # this is for simulated annealing + + # These are parameters related to the use of QC hardware, if you're not using QC you can just leave these + # The defaults here are no shimming, no gauge transforms, only use M=1 automorphism, and collect a lot of + # samples (N=1000) + + self.USE_QC = True # set to False if you just want to use e.g. 
simulated annealer + self.USE_MOCK_DWAVE_SAMPLER = True # set to True if you want a software version of the hardware (doesn't sample like the HW tho so don't trust it, just for debugging) + self.QC_SOLVER_TO_USE = 'Advantage2_prototype2.6' # modify if you want to use a different QC + + self.NUMBER_OF_CHIP_RUNS = 1 # this is M + self.NUM_READS_QC = 1000 # this is N + self.ANNEAL_TIME_IN_NS = 5 # this is the fastest the QC can sweep + + self.USE_GAUGE_TRANSFORM = False + self.USE_SHIM = False + + self.ALPHA_PHI = 0.00001 + self.SHIM_ITERATIONS = 10 + + +class MinimalAdjudicationParameters(object): + def __init__(self): + self.EPSILON = 0.5 # this is the boundary between a draw and a win + self.USE_QC = False + self.NUM_READS_SA = 1000 # this is for simulated annealing + + class old_Adjudicator(object): def __init__(self, params): self.params = params diff --git a/tangled_adjudicate/utils/parameters.py b/tangled_adjudicate/utils/parameters.py deleted file mode 100644 index 2361743..0000000 --- a/tangled_adjudicate/utils/parameters.py +++ /dev/null @@ -1,41 +0,0 @@ -""" adjudication and support parameters """ - -class Params(object): - def __init__(self): - self.GRAPH_NUMBER = 2 # this is the index of the graph to use; defined in /utils/game_graph_properties.py - # just a reminder which are which: - # 1 = 2 vertices - # 2 = 3 vertices in triangle - # 3 = 4 vertices, 6 edges - # 4 = 6 vertices, 1 hexagon, 6 edges - # 5 = 10 vertices, 15 edges, petersen graph - # 6 = 16 vertices, 32 edges, non-planar, tesseract - - self.EPSILON = 0.5 # this is the boundary between a draw and a win - - self.NUM_READS_SA = 1000 # this is for simulated annealing - - # These are parameters related to the use of QC hardware, if you're not using QC you can just leave these - # The defaults here are no shimming, no gauge transforms, only use M=1 automorphism, and collect a lot of - # samples (N=1000) - - self.USE_QC = True # set to False if you just want to use e.g. 
simulated annealer - self.USE_MOCK_DWAVE_SAMPLER = True # set to True if you want a software version of the hardware (doesn't sample like the HW tho so don't trust it, just for debugging) - self.QC_SOLVER_TO_USE = 'Advantage2_prototype2.6' # modify if you want to use a different QC - - self.NUMBER_OF_CHIP_RUNS = 1 # this is M - self.NUM_READS_QC = 1000 # this is N - self.ANNEAL_TIME_IN_NS = 5 # this is the fastest the QC can sweep - - self.USE_GAUGE_TRANSFORM = False - self.USE_SHIM = False - - self.ALPHA_PHI = 0.00001 - self.SHIM_ITERATIONS = 10 - - -class MinimalAdjudicationParameters(object): - def __init__(self): - self.EPSILON = 0.5 # this is the boundary between a draw and a win - self.USE_QC = False - self.NUM_READS_SA = 1000 # this is for simulated annealing From 71bb9af67fb6b7277369c94a0130584689d3415a Mon Sep 17 00:00:00 2001 From: Geordie Rose Date: Sat, 1 Feb 2025 10:12:39 -0800 Subject: [PATCH 28/40] changed file name to indicate deprecation --- .../adjudicators/{adjudicate.py => deprecated_adjudicate.py} | 0 1 file changed, 0 insertions(+), 0 deletions(-) rename tangled_adjudicate/adjudicators/{adjudicate.py => deprecated_adjudicate.py} (100%) diff --git a/tangled_adjudicate/adjudicators/adjudicate.py b/tangled_adjudicate/adjudicators/deprecated_adjudicate.py similarity index 100% rename from tangled_adjudicate/adjudicators/adjudicate.py rename to tangled_adjudicate/adjudicators/deprecated_adjudicate.py From 9acd462cc1ebace07e687f7eb63b38302b983fea Mon Sep 17 00:00:00 2001 From: Geordie Rose Date: Sat, 1 Feb 2025 10:16:40 -0800 Subject: [PATCH 29/40] removed cruft --- .../adjudicators/deprecated_adjudicate.py | 426 ------------------ tangled_adjudicate/utils/utilities.py | 46 -- 2 files changed, 472 deletions(-) delete mode 100644 tangled_adjudicate/adjudicators/deprecated_adjudicate.py diff --git a/tangled_adjudicate/adjudicators/deprecated_adjudicate.py b/tangled_adjudicate/adjudicators/deprecated_adjudicate.py deleted file mode 100644 index 8ec6160..0000000 --- a/tangled_adjudicate/adjudicators/deprecated_adjudicate.py +++ /dev/null @@ -1,426 +0,0 @@ -""" old style -- moved Params and MinimalAdjudicationParameters here -- to be deprecated""" -import sys -import os -import random -import pickle -import neal -import numpy as np - -from tangled_adjudicate.utils.utilities import (game_state_to_ising_model, game_state_is_terminal, - find_isolated_vertices, get_tso, build_results_dict, - convert_erik_game_state_to_my_game_state) -from tangled_adjudicate.utils.find_graph_automorphisms import get_automorphisms -from tangled_adjudicate.utils.find_hardware_embeddings import get_embeddings -from tangled_adjudicate.schrodinger.schrodinger_functions import evolve_schrodinger - -from dwave.system import DWaveSampler, FixedEmbeddingComposite -from dwave.system.testing import MockDWaveSampler - - -class Params(object): - def __init__(self): - self.GRAPH_NUMBER = 2 # this is the index of the graph to use; defined in /utils/game_graph_properties.py - # just a reminder which are which: - # 1 = 2 vertices - # 2 = 3 vertices in triangle - # 3 = 4 vertices, 6 edges - # 4 = 6 vertices, 1 hexagon, 6 edges - # 5 = 10 vertices, 15 edges, petersen graph - # 6 = 16 vertices, 32 edges, non-planar, tesseract - - self.EPSILON = 0.5 # this is the boundary between a draw and a win - - self.NUM_READS_SA = 1000 # this is for simulated annealing - - # These are parameters related to the use of QC hardware, if you're not using QC you can just leave these - # The defaults here are no shimming, no gauge 
transforms, only use M=1 automorphism, and collect a lot of - # samples (N=1000) - - self.USE_QC = True # set to False if you just want to use e.g. simulated annealer - self.USE_MOCK_DWAVE_SAMPLER = True # set to True if you want a software version of the hardware (doesn't sample like the HW tho so don't trust it, just for debugging) - self.QC_SOLVER_TO_USE = 'Advantage2_prototype2.6' # modify if you want to use a different QC - - self.NUMBER_OF_CHIP_RUNS = 1 # this is M - self.NUM_READS_QC = 1000 # this is N - self.ANNEAL_TIME_IN_NS = 5 # this is the fastest the QC can sweep - - self.USE_GAUGE_TRANSFORM = False - self.USE_SHIM = False - - self.ALPHA_PHI = 0.00001 - self.SHIM_ITERATIONS = 10 - - -class MinimalAdjudicationParameters(object): - def __init__(self): - self.EPSILON = 0.5 # this is the boundary between a draw and a win - self.USE_QC = False - self.NUM_READS_SA = 1000 # this is for simulated annealing - - -class old_Adjudicator(object): - def __init__(self, params): - self.params = params - self.results_dict = None - self.data_dir = os.path.join(os.getcwd(), '..', 'data') - if self.params.USE_QC: # if using QC, get embeddings and automorphisms - self.automorphisms = get_automorphisms(self.params.GRAPH_NUMBER, self.data_dir) - self.embeddings = get_embeddings(self.params.GRAPH_NUMBER, self.params.QC_SOLVER_TO_USE, self.data_dir) - - def compute_winner_score_and_influence_from_correlation_matrix(self, game_state, correlation_matrix): - # correlation_matrix is assumed to be symmetric matrix with zeros on diagonal (so that self-correlation of - # one is not counted) -- this is the standard for computing influence vector - # - # returns: - # winner: if game_state is terminal, string -- one of 'red' (player 1), 'blue' (player 2), 'draw' - # if game_state not terminal, returns None - # score: if game_state is terminal, returns a real number which is the score of the game (difference - # between two players' influences obtained from the influence vector) - # if game_state not terminal, returns None - # influence_vector: a vector of real numbers of length == number of vertices; this stores each vertex's - # influence, which is the sum over all elements of the correlation matrix it is part of - - influence_vector = np.sum(correlation_matrix, axis=0) - - if game_state_is_terminal(game_state): - score = influence_vector[game_state['player1_node']] - influence_vector[game_state['player2_node']] - - if score > self.params.EPSILON: # more positive than epsilon, red wins - winner = 'red' - else: - if score < -self.params.EPSILON: - winner = 'blue' - else: - winner = 'draw' - else: - score = None - winner = None - - return winner, score, influence_vector - - # all four solver functions input game_state, e.g.: - # - # game_state = {'num_nodes': 6, 'edges': [(0, 1, 1), (0, 2, 1), (0, 3, 2), (0, 4, 3), (0, 5, 2), (1, 2, 1), - # (1, 3, 2), (1, 4, 3), (1, 5, 3), (2, 3, 1), (2, 4, 2), (2, 5, 3), (3, 4, 2), (3, 5, 1), (4, 5, 2)], - # 'player1_id': 'player1', 'player2_id': 'player2', 'turn_count': 17, 'current_player_index': 1, - # 'player1_node': 1, 'player2_node': 3} - # - # and return a dictionary that contains the following keys: - # - # 'game_state': a copy of the input game_state dictionary - # 'adjudicator': a string, one of 'simulated_annealing', 'quantum_annealing', 'schrodinger_equation' - # 'winner': if both players have chosen vertices, a string, one of 'red', 'blue', 'draw', otherwise None - # 'score': if both players have chosen vertices, the difference in influence scores as a real number, 
otherwise None - # 'influence_vector': a vector of real numbers of length vertex_count (one real number per vertex in the game graph) - # 'correlation_matrix': symmetric real-valued matrix of spin-spin correlations with zeros on diagonals - # 'parameters': a copy of the parameters dictionary - - def simulated_annealing(self, game_state): - - h, jay = game_state_to_ising_model(game_state) - sampler = neal.SimulatedAnnealingSampler() - - # Approx match: (1) mean energy and (2) rate of local excitations for square-lattice high precision spin glass - # at 5ns (Advantage2 prototype 2.5) - - # Limits relaxation to local minima. Can vary by model/protocol/QPU. Assumes max(|J|) is scaled to 1. - beta_max = 3 - # Limits equilibration. Can vary by model/protocol/QPU - num_sweeps = 16 - beta_range = [1 / np.sqrt(np.sum([Jij ** 2 for Jij in jay.values()]) + 0.001), beta_max] # 0.001 for J==0 - seed = None # Choose seed=None if reproducibility is not desired - - # randomize_order=True implements standard symmetry-respecting Metropolis algorithm - ss = sampler.sample_ising(h, jay, beta_range=beta_range, num_reads=self.params.NUM_READS_SA, - num_sweeps=num_sweeps, randomize_order=True, seed=seed) - - samps = np.array(ss.record.sample, dtype=float) # casting may not be necessary. - - # creates symmetric matrix with zeros on diagonal (so that self-correlation of one is not counted) -- this is - # the standard for computing influence vector - correlation_matrix = (np.einsum('si,sj->ij', samps, samps) / self.params.NUM_READS_SA - - np.eye(int(game_state['num_nodes']))) - - winner, score_difference, influence_vector = ( - self.compute_winner_score_and_influence_from_correlation_matrix(game_state, correlation_matrix)) - - return_dictionary = {'game_state': game_state, 'adjudicator': 'simulated_annealing', - 'winner': winner, 'score': score_difference, 'influence_vector': influence_vector, - 'correlation_matrix': correlation_matrix, 'parameters': self.params} - - return return_dictionary - - def schrodinger_equation(self, game_state): - - h, jay = game_state_to_ising_model(game_state) - - s_min = 0.001 # beginning and ending anneal times - s_max = 0.999 - - correlation_matrix = ( - evolve_schrodinger(h, jay, s_min=s_min, s_max=s_max, tf=self.params.ANNEAL_TIME_IN_NS, - n_qubits=game_state['num_nodes'])) - # what's returned here is upper triangular with zeros on the diagonal, so we need to add the transpose - correlation_matrix = correlation_matrix + correlation_matrix.T - - winner, score_difference, influence_vector = ( - self.compute_winner_score_and_influence_from_correlation_matrix(game_state, correlation_matrix)) - - return_dictionary = {'game_state': game_state, 'adjudicator': 'schrodinger_equation', - 'winner': winner, 'score': score_difference, 'influence_vector': influence_vector, - 'correlation_matrix': correlation_matrix, 'parameters': self.params} - - return return_dictionary - - def quantum_annealing(self, game_state): - - num_vertices = game_state['num_nodes'] # e.g. 3 - num_embeddings = len(self.embeddings) # e.g. 
P=343 - total_samples = np.zeros((1, num_vertices)) # 0th layer to get vstack going, remove at the end - - shim_stats = None - all_samples = None - indices_of_flips = None - - if self.params.USE_MOCK_DWAVE_SAMPLER and self.params.USE_SHIM: - print('D-Wave mock sampler is not set up to use the shimming process, turn shim off if using mock!') - - sampler_kwargs = dict(num_reads=self.params.NUM_READS_QC, - answer_mode='raw') - - if self.params.USE_MOCK_DWAVE_SAMPLER: - base_sampler = MockDWaveSampler(topology_type='zephyr', topology_shape=[6, 4]) - else: - base_sampler = DWaveSampler(solver=self.params.QC_SOLVER_TO_USE) - sampler_kwargs.update({'fast_anneal': True, - 'annealing_time': self.params.ANNEAL_TIME_IN_NS / 1000}) - - if self.params.USE_SHIM: - shim_stats = {'qubit_magnetizations': [], - 'average_absolute_value_of_magnetization': [], - 'all_flux_bias_offsets': []} - sampler_kwargs.update({'readout_thermalization': 100., - 'auto_scale': False, - 'flux_drift_compensation': True, - 'flux_biases': [0] * base_sampler.properties['num_qubits']}) - shim_iterations = self.params.SHIM_ITERATIONS - else: - shim_iterations = 1 # if we don't shim, just run through shim step only once - - # ********************************************************** - # Step 0: convert game_state to the desired base Ising model - # ********************************************************** - - # for tangled, h_j=0 for all vertices j in the game graph, and J_ij is one of +1, -1, or 0 for all vertex - # pairs i,j. I named the "base" values (the actual problem defined on the game graph we are asked to solve) - # base_h (all zero) and base_jay (not all zero). - - base_h, base_jay = game_state_to_ising_model(game_state) - - # this finds any isolated vertices that may be in the graph -- we will replace the samples returned for these - # at the end with true 50/50 statistics, so we don't have to worry about them - - isolated_vertices = find_isolated_vertices(num_vertices, base_jay) - - # We now enter a loop where each pass through the loop programs the chip to specific values of h and J but - # now for the entire chip. We do this by first selecting one automorphism and embedding it in multiple - # parallel ways across the entire chip, and then optionally applying a gauge transform across all the qubits - # used. This latter process chooses different random gauges for each of the embedded instances. - - for _ in range(self.params.NUMBER_OF_CHIP_RUNS): - - # ******************************************************************* - # Step 1: Randomly select an automorphism and embed it multiple times - # ******************************************************************* - - automorphism = random.choice(self.automorphisms) # eg {0:0, 1:2, 2:1} - inverted_automorphism_to_use = {v: k for k, v in automorphism.items()} # swaps key <-> values - - permuted_embedding = [] - - for each_embedding in self.embeddings[:num_embeddings]: # each_embedding is like [1093, 1098, 136]; 343 of these for three-vertex graph - this_embedding = [] - for each_vertex in range(num_vertices): # each_vertex ranges from 0 to 2 - this_embedding.append(each_embedding[inverted_automorphism_to_use[each_vertex]]) - permuted_embedding.append(this_embedding) - - # given that permuted_embedding looks like [[1229, 1235, 563], [872, 242, 866], ...] 
- # this next part converts into the format {0: [1229], 1: [1235], 2: [563], 3: [872], 4: [242], 5: [866]} - - embedding_map = {} - - for embedding_idx in range(num_embeddings): - for each_vertex in range(num_vertices): # up to 0..1037 - embedding_map[num_vertices * embedding_idx + each_vertex] = \ - [permuted_embedding[embedding_idx][each_vertex]] - - # ***************************************************************************************************** - # Step 2: Set h, J parameters for full chip using parallel embeddings of a randomly chosen automorphism - # ***************************************************************************************************** - - # compute full_h and full_j which are h, jay values for the entire chip assuming the above automorphism - # I am calling the problem definition and variable ordering before the automorphism the BLACK or BASE - # situation. After the automorphism the problem definition and variable labels change -- I'm calling the - # situation after the automorphism has been applied the BLUE situation. - - full_h = {} - full_j = {} - - for embedding_idx in range(num_embeddings): - for each_vertex in range(num_vertices): - full_h[num_vertices * embedding_idx + each_vertex] = 0 - - for k, v in base_jay.items(): - edge_under_automorph = (min(automorphism[k[0]], automorphism[k[1]]), - max(automorphism[k[0]], automorphism[k[1]])) - full_j[edge_under_automorph] = v - for j in range(1, num_embeddings): - full_j[(edge_under_automorph[0] + num_vertices * j, - edge_under_automorph[1] + num_vertices * j)] = v - - # ************************************************************************** - # Step 3: Choose random gauge, modify h, J parameters for full chip using it - # ************************************************************************** - - # next we optionally apply a random gauge transformation. I call the situation after the gauge - # transformation has been applied the BLUE with RED STAR situation. 
- - if self.params.USE_GAUGE_TRANSFORM: - flip_map = [random.choice([-1, 1]) for _ in full_h] # random list of +1, -1 values of len # qubits - indices_of_flips = [i for i, x in enumerate(flip_map) if x == -1] # the indices of the -1 values - - for edge_key, j_val in full_j.items(): # for each edge and associated J value - full_j[edge_key] = j_val * flip_map[edge_key[0]] * flip_map[edge_key[1]] # Jij -> J_ij g_i g_j - - # ***************************************** - # Step 4: Choose sampler and its parameters - # ***************************************** - - sampler_kwargs.update({'h': full_h, - 'J': full_j}) - - sampler = FixedEmbeddingComposite(base_sampler, embedding=embedding_map) # applies the embedding - - # ************************************************************************* - # Step 5: Optionally start shimming process in the BLUE with RED STAR basis - # ************************************************************************* - - # all of this in the BLUE with RED STAR basis, ie post automorph, post gauge transform - for shim_iteration_idx in range(shim_iterations): - - # ************************************** - # Step 6: Generate samples from hardware - # ************************************** - - ss = sampler.sample_ising(**sampler_kwargs) - all_samples = ss.record.sample - - if self.params.USE_SHIM: - - # ************************************************************* - # Step 6a: Compute average values of each qubit == magnetization - # ************************************************************* - - magnetization = np.sum(all_samples, axis=0)/self.params.NUM_READS_QC # BLUE with RED STAR label ordering - shim_stats['average_absolute_value_of_magnetization'].append(np.sum([abs(k) for k in magnetization])/len(magnetization)) - - qubit_magnetization = [0] * base_sampler.properties['num_qubits'] - for k, v in embedding_map.items(): - qubit_magnetization[v[0]] = magnetization[k] # check - - shim_stats['qubit_magnetizations'].append(qubit_magnetization) - - # ************************************** - # Step 6b: Adjust flux bias offset terms - # ************************************** - - for k in range(base_sampler.properties['num_qubits']): - sampler_kwargs['flux_biases'][k] -= self.params.ALPHA_PHI * qubit_magnetization[k] - - shim_stats['all_flux_bias_offsets'].append(sampler_kwargs['flux_biases']) - - # ***************************************************************************************************** - # Step 7: Reverse gauge transform, from BLUE with RED STAR to just BLUE, after shimming process is done - # ***************************************************************************************************** - - if self.params.USE_GAUGE_TRANSFORM: - all_samples[:, indices_of_flips] = -all_samples[:, indices_of_flips] - - # *********************************** - # Step 8: Stack samples in BLUE order - # *********************************** - - # this should make a big fat stack of the results in BLUE variable ordering - all_samples_processed_blue = all_samples[:, range(num_vertices)] - for k in range(1, num_embeddings): - all_samples_processed_blue = np.vstack((all_samples_processed_blue, - all_samples[:, range(num_vertices * k, - num_vertices * (k + 1))])) - - # ********************************************************************** - # Step 9: Reorder columns to make them BLACK order instead of BLUE order - # ********************************************************************** - - all_samples_processed_black = all_samples_processed_blue[:, [automorphism[i] for i in 
range(all_samples_processed_blue.shape[1])]] - - # ********************************************************* - # Step 10: Add new samples to the stack, all in BLACK order - # ********************************************************* - - total_samples = np.vstack((total_samples, all_samples_processed_black)) - - # *************************************************************** - # Step 11: Post process samples stack to extract return variables - # *************************************************************** - - total_samples = np.delete(total_samples, (0), axis=0) # delete first row of zeros - - # replace columns where there are disconnected variables with truly random samples - for idx in isolated_vertices: - total_samples[:, idx] = np.random.choice([1, -1], size=total_samples.shape[0]) - - sample_count = self.params.NUM_READS_QC * num_embeddings * self.params.NUMBER_OF_CHIP_RUNS - - # this is a full matrix with zeros on the diagonal that uses all the samples - correlation_matrix = \ - (np.einsum('si,sj->ij', total_samples, total_samples) / sample_count - - np.eye(int(game_state['num_nodes']))) - - winner, score_difference, influence_vector = ( - self.compute_winner_score_and_influence_from_correlation_matrix(game_state, correlation_matrix)) - - return_dictionary = {'game_state': game_state, 'adjudicator': 'quantum_annealing', - 'winner': winner, 'score': score_difference, 'influence_vector': influence_vector, - 'correlation_matrix': correlation_matrix, 'parameters': self.params} - - return return_dictionary - - def lookup_table(self, game_state): - - if self.results_dict is None: - # If using graphs 2 or 3, you can use precomputed terminal state adjudications (faster for testing) - # str(game_state['num_nodes'] - 1) is a hack -- num_nodes=3 is graph 2 and num_nodes=4 is graph 3 - # as long as both are complete graphs - graph_number = game_state['num_nodes'] - 1 - - if graph_number not in [2, 3]: - sys.exit(print('lookup table only enabled for complete graphs on 3 and 4 vertices.')) - - script_dir = os.path.dirname(os.path.abspath(__file__)) # Get the directory of the current script - - file_path = os.path.join(script_dir, '..', 'data', - 'graph_' + str(graph_number) + '_terminal_state_outcomes.pkl') - if not os.path.exists(file_path): - get_tso(graph_number, file_path) - with open(file_path, 'rb') as fp: - results = pickle.load(fp) - self.results_dict = build_results_dict(results) - - my_state = convert_erik_game_state_to_my_game_state(game_state) - winner = self.results_dict[str(my_state)] - - return_dictionary = {'game_state': game_state, 'adjudicator': 'lookup_table', - 'winner': winner, 'score': None, 'influence_vector': None, - 'correlation_matrix': None, 'parameters': self.params} - - return return_dictionary diff --git a/tangled_adjudicate/utils/utilities.py b/tangled_adjudicate/utils/utilities.py index 8a219ca..0a51fd1 100644 --- a/tangled_adjudicate/utils/utilities.py +++ b/tangled_adjudicate/utils/utilities.py @@ -3,52 +3,6 @@ import networkx as nx -def game_state_to_ising_model(game_state): - # maps edge state to J value 0, 1 => J = 0; 2 => J = -1 FM; 3 => J = +1 AFM - edge_state_map = {0: 0, 1: 0, 2: -1, 3: 1} - - vertex_count = game_state['num_nodes'] - edge_list = [(each[0], each[1]) for each in game_state['edges']] - - h = {} - jay = {} - - for k in range(vertex_count): - h[k] = 0 - - for k in range(len(edge_list)): - jay[edge_list[k]] = edge_state_map[game_state['edges'][k][2]] - - return h, jay - - -def game_state_is_terminal(game_state): - # a state is terminal if 
both players have chosen vertices and all edges have been played - # game_state = {'num_nodes': 6, 'edges': [(0, 1, 1), (0, 2, 1), (0, 3, 2), (0, 4, 3), (0, 5, 2), (1, 2, 1), - # (1, 3, 2), (1, 4, 3), (1, 5, 3), (2, 3, 1), (2, 4, 2), (2, 5, 3), (3, 4, 2), (3, 5, 1), (4, 5, 2)], - # 'player1_id': 'player1', 'player2_id': 'player2', 'turn_count': 17, 'current_player_index': 1, - # 'player1_node': 1, 'player2_node': 3} - - edge_states = [each[2] for each in game_state['edges']] - - if edge_states.count(0) == 0 and game_state['player1_node'] != -1 and game_state['player2_node'] != -1: - return True - else: - return False - - -def find_isolated_vertices(n_var, base_jay): - # returns a list of isolated / disconnected vertices if there are any; returns empty list if not - my_graph = nx.Graph() - my_graph.add_nodes_from([k for k in range(n_var)]) - my_graph.add_edges_from([k for k, v in base_jay.items() if v != 0]) - - # Find isolated vertices (vertices with no edges) - isolated_vertices = list(nx.isolates(my_graph)) - - return isolated_vertices - - def get_tso(graph_number, file_path): # get terminal state outcomes if graph_number == 2: From 90187d602c9ba72348bf67fcffefc8c20c90deec Mon Sep 17 00:00:00 2001 From: Geordie Rose Date: Sat, 1 Feb 2025 10:18:10 -0800 Subject: [PATCH 30/40] PEP --- tangled_adjudicate/utils/utilities.py | 11 ++++++----- 1 file changed, 6 insertions(+), 5 deletions(-) diff --git a/tangled_adjudicate/utils/utilities.py b/tangled_adjudicate/utils/utilities.py index 0a51fd1..2f461e3 100644 --- a/tangled_adjudicate/utils/utilities.py +++ b/tangled_adjudicate/utils/utilities.py @@ -1,6 +1,5 @@ """ a place to put utility functions """ import gdown -import networkx as nx def get_tso(graph_number, file_path): @@ -40,7 +39,7 @@ def convert_erik_game_state_to_my_game_state(game_state): def convert_my_game_state_to_erik_game_state(my_state, number_of_vertices, list_of_edge_tuples): - + # extract erik state from geordie state my_vertices = my_state[:number_of_vertices] my_edges = my_state[number_of_vertices:] @@ -66,10 +65,12 @@ def convert_my_game_state_to_erik_game_state(my_state, number_of_vertices, list_ erik_edges = [(list_of_edge_tuples[k][0], list_of_edge_tuples[k][1], my_edges[k]) for k in range(len(my_edges))] game_state = {'num_nodes': number_of_vertices, - # 'edges': [(0, 1, 3), (0, 2, 1), (0, 3, 3), (1, 2, 1), (1, 3, 3), (2, 3, 1)], 'edges': erik_edges, - 'player1_id': 'player1', 'player2_id': 'player2', 'turn_count': turn_count, - 'current_player_index': current_player_idx, 'player1_node': player_1_vertex, + 'player1_id': 'player1', + 'player2_id': 'player2', + 'turn_count': turn_count, + 'current_player_index': current_player_idx, + 'player1_node': player_1_vertex, 'player2_node': player_2_vertex} return game_state From 537c80a9976b6d01e50c5ee4cad7ffd0f5c3dce5 Mon Sep 17 00:00:00 2001 From: Geordie Rose Date: Sat, 1 Feb 2025 10:20:48 -0800 Subject: [PATCH 31/40] changed # graphs to 11 --- README.md | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/README.md b/README.md index 99f2ef0..d61ac2c 100644 --- a/README.md +++ b/README.md @@ -33,9 +33,9 @@ The full D-Wave setup instructions are [here](https://docs.ocean.dwavesys.com/en ## Tangled Game Graph Specification -A Tangled game graph is specified by a graph number, which label specific graphs included here. I've included ten graphs -numbered 1 through 10. 
Each graph requires specification of vertex count (how many vertices the graph has) and an -explicit edge list, which are included for these ten graphs. If you'd like to add a new graph, it's simple! Just add +A Tangled game graph is specified by a graph number, which label specific graphs included here. I've included eleven +graphs numbered 1 through 11. Each graph requires specification of vertex count (how many vertices the graph has) and +an explicit edge list, which are included for these 11 graphs. If you'd like to add a new graph, it's simple! Just add it to the GraphProperties class, found in the /utils/game_graph_properties.py file. ## Tangled Game State Specification: Expected Input Format For Adjudicators From 9336986a7d5578afb50b4e1d67c5279f742aab62 Mon Sep 17 00:00:00 2001 From: Geordie Rose Date: Sun, 2 Feb 2025 13:45:25 -0800 Subject: [PATCH 32/40] num_reads to 10000 --- tangled_adjudicate/adjudicators/simulated_annealing.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tangled_adjudicate/adjudicators/simulated_annealing.py b/tangled_adjudicate/adjudicators/simulated_annealing.py index e8bb234..e058330 100644 --- a/tangled_adjudicate/adjudicators/simulated_annealing.py +++ b/tangled_adjudicate/adjudicators/simulated_annealing.py @@ -11,7 +11,7 @@ class SimulatedAnnealingAdjudicator(Adjudicator): def __init__(self) -> None: """Initialize the adjudicator with default values.""" super().__init__() - self.num_reads: int = 1000 + self.num_reads: int = 10000 self.num_sweeps: int = 16 self.beta_max: float = 3.0 From b59f5a24cc76525f17208355a908c4e8dfad5a9d Mon Sep 17 00:00:00 2001 From: Geordie Rose Date: Sun, 2 Feb 2025 13:46:27 -0800 Subject: [PATCH 33/40] added graph 11 --- tangled_adjudicate/utils/game_graph_properties.py | 12 +++++++++--- 1 file changed, 9 insertions(+), 3 deletions(-) diff --git a/tangled_adjudicate/utils/game_graph_properties.py b/tangled_adjudicate/utils/game_graph_properties.py index 7968122..bafa1ee 100644 --- a/tangled_adjudicate/utils/game_graph_properties.py +++ b/tangled_adjudicate/utils/game_graph_properties.py @@ -2,8 +2,8 @@ import sys # A Tangled game graph is specified by a graph number, which label specific graphs included here. In this module there -# are 10 included graphs numbered 1 through 10. Each graph requires specification of vertex count (how many vertices -# the graph has) and an explicit edge list, which are included for these ten graphs. If you'd like to add a new graph, +# are 11 included graphs numbered 1 through 11. Each graph requires specification of vertex count (how many vertices +# the graph has) and an explicit edge list, which are included for these 11 graphs. If you'd like to add a new graph, # it's simple -- just add it to the GraphProperties class. @@ -157,6 +157,12 @@ def __init__(self, graph_number): (23, 33), (23, 41), (23, 35), (23, 43), (23, 37), (23, 45), (23, 39), (23, 47), (24, 25), (26, 27), (28, 29), (30, 31), (32, 33), (34, 35), (36, 37), (38, 39), (40, 41), (42, 43), (44, 45), (46, 47)] + elif graph_number == 11: + # minimal graph for testing; 3 vertices 2 edges + self.vertex_count = 3 + + self.edge_list = [(0, 1), (1, 2)] + else: print('Bad graph_number in GraphProperties initialization -- no graph corresponding to your choice exists.') @@ -166,7 +172,7 @@ def __init__(self, graph_number): def main(): # this is a debugging tool to make sure everything looks right! 
- for graph_number in range(1, 11): + for graph_number in range(1, 12): g = GraphProperties(graph_number=graph_number) print('****') print('graph', graph_number, 'has', g.vertex_count, 'vertices and', g.edge_count, 'edges.') From 6cbf399b6a62e7cae3f408aab0fcce864bb4532d Mon Sep 17 00:00:00 2001 From: Geordie Rose Date: Mon, 3 Feb 2025 08:18:06 -0800 Subject: [PATCH 34/40] moved sampler initialization into __init__ to make it a class variable --- .../adjudicators/simulated_annealing.py | 11 ++++++----- 1 file changed, 6 insertions(+), 5 deletions(-) diff --git a/tangled_adjudicate/adjudicators/simulated_annealing.py b/tangled_adjudicate/adjudicators/simulated_annealing.py index e058330..c9fda3a 100644 --- a/tangled_adjudicate/adjudicators/simulated_annealing.py +++ b/tangled_adjudicate/adjudicators/simulated_annealing.py @@ -11,10 +11,11 @@ class SimulatedAnnealingAdjudicator(Adjudicator): def __init__(self) -> None: """Initialize the adjudicator with default values.""" super().__init__() - self.num_reads: int = 10000 + self.sampler = neal.SimulatedAnnealingSampler() + self.num_reads: int = 1000 self.num_sweeps: int = 16 self.beta_max: float = 3.0 - + def setup(self, **kwargs) -> None: """Configure the simulated annealing parameters. @@ -40,7 +41,7 @@ def setup(self, **kwargs) -> None: if not isinstance(kwargs['beta_max'], (int, float)) or kwargs['beta_max'] <= 0: raise ValueError("beta_max must be a positive number") self.beta_max = float(kwargs['beta_max']) - + self._parameters = { 'num_reads': self.num_reads, 'num_sweeps': self.num_sweeps, @@ -63,7 +64,7 @@ def adjudicate(self, game_state: GameState) -> AdjudicationResult: # Convert game state to Ising model ising_model = self._game_state_to_ising(game_state) - sampler = neal.SimulatedAnnealingSampler() + # sampler = neal.SimulatedAnnealingSampler() # Calculate beta range based on coupling strengths beta_range = [ @@ -72,7 +73,7 @@ def adjudicate(self, game_state: GameState) -> AdjudicationResult: ] # Perform simulated annealing - response = sampler.sample_ising( + response = self.sampler.sample_ising( ising_model['h'], ising_model['j'], beta_range=beta_range, From 195909359c325d0efa5371960c964c882482c29a Mon Sep 17 00:00:00 2001 From: Geordie Rose Date: Thu, 6 Feb 2025 10:45:15 -0800 Subject: [PATCH 35/40] changed default num_reads in SA to 10000 --- tangled_adjudicate/adjudicators/simulated_annealing.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tangled_adjudicate/adjudicators/simulated_annealing.py b/tangled_adjudicate/adjudicators/simulated_annealing.py index c9fda3a..49eaf85 100644 --- a/tangled_adjudicate/adjudicators/simulated_annealing.py +++ b/tangled_adjudicate/adjudicators/simulated_annealing.py @@ -12,7 +12,7 @@ def __init__(self) -> None: """Initialize the adjudicator with default values.""" super().__init__() self.sampler = neal.SimulatedAnnealingSampler() - self.num_reads: int = 1000 + self.num_reads: int = 10000 self.num_sweeps: int = 16 self.beta_max: float = 3.0 From 139e1530653e6d6da96346b0ae7492dfe4bc860a Mon Sep 17 00:00:00 2001 From: Geordie Rose Date: Sat, 8 Feb 2025 11:37:12 -0800 Subject: [PATCH 36/40] added moser spindle as graph 12 --- .../utils/game_graph_properties.py | 17 ++++++++++++++--- 1 file changed, 14 insertions(+), 3 deletions(-) diff --git a/tangled_adjudicate/utils/game_graph_properties.py b/tangled_adjudicate/utils/game_graph_properties.py index bafa1ee..f68d819 100644 --- a/tangled_adjudicate/utils/game_graph_properties.py +++ 
b/tangled_adjudicate/utils/game_graph_properties.py @@ -2,15 +2,15 @@ import sys # A Tangled game graph is specified by a graph number, which label specific graphs included here. In this module there -# are 11 included graphs numbered 1 through 11. Each graph requires specification of vertex count (how many vertices +# are 12 included graphs numbered 1 through 12. Each graph requires specification of vertex count (how many vertices # the graph has) and an explicit edge list, which are included for these 11 graphs. If you'd like to add a new graph, # it's simple -- just add it to the GraphProperties class. class GraphProperties(object): def __init__(self, graph_number): - # graph_number is an integer, currently in the range 1 to 10, that labels which graph we are using. - # to add a new graph, simply define a new graph_number (say 11) and provide its vertex_count and edge_list + # graph_number is an integer, currently in the range 1 to 12, that labels which graph we are using. + # to add a new graph, simply define a new graph_number (say 13) and provide its vertex_count and edge_list # following the pattern here. if graph_number == 1: @@ -163,6 +163,17 @@ def __init__(self, graph_number): self.edge_list = [(0, 1), (1, 2)] + elif graph_number == 12: + # moser spindle; smaller than petersen graph, only 8 automorphisms; 7 vertices, 11 edges + self.vertex_count = 7 + + self.edge_list = [(0, 1), (0, 2), (0, 3), + (1, 4), (1, 5), + (2, 4), (2, 6), + (3, 5), (3, 6), + (4, 6), + (5, 6)] + else: print('Bad graph_number in GraphProperties initialization -- no graph corresponding to your choice exists.') From 1af743a8c5748edffce27e28dcd57cb38aedde0e Mon Sep 17 00:00:00 2001 From: Geordie Rose Date: Mon, 17 Mar 2025 10:02:29 -0700 Subject: [PATCH 37/40] added RSG and snark graphs --- .../utils/game_graph_properties.py | 126 ++++++++++++++++-- 1 file changed, 115 insertions(+), 11 deletions(-) diff --git a/tangled_adjudicate/utils/game_graph_properties.py b/tangled_adjudicate/utils/game_graph_properties.py index f68d819..d0ec913 100644 --- a/tangled_adjudicate/utils/game_graph_properties.py +++ b/tangled_adjudicate/utils/game_graph_properties.py @@ -2,15 +2,15 @@ import sys # A Tangled game graph is specified by a graph number, which label specific graphs included here. In this module there -# are 12 included graphs numbered 1 through 12. Each graph requires specification of vertex count (how many vertices -# the graph has) and an explicit edge list, which are included for these 11 graphs. If you'd like to add a new graph, +# are 16 included graphs numbered 1 through 16. Each graph requires specification of vertex count (how many vertices +# the graph has) and an explicit edge list, which are included for these 16 graphs. If you'd like to add a new graph, # it's simple -- just add it to the GraphProperties class. class GraphProperties(object): def __init__(self, graph_number): - # graph_number is an integer, currently in the range 1 to 12, that labels which graph we are using. - # to add a new graph, simply define a new graph_number (say 13) and provide its vertex_count and edge_list + # graph_number is an integer, currently in the range 1 to 16, that labels which graph we are using. + # to add a new graph, simply define a new graph_number (say 17) and provide its vertex_count and edge_list # following the pattern here. 
if graph_number == 1: @@ -167,12 +167,116 @@ def __init__(self, graph_number): # moser spindle; smaller than petersen graph, only 8 automorphisms; 7 vertices, 11 edges self.vertex_count = 7 - self.edge_list = [(0, 1), (0, 2), (0, 3), - (1, 4), (1, 5), - (2, 4), (2, 6), - (3, 5), (3, 6), - (4, 6), - (5, 6)] + self.edge_list = [(0, 1), (0, 4), (0, 6), + (1, 2), (1, 5), + (2, 3), (2, 5), + (3, 4), (3, 5), + (3, 6), + (4, 6)] + + elif graph_number == 13: + # second Blanusa snark, 4 automorphisms, mirror symmetric; leftmost = 0, rightmost = 1; 18 vertices, 27 edges + + self.vertex_count = 18 + + self.edge_list = [(0, 2), (0, 4), (0, 16), + (1, 3), (1, 5), (1, 17), + (2, 3), (2, 6), + (3, 8), + (4, 5), (4, 9), + (5, 11), + (6, 9), (6, 14), + (7, 10), (7, 12), (7, 13), + (8, 11), (8, 15), + (9, 12), + (10, 14), (10, 15), + (11, 13), + (12, 16), + (13, 17), + (14, 16), + (15, 17)] + + elif graph_number == 14: + # first Loupekine snark, 8 automorphisms, mirror symmetric; leftmost = 15, rightmost = 12; 22 vertices, 33 edges + + self.vertex_count = 22 + + self.edge_list = [(0,1), (0,2), (0,9), # original 1<->2, 1<->3, 1<->10 + (1,3), (1,20), # original 2<->4, 2<->21 + (2,5), (2,6), # original 3<->6, 3<->7 + (3,4), (3,6), # original 4<->5, 4<->7 + (4,5), (4,7), # original 5<->6, 5<->8 + (5,16), # original 6<->17 + (6,10), # original 7<->11 + (7,12), (7,14), # original 8<->13, 8<->15 + (8,9), (8,11), (8,14), # original 9<->10, 9<->12, 9<->15 + (9,13), # original 10<->14 + (10,11), (10,15), # original 11<->12, 11<->16 + (11,12), # original 12<->13 + (12,13), # original 13<->14 + (13,21), # original 14<->22 + (14,17), # original 15<->18 + (15,18), (15,19), # original 16<->19, 16<->20 + (16,17), (16,19), # original 17<->18, 17<->20 + (17,18), # original 18<->19 + (18,20), # original 19<->21 + (19,21), # original 20<->22 + (20,21)] # original 21<->22 + + elif graph_number == 15: + # Szekeres snark; 50 vertices, 75 edges; vertex 1 left, vertex 4 right + + self.vertex_count = 50 + + self.edge_list = [(0, 6), (0, 9), (0, 12), (1, 15), (1, 18), (1, 21), (2, 24), (2, 27), (2, 30), (3, 33), (3, 36), (3, 39), (4, 42), (4, 45), (4, 48), (5, 6), (5, 10), (5, 22), (6, 7), (7, 8), (7, 38), (8, 9), (8, 13), (9, 10), (10, 11), (11, 12), (11, 25), (12, 13), (13, 41), (14, 15), (14, 19), (14, 31), (15, 16), (16, 17), (16, 47), (17, 18), (17, 22), (18, 19), (19, 20), (20, 21), (20, 34), (21, 22), (23, 24), (23, 28), (23, 40), (24, 25), (25, 26), (26, 27), (26, 31), (27, 28), (28, 29), (29, 30), (29, 43), (30, 31), (32, 33), (32, 37), (32, 49), (33, 34), (34, 35), (35, 36), (35, 40), (36, 37), (37, 38), (38, 39), (39, 40), (41, 42), (41, 46), (42, 43), (43, 44), (44, 45), (44, 49), (45, 46), (46, 47), (47, 48), (48, 49)] + + elif graph_number == 16: + # Descartes snark; 210 vertices, 315 edges; vertex 1 left, vertex 4 right + + self.vertex_count = 210 + + self.edge_list = [(0, 18), (0, 33), (0, 38), (1, 2), (1, 18), (1, 22), (2, 3), (2, 8), (3, 4), (3, 7), + (4, 12), (4, 30), (5, 6), (5, 8), (5, 13), (6, 7), (6, 11), (7, 10), (8, 9), (9, 10), + (9, 11), (10, 14), (11, 12), (12, 15), (13, 14), (13, 209), (14, 15), (15, 193), + (16, 20), (16, 21), (16, 38), (17, 18), (17, 19), (17, 20), (19, 21), (19, 37), + (20, 73), (21, 22), (22, 74), (23, 25), (23, 26), (23, 34), (24, 26), (24, 29), + (24, 31), (25, 29), (25, 39), (26, 41), (27, 28), (27, 36), (27, 44), (28, 35), + (28, 42), (29, 32), (30, 31), (30, 34), (31, 40), (32, 33), (32, 41), (33, 34), + (35, 39), (35, 41), (36, 45), (36, 105), (37, 38), (37, 72), (39, 40), (40, 
99), + (42, 43), (42, 50), (43, 46), (43, 47), (44, 47), (44, 49), (45, 46), (45, 48), + (46, 49), (47, 48), (48, 52), (49, 51), (50, 51), (50, 58), (51, 52), (52, 53), + (53, 54), (53, 88), (54, 55), (54, 84), (55, 56), (55, 91), (56, 57), (56, 66), + (57, 58), (57, 63), (58, 62), (59, 60), (59, 62), (59, 181), (60, 63), (60, 65), + (61, 62), (61, 64), (61, 65), (63, 64), (64, 180), (65, 66), (66, 179), (67, 68), + (67, 75), (67, 151), (68, 69), (68, 148), (69, 70), (69, 83), (70, 71), (70, 81), + (71, 72), (71, 76), (72, 73), (73, 74), (74, 75), (75, 155), (76, 77), (76, 79), + (77, 78), (77, 120), (78, 81), (78, 82), (79, 80), (79, 82), (80, 81), (80, 121), + (82, 83), (83, 122), (84, 85), (84, 86), (85, 89), (85, 171), (86, 87), (86, 90), + (87, 88), (87, 170), (88, 89), (89, 90), (90, 91), (91, 172), (92, 94), (92, 99), + (92, 105), (93, 95), (93, 96), (93, 100), (94, 96), (94, 97), (95, 97), (95, 104), + (96, 103), (97, 98), (98, 101), (98, 102), (99, 100), (100, 101), (101, 135), + (102, 103), (102, 114), (103, 104), (104, 105), (106, 107), (106, 114), (106, 119), + (107, 110), (107, 111), (108, 109), (108, 111), (108, 115), (109, 110), (109, 118), + (110, 113), (111, 112), (112, 113), (112, 117), (113, 116), (114, 115), (115, 116), + (116, 133), (117, 118), (117, 123), (118, 119), (119, 120), (120, 121), (121, 122), + (122, 125), (123, 124), (123, 147), (124, 125), (124, 142), (125, 140), (126, 127), + (126, 132), (126, 133), (127, 128), (127, 129), (128, 131), (128, 136), (129, 130), + (129, 138), (130, 131), (130, 134), (131, 132), (132, 139), (133, 134), (134, 135), + (135, 136), (136, 137), (137, 138), (137, 192), (138, 139), (139, 164), (140, 141), + (140, 178), (141, 143), (141, 144), (142, 144), (142, 145), (143, 145), (143, 147), + (144, 146), (145, 177), (146, 147), (146, 176), (148, 149), (148, 173), (149, 152), + (149, 153), (150, 151), (150, 152), (150, 174), (151, 153), (152, 155), (153, 154), + (154, 155), (154, 175), (156, 159), (156, 160), (156, 163), (157, 158), (157, 159), + (157, 165), (158, 160), (158, 168), (159, 162), (160, 161), (161, 162), (161, 166), + (162, 167), (163, 164), (163, 169), (164, 165), (165, 166), (166, 190), (167, 168), + (167, 175), (168, 169), (169, 170), (170, 171), (171, 172), (172, 173), (173, 174), + (174, 175), (176, 177), (176, 208), (177, 178), (178, 179), (179, 180), (180, 181), + (181, 198), (182, 184), (182, 186), (182, 189), (183, 186), (183, 187), (183, 191), + (184, 185), (184, 187), (185, 192), (185, 193), (186, 194), (187, 188), (188, 189), + (188, 195), (189, 190), (190, 191), (191, 192), (193, 194), (194, 195), (195, 206), + (196, 197), (196, 198), (196, 208), (197, 201), (197, 203), (198, 200), (199, 200), + (199, 203), (199, 204), (200, 202), (201, 202), (201, 204), (202, 209), (203, 207), + (204, 205), (205, 206), (205, 208), (206, 207), (207, 209)] else: @@ -183,7 +287,7 @@ def __init__(self, graph_number): def main(): # this is a debugging tool to make sure everything looks right! 
- for graph_number in range(1, 12): + for graph_number in range(1, 17): g = GraphProperties(graph_number=graph_number) print('****') print('graph', graph_number, 'has', g.vertex_count, 'vertices and', g.edge_count, 'edges.') From 4fd8b3d7ec97434f48570ddff68eb12cfccccdfd Mon Sep 17 00:00:00 2001 From: Geordie Rose Date: Mon, 24 Mar 2025 09:42:11 -0700 Subject: [PATCH 38/40] fixed output num_reads --- tangled_adjudicate/adjudicators/simulated_annealing.py | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/tangled_adjudicate/adjudicators/simulated_annealing.py b/tangled_adjudicate/adjudicators/simulated_annealing.py index 49eaf85..28d5c9c 100644 --- a/tangled_adjudicate/adjudicators/simulated_annealing.py +++ b/tangled_adjudicate/adjudicators/simulated_annealing.py @@ -12,7 +12,7 @@ def __init__(self) -> None: """Initialize the adjudicator with default values.""" super().__init__() self.sampler = neal.SimulatedAnnealingSampler() - self.num_reads: int = 10000 + self.num_reads: int = 100000 self.num_sweeps: int = 16 self.beta_max: float = 3.0 @@ -61,7 +61,9 @@ def adjudicate(self, game_state: GameState) -> AdjudicationResult: ValueError: If the game state is invalid """ self._validate_game_state(game_state) - + # this is just so that the data structure returned stores correct number, as this could have been changed + self._parameters['num_reads'] = self.num_reads + # Convert game state to Ising model ising_model = self._game_state_to_ising(game_state) # sampler = neal.SimulatedAnnealingSampler() From d39a486fc01c4919e308db43a326f969c45bfdf7 Mon Sep 17 00:00:00 2001 From: Geordie Rose Date: Mon, 24 Mar 2025 09:43:10 -0700 Subject: [PATCH 39/40] added conversion of input state from string in convert_my_game_state_to_erik_game_state --- tangled_adjudicate/utils/utilities.py | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/tangled_adjudicate/utils/utilities.py b/tangled_adjudicate/utils/utilities.py index 2f461e3..4874ebc 100644 --- a/tangled_adjudicate/utils/utilities.py +++ b/tangled_adjudicate/utils/utilities.py @@ -1,4 +1,5 @@ """ a place to put utility functions """ +import ast import gdown @@ -40,6 +41,9 @@ def convert_erik_game_state_to_my_game_state(game_state): def convert_my_game_state_to_erik_game_state(my_state, number_of_vertices, list_of_edge_tuples): # extract erik state from geordie state + if isinstance(my_state, str): + my_state = ast.literal_eval(my_state) + my_vertices = my_state[:number_of_vertices] my_edges = my_state[number_of_vertices:] From 5514b50c118f172b5892fa401f96b4f4e70fbbc4 Mon Sep 17 00:00:00 2001 From: Geordie Rose Date: Mon, 24 Mar 2025 09:44:10 -0700 Subject: [PATCH 40/40] added graphs 17 and 18 (cube and 3-prism) --- .../utils/game_graph_properties.py | 24 +++++++++++++++++++ 1 file changed, 24 insertions(+) diff --git a/tangled_adjudicate/utils/game_graph_properties.py b/tangled_adjudicate/utils/game_graph_properties.py index d0ec913..f03ce06 100644 --- a/tangled_adjudicate/utils/game_graph_properties.py +++ b/tangled_adjudicate/utils/game_graph_properties.py @@ -278,6 +278,30 @@ def __init__(self, graph_number): (199, 203), (199, 204), (200, 202), (201, 202), (201, 204), (202, 209), (203, 207), (204, 205), (205, 206), (205, 208), (206, 207), (207, 209)] + elif graph_number == 17: + # cube graph; 8 vertices, 12 edges + + self.vertex_count = 8 + + self.edge_list = [(0, 1), (0, 2), (0, 4), + (1, 3), (1, 5), + (2, 3), (2, 6), + (3, 7), + (4, 5), (4, 6), + (5, 7), + (6, 7)] + + elif graph_number == 18: + # 3-prism graph; 6 vertices, 9 
edges + + self.vertex_count = 6 + + self.edge_list = [(0, 1), (0, 2), (0, 3), + (1, 2), (1, 4), + (2, 5), + (3, 4), (3, 5), + (4, 5)] + else: print('Bad graph_number in GraphProperties initialization -- no graph corresponding to your choice exists.')
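
A quick way to sanity-check newly added graphs like these, before generating automorphisms or hardware embeddings, is to rebuild them in networkx and count their automorphisms directly. The following is only a sketch, not part of the patch: it assumes networkx is importable in the environment (it is pulled in by the Ocean SDK dependencies), and the expected counts in the comments (48 for the cube graph, 12 for the 3-prism) are the standard automorphism group orders for those graphs.

    import networkx as nx
    from tangled_adjudicate.utils.game_graph_properties import GraphProperties

    for graph_number, expected in [(17, 48), (18, 12)]:   # cube graph, 3-prism
        g = GraphProperties(graph_number=graph_number)
        nx_graph = nx.Graph()
        nx_graph.add_nodes_from(range(g.vertex_count))
        nx_graph.add_edges_from(g.edge_list)
        # count automorphisms by matching the graph against itself
        matcher = nx.algorithms.isomorphism.GraphMatcher(nx_graph, nx_graph)
        n_automorphisms = sum(1 for _ in matcher.isomorphisms_iter())
        print('graph', graph_number, 'has', g.vertex_count, 'vertices,', len(g.edge_list),
              'edges, and', n_automorphisms, 'automorphisms (expected', expected, ')')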