diff --git a/.bumpversion.cfg b/.bumpversion.cfg
new file mode 100644
index 0000000..1541736
--- /dev/null
+++ b/.bumpversion.cfg
@@ -0,0 +1,8 @@
+[bumpversion]
+current_version = 0.0.1
+commit = False
+tag = False
+
+[bumpversion:file:pyproject.toml]
+search = version = "{current_version}"
+replace = version = "{new_version}"
diff --git a/README.md b/README.md
index 99f2ef0..d61ac2c 100644
--- a/README.md
+++ b/README.md
@@ -33,9 +33,9 @@ The full D-Wave setup instructions are [here](https://docs.ocean.dwavesys.com/en
 
 ## Tangled Game Graph Specification
 
-A Tangled game graph is specified by a graph number, which label specific graphs included here. I've included ten graphs
-numbered 1 through 10. Each graph requires specification of vertex count (how many vertices the graph has) and an
-explicit edge list, which are included for these ten graphs. If you'd like to add a new graph, it's simple! Just add
+A Tangled game graph is specified by a graph number, which labels specific graphs included here. I've included eleven
+graphs numbered 1 through 11. Each graph requires specification of vertex count (how many vertices the graph has) and
+an explicit edge list, which are included for these 11 graphs. If you'd like to add a new graph, it's simple! Just add
 it to the GraphProperties class, found in the /utils/game_graph_properties.py file.
 
 ## Tangled Game State Specification: Expected Input Format For Adjudicators
diff --git a/pyproject.toml b/pyproject.toml
new file mode 100644
index 0000000..a4b3e92
--- /dev/null
+++ b/pyproject.toml
@@ -0,0 +1,27 @@
+[tool.poetry]
+name = "tangled-adjudicate"
+version = "0.0.1"
+description = "Tangled adjudicators"
+authors = ["Geordie Rose <geordie@snowdropquantum.com>"]
+license = "MIT"
+homepage = "https://www.snowdropquantum.com/"
+packages = [
+    { include = "tangled_adjudicate" },
+    { include = "tests" },
+]
+
+[tool.poetry.dependencies]
+python = "^3.8" # You may want to adjust this based on your needs
+dwave-ocean-sdk = "*"
+dwave-neal = ">=0.6.0"
+matplotlib = "*"
+gdown = "*"
+
+[tool.poetry.group.dev.dependencies]
+# Add development dependencies here if needed
+# pytest = "^7.0.0"
+
+[build-system]
+requires = ["poetry-core>=1.0.0"]
+build-backend = "poetry.core.masonry.api"
+
diff --git a/setup.py b/setup.py
deleted file mode 100644
index c32a1d1..0000000
--- a/setup.py
+++ /dev/null
@@ -1,13 +0,0 @@
-from setuptools import setup
-
-setup(
-    name='tangled-adjudicate',
-    version='0.0.1',
-    packages=['tests', 'tangled_adjudicate', 'tangled_adjudicate.utils', 'tangled_adjudicate.schrodinger', 'tangled_adjudicate.adjudicators'],
-    url='https://www.snowdropquantum.com/',
-    license='MIT',
-    author='Geordie Rose',
-    author_email='geordie@snowdropquantum.com',
-    description='Tangled adjudicators',
-    install_requires=['dwave-ocean-sdk', 'matplotlib', 'gdown']
-)
diff --git a/tangled_adjudicate/__init__.py b/tangled_adjudicate/__init__.py
index e69de29..2fea5db 100644
--- a/tangled_adjudicate/__init__.py
+++ b/tangled_adjudicate/__init__.py
@@ -0,0 +1,5 @@
+from .adjudicators.adjudicator import Adjudicator, GameState, AdjudicationResult
+from .adjudicators.lookup_table import LookupTableAdjudicator
+from .adjudicators.schrodinger import SchrodingerEquationAdjudicator
+from .adjudicators.simulated_annealing import SimulatedAnnealingAdjudicator
+from .adjudicators.quantum_annealing import QuantumAnnealingAdjudicator
diff --git a/tangled_adjudicate/adjudicators/adjudicate.py b/tangled_adjudicate/adjudicators/adjudicate.py
deleted file mode 100644
index e90fa53..0000000
---
a/tangled_adjudicate/adjudicators/adjudicate.py +++ /dev/null @@ -1,386 +0,0 @@ -""" Adjudicator class for Tangled game states using Schrödinger Equation, Simulated Annealing, D-Wave hardware, and -Look Up table """ -import sys -import os -import random -import pickle -import neal -import numpy as np - -from tangled_adjudicate.utils.utilities import (game_state_to_ising_model, game_state_is_terminal, - find_isolated_vertices, get_tso, build_results_dict, - convert_erik_game_state_to_my_game_state) -from tangled_adjudicate.utils.find_graph_automorphisms import get_automorphisms -from tangled_adjudicate.utils.find_hardware_embeddings import get_embeddings -from tangled_adjudicate.utils.parameters import Params -from tangled_adjudicate.schrodinger.schrodinger_functions import evolve_schrodinger - -from dwave.system import DWaveSampler, FixedEmbeddingComposite -from dwave.system.testing import MockDWaveSampler - - -class Adjudicator(object): - def __init__(self, params): - self.params = params - self.results_dict = None - if self.params.USE_QC: # if using QC, get embeddings and automorphisms - self.automorphisms = get_automorphisms(self.params.GRAPH_NUMBER) - self.embeddings = get_embeddings(self.params.GRAPH_NUMBER, self.params.QC_SOLVER_TO_USE) - - def compute_winner_score_and_influence_from_correlation_matrix(self, game_state, correlation_matrix): - # correlation_matrix is assumed to be symmetric matrix with zeros on diagonal (so that self-correlation of - # one is not counted) -- this is the standard for computing influence vector - # - # returns: - # winner: if game_state is terminal, string -- one of 'red' (player 1), 'blue' (player 2), 'draw' - # if game_state not terminal, returns None - # score: if game_state is terminal, returns a real number which is the score of the game (difference - # between two players' influences obtained from the influence vector) - # if game_state not terminal, returns None - # influence_vector: a vector of real numbers of length == number of vertices; this stores each vertex's - # influence, which is the sum over all elements of the correlation matrix it is part of - - influence_vector = np.sum(correlation_matrix, axis=0) - - if game_state_is_terminal(game_state): - score = influence_vector[game_state['player1_node']] - influence_vector[game_state['player2_node']] - - if score > self.params.EPSILON: # more positive than epsilon, red wins - winner = 'red' - else: - if score < -self.params.EPSILON: - winner = 'blue' - else: - winner = 'draw' - else: - score = None - winner = None - - return winner, score, influence_vector - - # all four solver functions input game_state, e.g.: - # - # game_state = {'num_nodes': 6, 'edges': [(0, 1, 1), (0, 2, 1), (0, 3, 2), (0, 4, 3), (0, 5, 2), (1, 2, 1), - # (1, 3, 2), (1, 4, 3), (1, 5, 3), (2, 3, 1), (2, 4, 2), (2, 5, 3), (3, 4, 2), (3, 5, 1), (4, 5, 2)], - # 'player1_id': 'player1', 'player2_id': 'player2', 'turn_count': 17, 'current_player_index': 1, - # 'player1_node': 1, 'player2_node': 3} - # - # and return a dictionary that contains the following keys: - # - # 'game_state': a copy of the input game_state dictionary - # 'adjudicator': a string, one of 'simulated_annealing', 'quantum_annealing', 'schrodinger_equation' - # 'winner': if both players have chosen vertices, a string, one of 'red', 'blue', 'draw', otherwise None - # 'score': if both players have chosen vertices, the difference in influence scores as a real number, otherwise None - # 'influence_vector': a vector of real numbers of length vertex_count (one real 
number per vertex in the game graph) - # 'correlation_matrix': symmetric real-valued matrix of spin-spin correlations with zeros on diagonals - # 'parameters': a copy of the parameters dictionary - - def simulated_annealing(self, game_state): - - h, jay = game_state_to_ising_model(game_state) - sampler = neal.SimulatedAnnealingSampler() - - # Approx match: (1) mean energy and (2) rate of local excitations for square-lattice high precision spin glass - # at 5ns (Advantage2 prototype 2.5) - - # Limits relaxation to local minima. Can vary by model/protocol/QPU. Assumes max(|J|) is scaled to 1. - beta_max = 3 - # Limits equilibration. Can vary by model/protocol/QPU - num_sweeps = 16 - beta_range = [1 / np.sqrt(np.sum([Jij ** 2 for Jij in jay.values()]) + 0.001), beta_max] # 0.001 for J==0 - seed = None # Choose seed=None if reproducibility is not desired - - # randomize_order=True implements standard symmetry-respecting Metropolis algorithm - ss = sampler.sample_ising(h, jay, beta_range=beta_range, num_reads=self.params.NUM_READS_SA, - num_sweeps=num_sweeps, randomize_order=True, seed=seed) - - samps = np.array(ss.record.sample, dtype=float) # casting may not be necessary. - - # creates symmetric matrix with zeros on diagonal (so that self-correlation of one is not counted) -- this is - # the standard for computing influence vector - correlation_matrix = (np.einsum('si,sj->ij', samps, samps) / self.params.NUM_READS_SA - - np.eye(int(game_state['num_nodes']))) - - winner, score_difference, influence_vector = ( - self.compute_winner_score_and_influence_from_correlation_matrix(game_state, correlation_matrix)) - - return_dictionary = {'game_state': game_state, 'adjudicator': 'simulated_annealing', - 'winner': winner, 'score': score_difference, 'influence_vector': influence_vector, - 'correlation_matrix': correlation_matrix, 'parameters': self.params} - - return return_dictionary - - def schrodinger_equation(self, game_state): - - h, jay = game_state_to_ising_model(game_state) - - s_min = 0.001 # beginning and ending anneal times - s_max = 0.999 - - correlation_matrix = ( - evolve_schrodinger(h, jay, s_min=s_min, s_max=s_max, tf=self.params.ANNEAL_TIME_IN_NS, - n_qubits=game_state['num_nodes'])) - # what's returned here is upper triangular with zeros on the diagonal, so we need to add the transpose - correlation_matrix = correlation_matrix + correlation_matrix.T - - winner, score_difference, influence_vector = ( - self.compute_winner_score_and_influence_from_correlation_matrix(game_state, correlation_matrix)) - - return_dictionary = {'game_state': game_state, 'adjudicator': 'schrodinger_equation', - 'winner': winner, 'score': score_difference, 'influence_vector': influence_vector, - 'correlation_matrix': correlation_matrix, 'parameters': self.params} - - return return_dictionary - - def quantum_annealing(self, game_state): - - number_of_embeddings = len(self.embeddings) # e.g. P=343 - number_of_problem_variables = game_state['num_nodes'] # e.g. 
3 - - samples = np.zeros((1, number_of_problem_variables)) # 0th layer to get vstack going, remove at the end - shim_stats = None - all_samples = None - indices_of_flips = None - - if self.params.USE_MOCK_DWAVE_SAMPLER and self.params.USE_SHIM: - print('D-Wave mock sampler is not set up to use the shimming process, turn shim off if using mock!') - - sampler_kwargs = dict(num_reads=self.params.NUM_READS_QC, - answer_mode='raw') - - if self.params.USE_MOCK_DWAVE_SAMPLER: - base_sampler = MockDWaveSampler(topology_type='zephyr', topology_shape=[6, 4]) - else: - base_sampler = DWaveSampler(solver=self.params.QC_SOLVER_TO_USE) - sampler_kwargs.update({'fast_anneal': True, - 'annealing_time': self.params.ANNEAL_TIME_IN_NS / 1000}) - - if self.params.USE_SHIM: - shim_stats = {'qubit_magnetizations': [], - 'average_absolute_value_of_magnetization': [], - 'all_flux_bias_offsets': []} - sampler_kwargs.update({'readout_thermalization': 100., - 'auto_scale': False, - 'flux_drift_compensation': True, - 'flux_biases': [0] * base_sampler.properties['num_qubits']}) - shim_iterations = self.params.SHIM_ITERATIONS - else: - shim_iterations = 1 # if we don't shim, just run through shim step only once - - # ********************************************************** - # Step 0: convert game_state to the desired base Ising model - # ********************************************************** - - # for tangled, h_j=0 for all vertices j in the game graph, and J_ij is one of +1, -1, or 0 for all vertex - # pairs i,j. I named the "base" values (the actual problem defined on the game graph we are asked to solve) - # base_h (all zero) and base_jay (not all zero). - - base_h, base_jay = game_state_to_ising_model(game_state) - - # this finds any isolated vertices that may be in the graph -- we will replace the samples returned for these - # at the end with true 50/50 statistics, so we don't have to worry about them - - isolated_vertices = find_isolated_vertices(number_of_problem_variables, base_jay) - - # We now enter a loop where each pass through the loop programs the chip to specific values of h and J but - # now for the entire chip. We do this by first selecting one automorphism and embedding it in multiple - # parallel ways across the entire chip, and then optionally applying a gauge transform across all the qubits - # used. This latter process chooses different random gauges for each of the embedded instances. - - for chip_run_idx in range(self.params.NUMBER_OF_CHIP_RUNS): - - # ******************************************************************* - # Step 1: Randomly select an automorphism and embed it multiple times - # ******************************************************************* - - automorphism_to_use = random.choice(self.automorphisms) # eg {0:0, 1:2, 2:1} - inverted_automorphism_to_use = {v: k for k, v in automorphism_to_use.items()} # swaps key <-> values - - permuted_embedding = [] - - for each_embedding in self.embeddings[:number_of_embeddings]: # each_embedding is like [1093, 1098, 136]; 343 of these for three-vertex graph - this_embedding = [] - for each_vertex in range(number_of_problem_variables): # each_vertex ranges from 0 to 2 - this_embedding.append(each_embedding[inverted_automorphism_to_use[each_vertex]]) - permuted_embedding.append(this_embedding) - - # given that permuted_embedding looks like [[1229, 1235, 563], [872, 242, 866], ...] 
- # this next part converts into the format {0: [1229], 1: [1235], 2: [563], 3: [872], 4: [242], 5: [866]} - - embedding_to_use = {} - - for embedding_idx in range(number_of_embeddings): - for each_vertex in range(number_of_problem_variables): # up to 0..1037 - embedding_to_use[number_of_problem_variables * embedding_idx + each_vertex] = \ - [permuted_embedding[embedding_idx][each_vertex]] - - # ***************************************************************************************************** - # Step 2: Set h, J parameters for full chip using parallel embeddings of a randomly chosen automorphism - # ***************************************************************************************************** - - # compute full_h and full_j which are h, jay values for the entire chip assuming the above automorphism - # I am calling the problem definition and variable ordering before the automorphism the BLACK or BASE - # situation. After the automorphism the problem definition and variable labels change -- I'm calling the - # situation after the automorphism has been applied the BLUE situation. - - full_h = {} - full_j = {} - - for embedding_idx in range(number_of_embeddings): - for each_vertex in range(number_of_problem_variables): - full_h[number_of_problem_variables * embedding_idx + each_vertex] = 0 - - for k, v in base_jay.items(): - edge_under_automorph = (min(automorphism_to_use[k[0]], automorphism_to_use[k[1]]), - max(automorphism_to_use[k[0]], automorphism_to_use[k[1]])) - full_j[edge_under_automorph] = v - for j in range(1, number_of_embeddings): - full_j[(edge_under_automorph[0] + number_of_problem_variables * j, - edge_under_automorph[1] + number_of_problem_variables * j)] = v - - # ************************************************************************** - # Step 3: Choose random gauge, modify h, J parameters for full chip using it - # ************************************************************************** - - # next we optionally apply a random gauge transformation. I call the situation after the gauge - # transformation has been applied the BLUE with RED STAR situation. 
- - if self.params.USE_GAUGE_TRANSFORM: - flip_map = [random.choice([-1, 1]) for _ in full_h] # random list of +1, -1 values of len # qubits - indices_of_flips = [i for i, x in enumerate(flip_map) if x == -1] # the indices of the -1 values - - for edge_key, j_val in full_j.items(): # for each edge and associated J value - full_j[edge_key] = j_val * flip_map[edge_key[0]] * flip_map[edge_key[1]] # Jij -> J_ij g_i g_j - - # ***************************************** - # Step 4: Choose sampler and its parameters - # ***************************************** - - sampler_kwargs.update({'h': full_h, - 'J': full_j}) - - sampler = FixedEmbeddingComposite(base_sampler, embedding=embedding_to_use) # applies the embedding - - # ************************************************************************* - # Step 5: Optionally start shimming process in the BLUE with RED STAR basis - # ************************************************************************* - - # all of this in the BLUE with RED STAR basis, ie post automorph, post gauge transform - for shim_iteration_idx in range(shim_iterations): - - # ************************************** - # Step 6: Generate samples from hardware - # ************************************** - - ss = sampler.sample_ising(**sampler_kwargs) - all_samples = ss.record.sample - - if self.params.USE_SHIM: - - # ************************************************************* - # Step 6a: Compute average values of each qubit == magnetization - # ************************************************************* - - magnetization = np.sum(all_samples, axis=0)/self.params.NUM_READS_QC # BLUE with RED STAR label ordering - shim_stats['average_absolute_value_of_magnetization'].append(np.sum([abs(k) for k in magnetization])/len(magnetization)) - - qubit_magnetization = [0] * base_sampler.properties['num_qubits'] - for k, v in embedding_to_use.items(): - qubit_magnetization[v[0]] = magnetization[k] # check - - shim_stats['qubit_magnetizations'].append(qubit_magnetization) - - # ************************************** - # Step 6b: Adjust flux bias offset terms - # ************************************** - - for k in range(base_sampler.properties['num_qubits']): - sampler_kwargs['flux_biases'][k] -= self.params.ALPHA_PHI * qubit_magnetization[k] - - shim_stats['all_flux_bias_offsets'].append(sampler_kwargs['flux_biases']) - - # ***************************************************************************************************** - # Step 7: Reverse gauge transform, from BLUE with RED STAR to just BLUE, after shimming process is done - # ***************************************************************************************************** - - if self.params.USE_GAUGE_TRANSFORM: - all_samples[:, indices_of_flips] = -all_samples[:, indices_of_flips] - - # *********************************** - # Step 8: Stack samples in BLUE order - # *********************************** - - # this should make a big fat stack of the results in BLUE variable ordering - all_samples_processed_blue = all_samples[:, range(number_of_problem_variables)] - for k in range(1, number_of_embeddings): - all_samples_processed_blue = np.vstack((all_samples_processed_blue, - all_samples[:, range(number_of_problem_variables * k, - number_of_problem_variables * (k + 1))])) - - # ********************************************************************** - # Step 9: Reorder columns to make them BLACK order instead of BLUE order - # ********************************************************************** - - all_samples_processed_black = 
all_samples_processed_blue[:, [automorphism_to_use[i] for i in range(all_samples_processed_blue.shape[1])]] - - # ********************************************************* - # Step 10: Add new samples to the stack, all in BLACK order - # ********************************************************* - - samples = np.vstack((samples, all_samples_processed_black)) - - # *************************************************************** - # Step 11: Post process samples stack to extract return variables - # *************************************************************** - - samples = np.delete(samples, (0), axis=0) # delete first row of zeros - - # replace columns where there are disconnected variables with truly random samples - for idx in isolated_vertices: - samples[:, idx] = np.random.choice([1, -1], size=samples.shape[0]) - - sample_count = self.params.NUM_READS_QC * number_of_embeddings * self.params.NUMBER_OF_CHIP_RUNS - - # this is a full matrix with zeros on the diagonal that uses all the samples - correlation_matrix = \ - (np.einsum('si,sj->ij', samples, samples) / sample_count - - np.eye(int(game_state['num_nodes']))) - - winner, score_difference, influence_vector = ( - self.compute_winner_score_and_influence_from_correlation_matrix(game_state, correlation_matrix)) - - return_dictionary = {'game_state': game_state, 'adjudicator': 'quantum_annealing', - 'winner': winner, 'score': score_difference, 'influence_vector': influence_vector, - 'correlation_matrix': correlation_matrix, 'parameters': self.params} - - return return_dictionary - - def lookup_table(self, game_state): - - if self.results_dict is None: - # If using graphs 2 or 3, you can use precomputed terminal state adjudications (faster for testing) - # str(game_state['num_nodes'] - 1) is a hack -- num_nodes=3 is graph 2 and num_nodes=4 is graph 3 - # as long as both are complete graphs - graph_number = game_state['num_nodes'] - 1 - - if graph_number not in [2, 3]: - sys.exit(print('lookup table only enabled for complete graphs on 3 and 4 vertices.')) - - script_dir = os.path.dirname(os.path.abspath(__file__)) # Get the directory of the current script - - file_path = os.path.join(script_dir, '..', 'data', - 'graph_' + str(graph_number) + '_terminal_state_outcomes.pkl') - if not os.path.exists(file_path): - get_tso(graph_number, file_path) - with open(file_path, 'rb') as fp: - results = pickle.load(fp) - self.results_dict = build_results_dict(results) - - my_state = convert_erik_game_state_to_my_game_state(game_state) - winner = self.results_dict[str(my_state)] - - return_dictionary = {'game_state': game_state, 'adjudicator': 'lookup_table', - 'winner': winner, 'score': None, 'influence_vector': None, - 'correlation_matrix': None, 'parameters': self.params} - - return return_dictionary \ No newline at end of file diff --git a/tangled_adjudicate/adjudicators/adjudicator.py b/tangled_adjudicate/adjudicators/adjudicator.py new file mode 100644 index 0000000..0063b8f --- /dev/null +++ b/tangled_adjudicate/adjudicators/adjudicator.py @@ -0,0 +1,141 @@ +from abc import ABC, abstractmethod +from typing import Any, TypedDict, List, Tuple, Optional, Dict, Union, Set +import numpy as np +import numpy.typing as npt + + +class GameState(TypedDict): + num_nodes: int + edges: List[Tuple[int, int, int]] # (node1, node2, edge_label=0,1,2,3) + player1_id: str + player2_id: str + turn_count: int + current_player_index: int + player1_node: Optional[int] + player2_node: Optional[int] + + +class AdjudicationResult(TypedDict): + game_state: GameState + 
adjudicator: str + winner: Optional[str] # 'red', 'blue', 'draw', or None + score: Optional[float] + influence_vector: Optional[npt.NDArray[np.float64]] + correlation_matrix: Optional[npt.NDArray[np.float64]] + parameters: Dict[str, Union[str, int, float, bool]] + + +class IsingModel(TypedDict): + h: Dict[int, float] # Local fields + j: Dict[Tuple[int, int], float] # Coupling strengths + + +class Adjudicator(ABC): + """Base interface for game state adjudication implementations.""" + + def __init__(self) -> None: + """Initialize base adjudicator.""" + self._parameters: Dict[str, Any] = {} + self.j_map = {0: 0.0, # edge (i, j) uncolored , J_ij=0 + 1: 0.0, # edge (i, j) colored gray, J_ij=0 + 2: -1.0, # edge (i, j) colored green, FM coupling, J_ij=-1.0 + 3: 1.0} # edge (i, j) colored purple, AFM coupling, J_ij=+1.0 + + @abstractmethod + def setup(self, **kwargs) -> None: + """Optional setup method for implementation-specific initialization.""" + pass + + @abstractmethod + def adjudicate(self, game_state: GameState) -> AdjudicationResult: + """Adjudicate the given game state.""" + pass + + def _validate_game_state(self, game_state: GameState) -> None: + """Validate the game state structure and contents.""" + required_keys = { + 'num_nodes', 'edges', 'player1_id', 'player2_id', + 'turn_count', 'current_player_index', 'player1_node', 'player2_node' + } + + if not all(key in game_state for key in required_keys): + missing_keys = required_keys - set(game_state.keys()) + raise ValueError(f"Game state missing required keys: {missing_keys}") + + if game_state['num_nodes'] < 1: + raise ValueError("Number of nodes must be positive") + + for edge in game_state['edges']: + if len(edge) != 3: + raise ValueError(f"Invalid edge format: {edge}") + if not (0 <= edge[0] < game_state['num_nodes'] and 0 <= edge[1] < game_state['num_nodes']): + raise ValueError(f"Edge vertices out of range: {edge}") + + def _game_state_to_ising(self, game_state: GameState) -> IsingModel: + """Convert game state to Ising model parameters. + + Args: + game_state: The current game state + + Returns: + IsingModel containing h (local fields) and j (coupling strengths) + """ + h = {i: 0.0 for i in range(game_state['num_nodes'])} + j = {} + + for edge in game_state['edges']: + v1, v2, edge_label = edge + if v1 > v2: + v1, v2 = v2, v1 + j[(v1, v2)] = float(self.j_map[edge_label]) + + return IsingModel(h=h, j=j) + + def _find_isolated_vertices(self, game_state: GameState) -> Set[int]: + """Find vertices with no connections in the graph. 
+ + Args: + game_state: The current game state + + Returns: + Set of isolated vertex indices + """ + connected_vertices = set() + for edge in game_state['edges']: + connected_vertices.add(edge[0]) + connected_vertices.add(edge[1]) + + all_vertices = set(range(game_state['num_nodes'])) + return all_vertices - connected_vertices + + def _compute_winner_score_and_influence( + self, + game_state: GameState, + correlation_matrix: npt.NDArray[np.float64], + epsilon: float = 0.5 + ) -> Tuple[Optional[str], Optional[float], npt.NDArray[np.float64]]: + """Compute winner, score and influence from correlation matrix.""" + if not isinstance(correlation_matrix, np.ndarray): + raise ValueError("Correlation matrix must be a numpy array") + + if correlation_matrix.shape[0] != correlation_matrix.shape[1]: + raise ValueError("Correlation matrix must be square") + + if correlation_matrix.shape[0] != game_state['num_nodes']: + raise ValueError("Correlation matrix size must match number of nodes") + + influence_vector = np.sum(correlation_matrix, axis=0) + + if game_state['player1_node'] is None or game_state['player2_node'] is None: + return None, None, influence_vector + + score = influence_vector[game_state['player1_node']] - influence_vector[game_state['player2_node']] + + if score > epsilon: + winner = 'red' + elif score < -epsilon: + winner = 'blue' + else: + winner = 'draw' + + return winner, score, influence_vector diff --git a/tangled_adjudicate/adjudicators/lookup_table.py b/tangled_adjudicate/adjudicators/lookup_table.py new file mode 100644 index 0000000..de2a49b --- /dev/null +++ b/tangled_adjudicate/adjudicators/lookup_table.py @@ -0,0 +1,117 @@ +import os +import pickle +from typing import Dict, Optional +import numpy as np + +from ..utils.utilities import ( + convert_erik_game_state_to_my_game_state, + get_tso, + build_results_dict +) +from .adjudicator import Adjudicator, GameState, AdjudicationResult + + +class LookupTableAdjudicator(Adjudicator): + """Adjudicator implementation using pre-computed lookup tables.""" + + def __init__(self) -> None: + """Initialize the lookup table adjudicator.""" + super().__init__() + self.data_dir: Optional[str] = None + self.results_dict: Optional[Dict[str, str]] = None + + def setup(self, **kwargs) -> None: + """Configure lookup table parameters. + + Args: + data_dir: Directory containing lookup table data files + + Raises: + ValueError: If parameters are invalid or data directory doesn't exist + """ + if 'data_dir' in kwargs: + if not isinstance(kwargs['data_dir'], str): + raise ValueError("data_dir must be a string") + if not os.path.isdir(kwargs['data_dir']): + raise ValueError(f"Directory not found: {kwargs['data_dir']}") + self.data_dir = kwargs['data_dir'] + + self._parameters = {'data_dir': self.data_dir} + + def _load_lookup_table(self, num_nodes: int) -> None: + """Load the appropriate lookup table for the given graph size. + + Args: + num_nodes: Number of nodes in the graph + + Raises: + ValueError: If lookup table is not available for this graph size + RuntimeError: If lookup table file cannot be loaded + """ + if num_nodes not in [3, 4]: + raise ValueError( + "Lookup table only available for complete graphs with 3 or 4 vertices" + ) + + if not self.data_dir: + raise RuntimeError("Data directory not set. 
Call setup() first.")
+
+        graph_number = num_nodes - 1  # Convert from num_nodes to graph_number
+        file_path = os.path.join(
+            self.data_dir,
+            f'graph_{graph_number}_terminal_state_outcomes.pkl'
+        )
+
+        # Generate lookup table if it doesn't exist
+        if not os.path.exists(file_path):
+            get_tso(graph_number, file_path)
+
+        try:
+            with open(file_path, 'rb') as fp:
+                results = pickle.load(fp)
+            self.results_dict = build_results_dict(results)
+        except Exception as e:
+            raise RuntimeError(f"Failed to load lookup table: {str(e)}")
+
+    def adjudicate(self, game_state: GameState) -> AdjudicationResult:
+        """Adjudicate the game state using the lookup table.
+
+        Args:
+            game_state: The current game state
+
+        Returns:
+            AdjudicationResult containing the adjudication details
+
+        Raises:
+            ValueError: If the game state is invalid or unsupported
+            RuntimeError: If lookup table is not loaded
+        """
+        self._validate_game_state(game_state)
+
+        # Load lookup table if needed
+        if (self.results_dict is None or len(next(iter(self.results_dict.keys()))) != game_state['num_nodes']):
+            self._load_lookup_table(game_state['num_nodes'])
+
+        if not self.results_dict:
+            raise RuntimeError("Failed to load lookup table")
+
+        # Convert game state to lookup format
+        lookup_state = convert_erik_game_state_to_my_game_state(game_state)
+
+        try:
+            winner = self.results_dict[str(lookup_state)]
+        except KeyError:
+            raise ValueError(
+                f"Game state not found in lookup table: {lookup_state}"
+            )
+
+        return AdjudicationResult(
+            game_state=game_state,
+            adjudicator='lookup_table',
+            winner=winner,
+            score=None,  # Lookup table doesn't provide scores
+            influence_vector=None,
+            correlation_matrix=None,
+            parameters=self._parameters
+        )
+
\ No newline at end of file
diff --git a/tangled_adjudicate/adjudicators/quantum_annealing.py b/tangled_adjudicate/adjudicators/quantum_annealing.py
new file mode 100644
index 0000000..552d9b6
--- /dev/null
+++ b/tangled_adjudicate/adjudicators/quantum_annealing.py
@@ -0,0 +1,376 @@
+import os
+from typing import Dict, Any, List, Optional
+import numpy as np
+from dataclasses import dataclass
+from dwave.system import DWaveSampler, FixedEmbeddingComposite
+from dwave.system.testing import MockDWaveSampler
+
+from ..utils.find_graph_automorphisms import get_automorphisms
+from ..utils.find_hardware_embeddings import get_embeddings
+from .adjudicator import Adjudicator, GameState, AdjudicationResult
+
+
+@dataclass
+class QAParameters:
+    """Parameters for quantum annealing."""
+    num_reads: int = 1000
+    anneal_time: float = 5.0  # ns
+    num_chip_runs: int = 1
+    use_gauge_transform: bool = False
+    use_shim: bool = False
+    shim_iterations: int = 1
+    alpha_phi: float = 0.1
+    use_mock: bool = True
+    solver_name: str = 'Advantage2_prototype2.6'
+    graph_number: Optional[int] = None
+    data_dir: Optional[str] = None
+
+
+class QuantumAnnealingAdjudicator(Adjudicator):
+    """Adjudicator implementation using D-Wave quantum annealing."""
+
+    def __init__(self) -> None:
+        """Initialize the quantum annealing adjudicator."""
+        super().__init__()
+        self.params = QAParameters()
+        self.embeddings: List[List[int]] = []
+        self.automorphisms: List[Dict[int, int]] = []
+        self.shim_stats: Dict[str, Any] = {}
+
+    def setup(self, **kwargs) -> None:
+        """Configure quantum annealing parameters and initialize D-Wave connection.
+ + Args: + num_reads: Number of annealing reads per run + anneal_time: Annealing time in nanoseconds + num_chip_runs: Number of separate chip programming runs + use_gauge_transform: Whether to apply gauge transformations + use_shim: Whether to use shimming process + shim_iterations: Number of shimming iterations if shimming is used + alpha_phi: Learning rate for flux bias offsets + use_mock: Whether to use mock D-Wave sampler (for testing) + solver_name: Name of D-Wave solver to use + graph_number: Graph number for embedding lookup + + Raises: + ValueError: If parameters are invalid + RuntimeError: If D-Wave connection fails + """ + # Update parameters from kwargs + for key, value in kwargs.items(): + if hasattr(self.params, key): + setattr(self.params, key, value) + else: + raise ValueError(f"Unknown parameter: {key}") + + # Validate parameters + if self.params.num_reads <= 0: + raise ValueError("num_reads must be positive") + if self.params.anneal_time <= 0: + raise ValueError("anneal_time must be positive") + if self.params.num_chip_runs <= 0: + raise ValueError("num_chip_runs must be positive") + if self.params.shim_iterations <= 0: + raise ValueError("shim_iterations must be positive") + if self.params.alpha_phi <= 0 or self.params.alpha_phi > 1: + raise ValueError("alpha_phi must be in (0, 1]") + + # load directory for automorphisms & embeddings + if 'data_dir' in kwargs: + if not isinstance(kwargs['data_dir'], str): + raise ValueError("data_dir must be a string") + if not os.path.isdir(kwargs['data_dir']): + raise ValueError(f"Directory not found: {kwargs['data_dir']}") + self.params.data_dir = kwargs['data_dir'] + + # we need these so always compute / load in + self.automorphisms = get_automorphisms(self.params.graph_number, self.params.data_dir) + self.embeddings = get_embeddings(self.params.graph_number, self.params.solver_name, self.params.data_dir) + + # Initialize sampler + try: + if self.params.use_mock: + base_sampler = MockDWaveSampler(topology_type='zephyr', topology_shape=[6, 4]) + else: + base_sampler = DWaveSampler(solver=self.params.solver_name) + + # Store for later use in adjudicate + self._base_sampler = base_sampler + + except Exception as e: + raise RuntimeError(f"Failed to initialize D-Wave sampler: {str(e)}") + + # initialize shim_stats if required + if self.params.use_shim: + self.shim_stats = {'qubit_magnetizations': [], + 'average_absolute_value_of_magnetization': [], + 'all_flux_bias_offsets': []} + + # Store parameters + self._parameters = self.params.__dict__ + + def _process_embedding( + self, + game_state: GameState, + automorphism: Dict[int, int], + num_embeddings: int + ) -> Dict[int, List[int]]: + """Process embedding with given automorphism. 
+ + Args: + game_state: Current game state + automorphism: Graph automorphism to apply + num_embeddings: number of embeddings to use; default is all of them + + Returns: + Processed embedding mapping + """ + + num_vertices = game_state['num_nodes'] + + inverted_automorphism_to_use = {v: k for k, v in automorphism.items()} # swaps key <-> values + + permuted_embedding = [] + + for each_embedding in self.embeddings[:num_embeddings]: # each_embedding is like [1093, 1098, 136]; 343 of these for three-vertex graph + this_embedding = [] + for each_vertex in range(num_vertices): # each_vertex ranges from 0 to 2 + this_embedding.append(each_embedding[inverted_automorphism_to_use[each_vertex]]) + permuted_embedding.append(this_embedding) + + # given that permuted_embedding looks like [[1229, 1235, 563], [872, 242, 866], ...] + # this next part converts into the format {0: [1229], 1: [1235], 2: [563], 3: [872], 4: [242], 5: [866]} + + embedding_map = {} + + for embedding_idx in range(num_embeddings): + for each_vertex in range(num_vertices): # up to 0..1037 + embedding_map[num_vertices * embedding_idx + each_vertex] = \ + [permuted_embedding[embedding_idx][each_vertex]] + + return embedding_map + + def adjudicate(self, game_state: GameState) -> AdjudicationResult: + """Adjudicate the game state using quantum annealing. + + Args: + game_state: The current game state + + Returns: + AdjudicationResult containing the adjudication details + + Raises: + ValueError: If the game state is invalid + RuntimeError: If quantum annealing fails + """ + + if not self._base_sampler: + raise RuntimeError("Sampler not initialized. Call setup() first.") + + self._validate_game_state(game_state) + + num_vertices = game_state['num_nodes'] + num_embeddings = len(self.embeddings) + total_samples = np.zeros((1, num_vertices)) # Initial array for stacking + + all_samples = None + indices_of_flips = None + + # set up sampler kwargs + if self.params.use_mock and self.params.use_shim: + print('D-Wave mock sampler is not set up to use the shimming process, turn shim off if using mock!') + + sampler_kwargs = { + 'num_reads': self.params.num_reads, + 'answer_mode': 'raw' + } + + if not self.params.use_mock: + sampler_kwargs.update({ + 'fast_anneal': True, + 'annealing_time': self.params.anneal_time / 1000, + 'auto_scale': False + }) + + if self.params.use_shim: + sampler_kwargs.update({'readout_thermalization': 100., + 'auto_scale': False, + 'flux_drift_compensation': True, + 'flux_biases': [0] * base_sampler.properties['num_qubits']}) + shim_iterations = self.params.shim_iterations + else: + shim_iterations = 1 # if we don't shim, just run through shim step only once + + # ********************************************************** + # Step 0: convert game_state to the desired base Ising model + # ********************************************************** + + # for tangled, h_j=0 for all vertices j in the game graph, and J_ij is one of +1, -1, or 0 for all vertex + # pairs i,j. I named the "base" values (the actual problem defined on the game graph we are asked to solve) + # base_ising_model. 
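+        # as an illustrative example (values follow self.j_map in the Adjudicator base class, not a specific game):
+        # an edge (0, 2, 3) is purple and gives j[(0, 2)] = +1.0 (AFM), an edge (1, 2, 2) is green and gives
+        # j[(1, 2)] = -1.0 (FM), and uncolored (0) or gray (1) edges give 0.0; every local field h_i stays 0.0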
+ + base_ising_model = self._game_state_to_ising(game_state) + + # this finds any isolated vertices that may be in the graph -- we will replace the samples returned for these + # at the end with true 50/50 statistics, so we don't have to worry about them + + isolated_vertices = self._find_isolated_vertices(game_state) + + # We now enter a loop where each pass through the loop programs the chip to specific values of h and J but + # now for the entire chip. We do this by first selecting one automorphism and embedding it in multiple + # parallel ways across the entire chip, and then optionally applying a gauge transform across all the qubits + # used. This latter process chooses different random gauges for each of the embedded instances. + + for _ in range(self.params.num_chip_runs): + + # ******************************************************************* + # Step 1: Randomly select an automorphism and embed it multiple times + # ******************************************************************* + + automorphism = np.random.choice(self.automorphisms) + embedding_map = self._process_embedding(game_state, automorphism, num_embeddings) + + # ***************************************************************************************************** + # Step 2: Set h, J parameters for full chip using parallel embeddings of a randomly chosen automorphism + # ***************************************************************************************************** + + # compute full_h and full_j which are h, jay values for the entire chip assuming the above automorphism + # I am calling the problem definition and variable ordering before the automorphism the BLACK or BASE + # situation. After the automorphism the problem definition and variable labels change -- I'm calling the + # situation after the automorphism has been applied the BLUE situation. + + full_h = {} + full_j = {} + + for embedding_idx in range(num_embeddings): + for each_vertex in range(num_vertices): + full_h[num_vertices * embedding_idx + each_vertex] = 0 + + for k, v in base_ising_model['j'].items(): + edge_under_automorph = (min(automorphism[k[0]], automorphism[k[1]]), + max(automorphism[k[0]], automorphism[k[1]])) + full_j[edge_under_automorph] = v + for j in range(1, num_embeddings): + full_j[(edge_under_automorph[0] + num_vertices * j, + edge_under_automorph[1] + num_vertices * j)] = v + + # ************************************************************************** + # Step 3: Choose random gauge, modify h, J parameters for full chip using it + # ************************************************************************** + + # next we optionally apply a random gauge transformation. I call the situation after the gauge + # transformation has been applied the BLUE with RED STAR situation. 
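+            # e.g. (illustrative numbers, not from a real run): with flip_map = [1, -1, 1], an edge (0, 1) with
+            # J = -1 is programmed as J * g_0 * g_1 = +1 and an edge (1, 2) with J = +1 becomes -1; Step 7 flips
+            # column 1 of the returned samples back, so the correlations of the original problem are unchanged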
+ + if self.params.use_gauge_transform: + flip_map = [np.random.choice([-1, 1]) for _ in full_h] # random list of +1, -1 values of len # qubits + indices_of_flips = [i for i, x in enumerate(flip_map) if x == -1] # the indices of the -1 values + + for edge_key, j_val in full_j.items(): # for each edge and associated J value + full_j[edge_key] = j_val * flip_map[edge_key[0]] * flip_map[edge_key[1]] # Jij -> J_ij g_i g_j + + # ***************************************** + # Step 4: Choose sampler and its parameters + # ***************************************** + + sampler_kwargs.update({'h': full_h, + 'J': full_j}) + + sampler = FixedEmbeddingComposite(self._base_sampler, embedding=embedding_map) # applies the embedding + + # ************************************************************************* + # Step 5: Optionally start shimming process in the BLUE with RED STAR basis + # ************************************************************************* + + # all of this in the BLUE with RED STAR basis, ie post automorph, post gauge transform + for shim_iteration_idx in range(shim_iterations): + + # ************************************** + # Step 6: Generate samples from hardware + # ************************************** + + ss = sampler.sample_ising(**sampler_kwargs) + all_samples = ss.record.sample + + if self.params.use_shim: + + # ************************************************************* + # Step 6a: Compute average values of each qubit == magnetization + # ************************************************************* + + magnetization = np.sum(all_samples, axis=0)/self.params.num_reads # BLUE with RED STAR label ordering + self.shim_stats['average_absolute_value_of_magnetization'].append(np.sum([abs(k) for k in magnetization])/len(magnetization)) + + qubit_magnetization = [0] * self._base_sampler.properties['num_qubits'] + for k, v in embedding_map.items(): + qubit_magnetization[v[0]] = magnetization[k] # check + + self.shim_stats['qubit_magnetizations'].append(qubit_magnetization) + + # ************************************** + # Step 6b: Adjust flux bias offset terms + # ************************************** + + for k in range(self._base_sampler.properties['num_qubits']): + sampler_kwargs['flux_biases'][k] -= self.params.alpha_phi * qubit_magnetization[k] + + self.shim_stats['all_flux_bias_offsets'].append(sampler_kwargs['flux_biases']) + + # ***************************************************************************************************** + # Step 7: Reverse gauge transform, from BLUE with RED STAR to just BLUE, after shimming process is done + # ***************************************************************************************************** + + if self.params.use_gauge_transform: + all_samples[:, indices_of_flips] = -all_samples[:, indices_of_flips] + + # *********************************** + # Step 8: Stack samples in BLUE order + # *********************************** + + # this should make a big fat stack of the results in BLUE variable ordering + all_samples_processed_blue = all_samples[:, range(num_vertices)] + for k in range(1, num_embeddings): + all_samples_processed_blue = np.vstack((all_samples_processed_blue, + all_samples[:, range(num_vertices * k, + num_vertices * (k + 1))])) + + # ********************************************************************** + # Step 9: Reorder columns to make them BLACK order instead of BLUE order + # ********************************************************************** + + all_samples_processed_black = all_samples_processed_blue[:, 
[automorphism[i] for i in range(all_samples_processed_blue.shape[1])]] + + # ********************************************************* + # Step 10: Add new samples to the stack, all in BLACK order + # ********************************************************* + + total_samples = np.vstack((total_samples, all_samples_processed_black)) + + # *************************************************************** + # Step 11: Post process samples stack to extract return variables + # *************************************************************** + + total_samples = np.delete(total_samples, (0), axis=0) # delete first row of zeros + + # replace columns where there are disconnected variables with truly random samples + for idx in isolated_vertices: + total_samples[:, idx] = np.random.choice([1, -1], size=total_samples.shape[0]) + + sample_count = self.params.num_reads * num_embeddings * self.params.num_chip_runs + + # this is a full matrix with zeros on the diagonal that uses all the samples + correlation_matrix = \ + (np.einsum('si,sj->ij', total_samples, total_samples) / sample_count - + np.eye(num_vertices)) + + # Compute results + winner, score, influence_vector = self._compute_winner_score_and_influence(game_state, correlation_matrix) + + return AdjudicationResult( + game_state=game_state, + adjudicator='quantum_annealing', + winner=winner, + score=score, + influence_vector=influence_vector, + correlation_matrix=correlation_matrix, + parameters=self._parameters + ) diff --git a/tangled_adjudicate/adjudicators/schrodinger.py b/tangled_adjudicate/adjudicators/schrodinger.py new file mode 100644 index 0000000..7c0d377 --- /dev/null +++ b/tangled_adjudicate/adjudicators/schrodinger.py @@ -0,0 +1,104 @@ +from typing import Dict, Any +import numpy as np +from ..schrodinger.schrodinger_functions import evolve_schrodinger + +from .adjudicator import Adjudicator, GameState, AdjudicationResult + + +class SchrodingerEquationAdjudicator(Adjudicator): + """Adjudicator implementation using Schrödinger equation evolution.""" + + def __init__(self) -> None: + """Initialize the adjudicator with default values.""" + super().__init__() + self.anneal_time: float = 5.0 # ns + self.s_min: float = 0.001 + self.s_max: float = 0.999 + + def setup(self, **kwargs) -> None: + """Configure the Schrödinger equation parameters. + + Args: + anneal_time: Annealing time in nanoseconds (default: 5.0) + s_min: Minimum annealing parameter (default: 0.001) + s_max: Maximum annealing parameter (default: 0.999) + + Raises: + ValueError: If parameters are invalid + """ + if 'anneal_time' in kwargs: + if not isinstance(kwargs['anneal_time'], (int, float)) or kwargs['anneal_time'] <= 0: + raise ValueError("anneal_time must be a positive number") + self.anneal_time = float(kwargs['anneal_time']) + + if 's_min' in kwargs: + if not isinstance(kwargs['s_min'], (int, float)) or not 0 <= kwargs['s_min'] < 1: + raise ValueError("s_min must be in [0, 1)") + self.s_min = float(kwargs['s_min']) + + if 's_max' in kwargs: + if not isinstance(kwargs['s_max'], (int, float)) or not 0 < kwargs['s_max'] <= 1: + raise ValueError("s_max must be in (0, 1]") + self.s_max = float(kwargs['s_max']) + + if self.s_min >= self.s_max: + raise ValueError("s_min must be less than s_max") + + self._parameters = { + 'anneal_time': self.anneal_time, + 's_min': self.s_min, + 's_max': self.s_max + } + + def adjudicate(self, game_state: GameState) -> AdjudicationResult: + """Adjudicate the game state using Schrödinger equation evolution. 
+ + Args: + game_state: The current game state + + Returns: + AdjudicationResult containing the adjudication details + + Raises: + ValueError: If the game state is invalid + """ + self._validate_game_state(game_state) + + # Convert game state to Ising model + ising_model = self._game_state_to_ising(game_state) + + # Evolve Schrödinger equation + correlation_matrix = evolve_schrodinger( + ising_model['h'], + ising_model['j'], + s_min=self.s_min, + s_max=self.s_max, + tf=self.anneal_time, + n_qubits=game_state['num_nodes'] + ) + + # Make symmetric (evolve_schrodinger returns upper triangular) + correlation_matrix = correlation_matrix + correlation_matrix.T + + # Handle isolated vertices + isolated_vertices = self._find_isolated_vertices(game_state) + if isolated_vertices: + for vertex in isolated_vertices: + correlation_matrix[:, vertex] = 0 + correlation_matrix[vertex, :] = 0 + + # Compute results + winner, score, influence_vector = self._compute_winner_score_and_influence( + game_state, correlation_matrix + ) + + return AdjudicationResult( + game_state=game_state, + adjudicator='schrodinger_equation', + winner=winner, + score=score, + influence_vector=influence_vector, + correlation_matrix=correlation_matrix, + parameters=self._parameters + ) + \ No newline at end of file diff --git a/tangled_adjudicate/adjudicators/simulated_annealing.py b/tangled_adjudicate/adjudicators/simulated_annealing.py new file mode 100644 index 0000000..28d5c9c --- /dev/null +++ b/tangled_adjudicate/adjudicators/simulated_annealing.py @@ -0,0 +1,107 @@ +from typing import Dict, Any +import neal +import numpy as np + +from .adjudicator import Adjudicator, GameState, AdjudicationResult + + +class SimulatedAnnealingAdjudicator(Adjudicator): + """Adjudicator implementation using simulated annealing.""" + + def __init__(self) -> None: + """Initialize the adjudicator with default values.""" + super().__init__() + self.sampler = neal.SimulatedAnnealingSampler() + self.num_reads: int = 100000 + self.num_sweeps: int = 16 + self.beta_max: float = 3.0 + + def setup(self, **kwargs) -> None: + """Configure the simulated annealing parameters. + + Args: + num_reads: Number of annealing reads (default: 1000) + num_sweeps: Number of sweeps per read (default: 16) + beta_max: Maximum inverse temperature (default: 3.0) + + Raises: + ValueError: If parameters are invalid + """ + if 'num_reads' in kwargs: + if not isinstance(kwargs['num_reads'], int) or kwargs['num_reads'] <= 0: + raise ValueError("num_reads must be a positive integer") + self.num_reads = kwargs['num_reads'] + + if 'num_sweeps' in kwargs: + if not isinstance(kwargs['num_sweeps'], int) or kwargs['num_sweeps'] <= 0: + raise ValueError("num_sweeps must be a positive integer") + self.num_sweeps = kwargs['num_sweeps'] + + if 'beta_max' in kwargs: + if not isinstance(kwargs['beta_max'], (int, float)) or kwargs['beta_max'] <= 0: + raise ValueError("beta_max must be a positive number") + self.beta_max = float(kwargs['beta_max']) + + self._parameters = { + 'num_reads': self.num_reads, + 'num_sweeps': self.num_sweeps, + 'beta_max': self.beta_max + } + + def adjudicate(self, game_state: GameState) -> AdjudicationResult: + """Adjudicate the game state using simulated annealing. 
+ + Args: + game_state: The current game state + + Returns: + AdjudicationResult containing the adjudication details + + Raises: + ValueError: If the game state is invalid + """ + self._validate_game_state(game_state) + # this is just so that the data structure returned stores correct number, as this could have been changed + self._parameters['num_reads'] = self.num_reads + + # Convert game state to Ising model + ising_model = self._game_state_to_ising(game_state) + # sampler = neal.SimulatedAnnealingSampler() + + # Calculate beta range based on coupling strengths + beta_range = [ + 1 / np.sqrt(np.sum([Jij ** 2 for Jij in ising_model['j'].values()]) + 0.001), + self.beta_max + ] + + # Perform simulated annealing + response = self.sampler.sample_ising( + ising_model['h'], + ising_model['j'], + beta_range=beta_range, + num_reads=self.num_reads, + num_sweeps=self.num_sweeps, + randomize_order=True + ) + + # Calculate correlation matrix + samples = np.array(response.record.sample, dtype=float) + + # creates symmetric matrix with zeros on diagonal (so that self-correlation of one is not counted) -- this is + # the standard for computing influence vector + correlation_matrix = (np.einsum('si,sj->ij', samples, samples) / self.num_reads - + np.eye(game_state['num_nodes'])) + + # Compute results + winner, score, influence_vector = self._compute_winner_score_and_influence(game_state, correlation_matrix) + + return AdjudicationResult( + game_state=game_state, + adjudicator='simulated_annealing', + winner=winner, + score=score, + influence_vector=influence_vector, + correlation_matrix=correlation_matrix, + parameters=self._parameters + ) + \ No newline at end of file diff --git a/tangled_adjudicate/utils/adjudicate_all_terminal_states.py b/tangled_adjudicate/utils/adjudicate_all_terminal_states.py index 7157885..a9c749d 100644 --- a/tangled_adjudicate/utils/adjudicate_all_terminal_states.py +++ b/tangled_adjudicate/utils/adjudicate_all_terminal_states.py @@ -1,17 +1,19 @@ """ generate and adjudicate all Tangled terminal states for tiny graphs """ - import sys import os import time import pickle import numpy as np -from tangled_adjudicate.adjudicators.adjudicate import Adjudicator +from tangled_adjudicate.adjudicators.simulated_annealing import SimulatedAnnealingAdjudicator +from tangled_adjudicate.adjudicators.quantum_annealing import QuantumAnnealingAdjudicator +from tangled_adjudicate.adjudicators.lookup_table import LookupTableAdjudicator +from tangled_adjudicate.adjudicators.schrodinger import SchrodingerEquationAdjudicator + from tangled_adjudicate.utils.generate_terminal_states import generate_all_tangled_terminal_states -from tangled_adjudicate.utils.parameters import Params -def generate_adjudication_results_for_all_terminal_states(solver_to_use): +def generate_adjudication_results_for_all_terminal_states(graph_number, solver_to_use): # uses up to three different adjudicators provided to evaluate all unique terminal states for tiny graphs # (in the default here, graphs 2 and 3). Note this only works for tiny graphs as the number of terminal states # grows like 3 ** edge_count. @@ -20,19 +22,35 @@ def generate_adjudication_results_for_all_terminal_states(solver_to_use): # that hasn't been called yet, it adds that key and its results. If you call it in a case where there are # already results, it will ask you if you want to overwrite them. 
- if solver_to_use not in ['simulated_annealing', 'schrodinger_equation', 'quantum_annealing']: + if solver_to_use not in ['simulated_annealing', 'schrodinger_equation', 'quantum_annealing', 'lookup_table']: sys.exit(print('the solver' + solver_to_use + 'is not in the allowed list -- please take a look!')) precision_digits = 4 # just to clean up print output np.set_printoptions(suppress=True) # remove scientific notation - params = Params() # your graph_number will be set here, make sure it's what you want! - adjudicator = Adjudicator(params) - game_states = generate_all_tangled_terminal_states(params.GRAPH_NUMBER) + adjudicator = None + + args = {'data_dir': os.path.join(os.getcwd(), '..', 'data'), + 'graph_number': graph_number} + + if solver_to_use == 'simulated_annealing': + adjudicator = SimulatedAnnealingAdjudicator() + else: + if solver_to_use == 'quantum_annealing': + adjudicator = QuantumAnnealingAdjudicator() + else: + if solver_to_use == 'lookup_table': + adjudicator = LookupTableAdjudicator() + else: + if solver_to_use == 'schrodinger_equation': + adjudicator = SchrodingerEquationAdjudicator() + + adjudicator.setup(**args) + + game_states = generate_all_tangled_terminal_states(graph_number) - file_name_prefix = "graph_" + str(params.GRAPH_NUMBER) data_dir = os.path.join(os.getcwd(), '..', 'data') - file_path = os.path.join(data_dir, file_name_prefix + "_terminal_states_adjudication_results.pkl") + file_path = os.path.join(data_dir, "graph_" + str(graph_number) + "_terminal_states_adjudication_results.pkl") if os.path.isfile(file_path): with open(file_path, "rb") as fp: @@ -42,16 +60,18 @@ def generate_adjudication_results_for_all_terminal_states(solver_to_use): # at this point, either we have loaded some adjudication_results from an existing file, or we have a new empty dict if solver_to_use in adjudication_results: # this means we loaded this in already - user_input = input('results already exist, overwrite (y/n)?') + user_input = input('results already exist for ' + solver_to_use + ', overwrite (y/n)?') if user_input.lower() != 'y': - sys.exit(print('exiting!')) + return None # now we proceed to compute and store result print('beginning adjudication using the ' + solver_to_use + ' solver...') start = time.time() adjudication_results[solver_to_use] = {} + for k, v in game_states.items(): - adjudication_results[solver_to_use][k] = getattr(adjudicator, solver_to_use)(v['game_state']) + adjudication_results[solver_to_use][k] = adjudicator.adjudicate(v['game_state']) + print('elapsed time was', round(time.time() - start, precision_digits), 'seconds.') # store it -- this should leave any previously loaded solver results intact @@ -61,10 +81,14 @@ def generate_adjudication_results_for_all_terminal_states(solver_to_use): def main(): - solver_list = ['simulated_annealing', 'schrodinger_equation', 'quantum_annealing'] + # note: generating all schrodinger_equation adjudication results for graph 3 or bigger takes forever + # I spot checked new subclass version and all spot checks were good + + graph_number = 2 + solver_list = ['simulated_annealing', 'schrodinger_equation', 'quantum_annealing', 'lookup_table'] for solver_to_use in solver_list: - generate_adjudication_results_for_all_terminal_states(solver_to_use) + generate_adjudication_results_for_all_terminal_states(graph_number, solver_to_use) if __name__ == "__main__": diff --git a/tangled_adjudicate/utils/compare_adjudication_results.py b/tangled_adjudicate/utils/compare_adjudication_results.py index 168fe90..9ffe222 100644 --- 
a/tangled_adjudicate/utils/compare_adjudication_results.py +++ b/tangled_adjudicate/utils/compare_adjudication_results.py @@ -7,10 +7,8 @@ def compare_adjudication_results(graph_number, solvers_to_use): - # solvers_to_use is a list of solvers of length either 2 or 3 comprising 2 or 3 of - # ['schrodinger_equation', 'simulated_annealing', 'quantum_annealing'] - # - # indexing_solvers = {1: 'schrodinger_equation', 2: 'simulated_annealing', 3: 'quantum_annealing'} + # solvers_to_use is a list of solvers of length 2, 3, or 4 comprising 2, 3, or 4 of + # ['schrodinger_equation', 'simulated_annealing', 'quantum_annealing', 'lookup_table'] # load adjudication results obtained from running /utils/adjudicate_all_terminal_states.py data_dir = os.path.join(os.getcwd(), '..', 'data') @@ -42,7 +40,7 @@ def compare_adjudication_results(graph_number, solvers_to_use): for k0, value_dict in adjudication_results.items(): # k will be solver name string if k0 in solvers_to_use: # if we want to add this, add it for k1, v in value_dict.items(): - game_result[k1].append([k0, v['winner'], v['score']]) + game_result[k1].append([k0, v['winner'], v['score']]) # score will be None for lookup_table comparisons = {} for k, v in game_result.items(): # k is game state string @@ -60,7 +58,14 @@ def compare_adjudication_results(graph_number, solvers_to_use): to_plot = [] for k, v in scores.items(): - to_plot.append(v) + if v[0] is not None: + to_plot.append(v) + + if 'lookup_table' in solvers_to_use: + solvers_to_use.remove('lookup_table') + + if len(solvers_to_use) < 2: + print('need at least two of SA, QA, SE to generate score comparisons... lookup_table does not generate scores!') red_text = solvers_to_use[0] + ': red' blue_text = solvers_to_use[1] + ': blue' @@ -93,7 +98,7 @@ def compare_adjudication_results(graph_number, solvers_to_use): if graph_number == 3: if len(to_plot) == 2: - plt.hist(to_plot, range=[-2, 2], bins=400, color=['red', 'blue'], stacked=True) + plt.hist(to_plot, range=[-4, 4], bins=800, color=['red', 'blue'], stacked=True) else: plt.hist(to_plot, range=[-4, 4], bins=800, color=['red', 'blue', 'cyan'], stacked=True) @@ -115,10 +120,11 @@ def compare_adjudication_results(graph_number, solvers_to_use): def main(): - solvers_to_use = ['simulated_annealing', 'schrodinger_equation', 'quantum_annealing'] + solvers_to_use = ['simulated_annealing', 'schrodinger_equation', 'quantum_annealing', 'lookup_table'] + compare_adjudication_results(graph_number=2, solvers_to_use=solvers_to_use) - for graph_number in range(2, 4): - compare_adjudication_results(graph_number=graph_number, solvers_to_use=solvers_to_use) + solvers_to_use = ['simulated_annealing', 'quantum_annealing', 'lookup_table'] + compare_adjudication_results(graph_number=3, solvers_to_use=solvers_to_use) if __name__ == "__main__": diff --git a/tangled_adjudicate/utils/find_hardware_embeddings.py b/tangled_adjudicate/utils/find_hardware_embeddings.py index 5f29aa3..b27eea9 100644 --- a/tangled_adjudicate/utils/find_hardware_embeddings.py +++ b/tangled_adjudicate/utils/find_hardware_embeddings.py @@ -150,7 +150,7 @@ def raster_embedding_search(hardware_graph, subgraph, raster_breadth=2, delete_u return embmat -def get_embeddings(source_graph_number, qc_solver_to_use): +def get_embeddings(source_graph_number, qc_solver_to_use, data_dir): # generates multiple parallel embeddings into hardware for your graph # the smaller the graph, the longer this takes -- e.g. 
source_graph_number == 1 takes about 4 minutes # @@ -166,9 +166,7 @@ def get_embeddings(source_graph_number, qc_solver_to_use): file_name = ('embeddings_graph_number_' + str(source_graph_number) + '_raster_breadth_' + str(raster_breadth) + '_gridsize_' + str(grid_size) + '_qc_' + qc_solver_to_use + '.pkl') - data_dir = os.path.join(os.getcwd(), '..', 'data') # checks to see if /data exists; if not, creates it - - if not os.path.isdir(data_dir): + if not os.path.isdir(data_dir): # checks to see if /data exists; if not, creates it os.mkdir(data_dir) file_path = os.path.join(data_dir, file_name) diff --git a/tangled_adjudicate/utils/game_graph_properties.py b/tangled_adjudicate/utils/game_graph_properties.py index 7968122..f03ce06 100644 --- a/tangled_adjudicate/utils/game_graph_properties.py +++ b/tangled_adjudicate/utils/game_graph_properties.py @@ -2,15 +2,15 @@ import sys # A Tangled game graph is specified by a graph number, which label specific graphs included here. In this module there -# are 10 included graphs numbered 1 through 10. Each graph requires specification of vertex count (how many vertices -# the graph has) and an explicit edge list, which are included for these ten graphs. If you'd like to add a new graph, +# are 18 included graphs numbered 1 through 18. Each graph requires specification of vertex count (how many vertices +# the graph has) and an explicit edge list, which are included for these 18 graphs. If you'd like to add a new graph, # it's simple -- just add it to the GraphProperties class. class GraphProperties(object): def __init__(self, graph_number): - # graph_number is an integer, currently in the range 1 to 10, that labels which graph we are using. - # to add a new graph, simply define a new graph_number (say 11) and provide its vertex_count and edge_list + # graph_number is an integer, currently in the range 1 to 18, that labels which graph we are using. + # to add a new graph, simply define a new graph_number (say 19) and provide its vertex_count and edge_list + # following the pattern here.
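# A minimal sketch of the "add a new graph" pattern described in the comment above. The graph
# number (19) and the 4-cycle edge list are hypothetical; the pattern and conventions are the ones
# used in this file -- vertices numbered from 0, each undirected edge listed once as an ordered
# (low, high) tuple, which the terminal-state code later looks up by exact tuple.
#
#     elif graph_number == 19:
#         # hypothetical example: 4-cycle; 4 vertices, 4 edges
#         self.vertex_count = 4
#         self.edge_list = [(0, 1), (0, 3), (1, 2), (2, 3)]
#
# The same conventions can be sanity-checked on their own:
vertex_count = 4
edge_list = [(0, 1), (0, 3), (1, 2), (2, 3)]
assert all(0 <= a < b < vertex_count for a, b in edge_list)    # ordered endpoints, in range
assert len(set(edge_list)) == len(edge_list)                   # no repeated edges
print('ok:', vertex_count, 'vertices,', len(edge_list), 'edges')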
if graph_number == 1: @@ -157,6 +157,151 @@ def __init__(self, graph_number): (23, 33), (23, 41), (23, 35), (23, 43), (23, 37), (23, 45), (23, 39), (23, 47), (24, 25), (26, 27), (28, 29), (30, 31), (32, 33), (34, 35), (36, 37), (38, 39), (40, 41), (42, 43), (44, 45), (46, 47)] + elif graph_number == 11: + # minimal graph for testing; 3 vertices 2 edges + self.vertex_count = 3 + + self.edge_list = [(0, 1), (1, 2)] + + elif graph_number == 12: + # moser spindle; smaller than petersen graph, only 8 automorphisms; 7 vertices, 11 edges + self.vertex_count = 7 + + self.edge_list = [(0, 1), (0, 4), (0, 6), + (1, 2), (1, 5), + (2, 3), (2, 5), + (3, 4), (3, 5), + (3, 6), + (4, 6)] + + elif graph_number == 13: + # second Blanusa snark, 4 automorphisms, mirror symmetric; leftmost = 0, rightmost = 1; 18 vertices, 27 edges + + self.vertex_count = 18 + + self.edge_list = [(0, 2), (0, 4), (0, 16), + (1, 3), (1, 5), (1, 17), + (2, 3), (2, 6), + (3, 8), + (4, 5), (4, 9), + (5, 11), + (6, 9), (6, 14), + (7, 10), (7, 12), (7, 13), + (8, 11), (8, 15), + (9, 12), + (10, 14), (10, 15), + (11, 13), + (12, 16), + (13, 17), + (14, 16), + (15, 17)] + + elif graph_number == 14: + # first Loupekine snark, 8 automorphisms, mirror symmetric; leftmost = 15, rightmost = 12; 22 vertices, 33 edges + + self.vertex_count = 22 + + self.edge_list = [(0,1), (0,2), (0,9), # original 1<->2, 1<->3, 1<->10 + (1,3), (1,20), # original 2<->4, 2<->21 + (2,5), (2,6), # original 3<->6, 3<->7 + (3,4), (3,6), # original 4<->5, 4<->7 + (4,5), (4,7), # original 5<->6, 5<->8 + (5,16), # original 6<->17 + (6,10), # original 7<->11 + (7,12), (7,14), # original 8<->13, 8<->15 + (8,9), (8,11), (8,14), # original 9<->10, 9<->12, 9<->15 + (9,13), # original 10<->14 + (10,11), (10,15), # original 11<->12, 11<->16 + (11,12), # original 12<->13 + (12,13), # original 13<->14 + (13,21), # original 14<->22 + (14,17), # original 15<->18 + (15,18), (15,19), # original 16<->19, 16<->20 + (16,17), (16,19), # original 17<->18, 17<->20 + (17,18), # original 18<->19 + (18,20), # original 19<->21 + (19,21), # original 20<->22 + (20,21)] # original 21<->22 + + elif graph_number == 15: + # Szekeres snark; 50 vertices, 75 edges; vertex 1 left, vertex 4 right + + self.vertex_count = 50 + + self.edge_list = [(0, 6), (0, 9), (0, 12), (1, 15), (1, 18), (1, 21), (2, 24), (2, 27), (2, 30), (3, 33), (3, 36), (3, 39), (4, 42), (4, 45), (4, 48), (5, 6), (5, 10), (5, 22), (6, 7), (7, 8), (7, 38), (8, 9), (8, 13), (9, 10), (10, 11), (11, 12), (11, 25), (12, 13), (13, 41), (14, 15), (14, 19), (14, 31), (15, 16), (16, 17), (16, 47), (17, 18), (17, 22), (18, 19), (19, 20), (20, 21), (20, 34), (21, 22), (23, 24), (23, 28), (23, 40), (24, 25), (25, 26), (26, 27), (26, 31), (27, 28), (28, 29), (29, 30), (29, 43), (30, 31), (32, 33), (32, 37), (32, 49), (33, 34), (34, 35), (35, 36), (35, 40), (36, 37), (37, 38), (38, 39), (39, 40), (41, 42), (41, 46), (42, 43), (43, 44), (44, 45), (44, 49), (45, 46), (46, 47), (47, 48), (48, 49)] + + elif graph_number == 16: + # Descartes snark; 210 vertices, 315 edges; vertex 1 left, vertex 4 right + + self.vertex_count = 210 + + self.edge_list = [(0, 18), (0, 33), (0, 38), (1, 2), (1, 18), (1, 22), (2, 3), (2, 8), (3, 4), (3, 7), + (4, 12), (4, 30), (5, 6), (5, 8), (5, 13), (6, 7), (6, 11), (7, 10), (8, 9), (9, 10), + (9, 11), (10, 14), (11, 12), (12, 15), (13, 14), (13, 209), (14, 15), (15, 193), + (16, 20), (16, 21), (16, 38), (17, 18), (17, 19), (17, 20), (19, 21), (19, 37), + (20, 73), (21, 22), (22, 74), (23, 25), (23, 26), (23, 34), (24, 
26), (24, 29), + (24, 31), (25, 29), (25, 39), (26, 41), (27, 28), (27, 36), (27, 44), (28, 35), + (28, 42), (29, 32), (30, 31), (30, 34), (31, 40), (32, 33), (32, 41), (33, 34), + (35, 39), (35, 41), (36, 45), (36, 105), (37, 38), (37, 72), (39, 40), (40, 99), + (42, 43), (42, 50), (43, 46), (43, 47), (44, 47), (44, 49), (45, 46), (45, 48), + (46, 49), (47, 48), (48, 52), (49, 51), (50, 51), (50, 58), (51, 52), (52, 53), + (53, 54), (53, 88), (54, 55), (54, 84), (55, 56), (55, 91), (56, 57), (56, 66), + (57, 58), (57, 63), (58, 62), (59, 60), (59, 62), (59, 181), (60, 63), (60, 65), + (61, 62), (61, 64), (61, 65), (63, 64), (64, 180), (65, 66), (66, 179), (67, 68), + (67, 75), (67, 151), (68, 69), (68, 148), (69, 70), (69, 83), (70, 71), (70, 81), + (71, 72), (71, 76), (72, 73), (73, 74), (74, 75), (75, 155), (76, 77), (76, 79), + (77, 78), (77, 120), (78, 81), (78, 82), (79, 80), (79, 82), (80, 81), (80, 121), + (82, 83), (83, 122), (84, 85), (84, 86), (85, 89), (85, 171), (86, 87), (86, 90), + (87, 88), (87, 170), (88, 89), (89, 90), (90, 91), (91, 172), (92, 94), (92, 99), + (92, 105), (93, 95), (93, 96), (93, 100), (94, 96), (94, 97), (95, 97), (95, 104), + (96, 103), (97, 98), (98, 101), (98, 102), (99, 100), (100, 101), (101, 135), + (102, 103), (102, 114), (103, 104), (104, 105), (106, 107), (106, 114), (106, 119), + (107, 110), (107, 111), (108, 109), (108, 111), (108, 115), (109, 110), (109, 118), + (110, 113), (111, 112), (112, 113), (112, 117), (113, 116), (114, 115), (115, 116), + (116, 133), (117, 118), (117, 123), (118, 119), (119, 120), (120, 121), (121, 122), + (122, 125), (123, 124), (123, 147), (124, 125), (124, 142), (125, 140), (126, 127), + (126, 132), (126, 133), (127, 128), (127, 129), (128, 131), (128, 136), (129, 130), + (129, 138), (130, 131), (130, 134), (131, 132), (132, 139), (133, 134), (134, 135), + (135, 136), (136, 137), (137, 138), (137, 192), (138, 139), (139, 164), (140, 141), + (140, 178), (141, 143), (141, 144), (142, 144), (142, 145), (143, 145), (143, 147), + (144, 146), (145, 177), (146, 147), (146, 176), (148, 149), (148, 173), (149, 152), + (149, 153), (150, 151), (150, 152), (150, 174), (151, 153), (152, 155), (153, 154), + (154, 155), (154, 175), (156, 159), (156, 160), (156, 163), (157, 158), (157, 159), + (157, 165), (158, 160), (158, 168), (159, 162), (160, 161), (161, 162), (161, 166), + (162, 167), (163, 164), (163, 169), (164, 165), (165, 166), (166, 190), (167, 168), + (167, 175), (168, 169), (169, 170), (170, 171), (171, 172), (172, 173), (173, 174), + (174, 175), (176, 177), (176, 208), (177, 178), (178, 179), (179, 180), (180, 181), + (181, 198), (182, 184), (182, 186), (182, 189), (183, 186), (183, 187), (183, 191), + (184, 185), (184, 187), (185, 192), (185, 193), (186, 194), (187, 188), (188, 189), + (188, 195), (189, 190), (190, 191), (191, 192), (193, 194), (194, 195), (195, 206), + (196, 197), (196, 198), (196, 208), (197, 201), (197, 203), (198, 200), (199, 200), + (199, 203), (199, 204), (200, 202), (201, 202), (201, 204), (202, 209), (203, 207), + (204, 205), (205, 206), (205, 208), (206, 207), (207, 209)] + + elif graph_number == 17: + # cube graph; 8 vertices, 12 edges + + self.vertex_count = 8 + + self.edge_list = [(0, 1), (0, 2), (0, 4), + (1, 3), (1, 5), + (2, 3), (2, 6), + (3, 7), + (4, 5), (4, 6), + (5, 7), + (6, 7)] + + elif graph_number == 18: + # 3-prism graph; 6 vertices, 9 edges + + self.vertex_count = 6 + + self.edge_list = [(0, 1), (0, 2), (0, 3), + (1, 2), (1, 4), + (2, 5), + (3, 4), (3, 5), + (4, 5)] + else: 
print('Bad graph_number in GraphProperties initialization -- no graph corresponding to your choice exists.') @@ -166,7 +311,7 @@ def __init__(self, graph_number): def main(): # this is a debugging tool to make sure everything looks right! - for graph_number in range(1, 11): + for graph_number in range(1, 19): g = GraphProperties(graph_number=graph_number) print('****') print('graph', graph_number, 'has', g.vertex_count, 'vertices and', g.edge_count, 'edges.') diff --git a/tangled_adjudicate/utils/generate_terminal_states.py b/tangled_adjudicate/utils/generate_terminal_states.py index 931a7b0..39f8bee 100644 --- a/tangled_adjudicate/utils/generate_terminal_states.py +++ b/tangled_adjudicate/utils/generate_terminal_states.py @@ -9,46 +9,31 @@ from tangled_adjudicate.utils.game_graph_properties import GraphProperties from tangled_adjudicate.utils.find_graph_automorphisms import get_automorphisms - - -def convert_state_string_to_game_state(graph, terminal_state_string): - - vertex_list = terminal_state_string[:graph.vertex_count] - edge_list = terminal_state_string[graph.vertex_count:] - edges = [(graph.edge_list[k][0], graph.edge_list[k][1], edge_list[k]) for k in range(len(edge_list))] - - turn_count = vertex_list.count(1) + vertex_list.count(2) + len(edge_list) - edge_list.count(0) - - # if turn_count is even, it's red's turn - if not turn_count % 2: - current_player_index = 1 - else: - current_player_index = 2 - - game_state = {'num_nodes': graph.vertex_count, 'edges': edges, - 'player1_id': 'player1', 'player2_id': 'player2', 'turn_count': turn_count, - 'current_player_index': current_player_index, - 'player1_node': vertex_list.index(1), 'player2_node': vertex_list.index(2)} - - return game_state +from tangled_adjudicate.utils.utilities import convert_my_game_state_to_erik_game_state def generate_all_tangled_terminal_states(graph_number): # this loads or generates all possible terminal game states for the graph indexed by graph_number and groups them # into lists where each member of the list is connected by an automorphism. Running this function requires either - # loading or generating an automorphism file.The dictionary game_states has as its key a string with the canonical + # loading or generating an automorphism file. The dictionary game_states has as its key a string with the canonical # member of each of these, with the further ['automorphisms'] key being a list of all the states that are symmetries # of the canonical key. The key ['game_state'] is the representation of the key as a game_state object. # # Note that this requires enumerating all possible terminal states, the number of which is # (vertex_count choose 2) * 2 * 3**edge_count, which grows exponentially with edge count. You can do this easily # for graph_number 1, 2, 3, 4, but 5 and up get stupidly large.
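# A quick check of the state-count formula quoted above, (vertex_count choose 2) * 2 * 3**edge_count,
# for the two tiny graphs this module targets (per game_graph_properties.py, graph 2 is the 3-vertex
# triangle and graph 3 has 4 vertices and 6 edges).
import math

def total_terminal_states(vertex_count, edge_count):
    # ordered choice of two distinct player vertices, times three played states per edge
    return math.comb(vertex_count, 2) * 2 * 3 ** edge_count

print(total_terminal_states(3, 3))   # 162 for graph 2; grouping by its 6 automorphisms gives the 27 unique keys noted below
print(total_terminal_states(4, 6))   # 8748 for graph 3; grouping by automorphisms gives the 405 unique keys noted below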
+ # + # graph_number 2 should have 27 keys, and each ['automorphisms'] sub-key should have 6 entries + # graph_number 3 should have 405 keys, and each ['automorphisms'] sub-key should have 12-24 entries (the reason + # why there aren't always 24 is that for some of these keys different automorphisms bring you to the same state) graph = GraphProperties(graph_number) + script_dir = os.path.dirname(os.path.abspath(__file__)) # Get the directory of the current script data_dir = os.path.join(script_dir, '..', 'data') - file_path = os.path.join(data_dir, - "graph_" + str(graph_number) + "_unique_terminal_states.pkl") + list_of_automorphisms = get_automorphisms(graph_number, data_dir=data_dir) + + file_path = os.path.join(data_dir, "graph_" + str(graph_number) + "_unique_terminal_states.pkl") if os.path.isfile(file_path): # if the file already exists, just load it with open(file_path, "rb") as fp: @@ -57,13 +42,11 @@ def generate_all_tangled_terminal_states(graph_number): # add check to make sure you don't ask for something too large print('***************************') user_input = input('There are ' + str(math.comb(graph.vertex_count, 2) * 2 * 3**graph.edge_count) + - ' terminal states -- proceed (y/n)?') + ' total non-unique terminal states -- proceed (y/n)?') if user_input.lower() != 'y': sys.exit(print('exiting...')) print('***************************') - list_of_automorphisms = get_automorphisms(graph_number, data_dir=data_dir) - possible_vertex_states = [] for positions in itertools.permutations(range(graph.vertex_count), 2): lst = [0] * graph.vertex_count @@ -76,65 +59,76 @@ def generate_all_tangled_terminal_states(graph_number): elements = [1, 2, 3] possible_edge_states = list(itertools.product(elements, repeat=graph.edge_count)) + # all_states is a list of lists enumerating ALL the game states all_states = [j + list(k) for k in possible_edge_states for j in possible_vertex_states] - same_group_of_states = {} + # this next part creates a dictionary where the keys are each of the elements of all_states and the values are + # lists of all the states connected to the key by an automorphism. 
Note that different automorphisms can lead + # to the same state, so at some point the list is converted to a set and then back to a list + + all_states_with_symmetries = {} + all_states_no_symmetries = {} + # iterate over all enumerated states for state in all_states: + + # create a list for all the symmetric states + list_of_states_connected_by_symmetry = [] + + # get indices of the red and blue vertices only_vertices = state[:graph.vertex_count] red_vertex_index = only_vertices.index(1) blue_vertex_index = only_vertices.index(2) - same_group_of_states[str(state)] = [] + + # iterate over all automorphisms for automorph in list_of_automorphisms: - new_red_vertex_index = automorph[red_vertex_index] - new_blue_vertex_index = automorph[blue_vertex_index] - transformed_each = [0] * graph.vertex_count - transformed_each[new_red_vertex_index] = 1 - transformed_each[new_blue_vertex_index] = 2 - - edge = np.zeros((graph.vertex_count, graph.vertex_count)) - new_edge = np.zeros((graph.vertex_count, graph.vertex_count)) - cnt = graph.vertex_count - for j in range(graph.vertex_count): - for i in range(j): - edge[i, j] = state[cnt] - cnt += 1 - - cnt = graph.vertex_count - for j in range(graph.vertex_count): - for i in range(j): - if automorph[i] < automorph[j]: - new_edge[i, j] = edge[automorph[i], automorph[j]] - else: - new_edge[i, j] = edge[automorph[j], automorph[i]] - cnt += 1 - - for j in range(graph.vertex_count): - for i in range(j): - transformed_each.append(int(new_edge[i, j])) - same_group_of_states[str(state)].append(transformed_each) - - good_states = {} - cnt = 0 - for k, v in same_group_of_states.items(): - if not cnt % (math.comb(graph.vertex_count, 2) * 2): # 4 choose 2 = 6 * 2 = 12 ..... 3 choose 2 = 3 *2 = 6 math.comb(graph.vertex_count, 2) * 2 - good_states[k] = v - cnt += 1 - - terminal_states = [] - for k, v in good_states.items(): - terminal_states.append(ast.literal_eval(k)) - - print('there are', len(terminal_states), 'unique terminal states. 
Writing to disk ...') - game_states = {} + # initialize the state we want to compute (transforming state under automorph) + state_transformed_under_automorph = [0] * graph.vertex_count + + # write transformed vertices into the transformed state -- this finishes the vertex part + state_transformed_under_automorph[automorph[red_vertex_index]] = 1 + state_transformed_under_automorph[automorph[blue_vertex_index]] = 2 + + # now we want to transform the edges under the automorphism + for edge_idx in range(graph.edge_count): + first_vertex = automorph[graph.edge_list[edge_idx][0]] + second_vertex = automorph[graph.edge_list[edge_idx][1]] + if first_vertex < second_vertex: + transformed_edge = (first_vertex, second_vertex) + else: + transformed_edge = (second_vertex, first_vertex) + + transformed_edge_idx = graph.edge_list.index(transformed_edge) + + state_transformed_under_automorph.append(state[graph.vertex_count + transformed_edge_idx]) - for each in terminal_states: - game_states[str(each)] = {} - game_states[str(each)]['game_state'] = convert_state_string_to_game_state(graph, each) - game_states[str(each)]['automorphisms'] = good_states[str(each)] + list_of_states_connected_by_symmetry.append(str(state_transformed_under_automorph)) + + # remove duplicates + all_states_with_symmetries[str(state)] = list(dict.fromkeys(list_of_states_connected_by_symmetry)) + all_states_no_symmetries[str(state)] = list_of_states_connected_by_symmetry + + sorted_all_states_with_symmetries = dict(sorted(all_states_with_symmetries.items())) + + uniques = [] + duplicates = [] + + for k, v in sorted_all_states_with_symmetries.items(): + if k not in duplicates: + uniques.append(k) + for j in range(1, len(v)): + duplicates.append(v[j]) + + unique_terminal_states = [ast.literal_eval(k) for k in uniques] + print('there are', len(unique_terminal_states), 'unique terminal states. 
Writing to disk ...') + + game_states = {} - data_dir = os.path.join(os.getcwd(), '..', 'data') + for my_game_state in unique_terminal_states: + game_states[str(my_game_state)] = {} + game_states[str(my_game_state)]['game_state'] = convert_my_game_state_to_erik_game_state(my_game_state, graph.vertex_count, graph.edge_list) + game_states[str(my_game_state)]['automorphisms'] = all_states_with_symmetries[str(my_game_state)] with open(os.path.join(data_dir, "graph_" + str(graph_number) + "_unique_terminal_states.pkl"), "wb") as fp: pickle.dump(game_states, fp) @@ -145,8 +139,8 @@ def generate_all_tangled_terminal_states(graph_number): def main(): # this generates all terminal states for graphs 2 and 3 - for graph_number in range(2, 4): - gs = generate_all_tangled_terminal_states(graph_number) + gs2 = generate_all_tangled_terminal_states(graph_number=2) + gs3 = generate_all_tangled_terminal_states(graph_number=3) if __name__ == "__main__": diff --git a/tangled_adjudicate/utils/how_to_adjudicate_states.py b/tangled_adjudicate/utils/how_to_adjudicate_states.py index 942671b..ba07f1d 100644 --- a/tangled_adjudicate/utils/how_to_adjudicate_states.py +++ b/tangled_adjudicate/utils/how_to_adjudicate_states.py @@ -1,15 +1,13 @@ """ how to use provided solvers to adjudicate Tangled terminal states """ -import pickle import sys import os -import ast import time import numpy as np -from tangled_adjudicate.adjudicators.adjudicate import Adjudicator -from tangled_adjudicate.utils.parameters import Params -from tangled_adjudicate.utils.game_graph_properties import GraphProperties -from tangled_adjudicate.utils.generate_terminal_states import convert_state_string_to_game_state +from tangled_adjudicate.adjudicators.simulated_annealing import SimulatedAnnealingAdjudicator +from tangled_adjudicate.adjudicators.quantum_annealing import QuantumAnnealingAdjudicator +from tangled_adjudicate.adjudicators.lookup_table import LookupTableAdjudicator +from tangled_adjudicate.adjudicators.schrodinger import SchrodingerEquationAdjudicator def main(): @@ -17,19 +15,22 @@ def main(): # there are two example_game_state dictionaries provided, which are terminal states in graph_number 2 and 3 # respectively, that are of the sort that are closest to the draw line at score = +- 1/2 - # solver_list = ['simulated_annealing', 'schrodinger_equation', 'quantum_annealing', 'look_up'] - solver_list = ['simulated_annealing', 'lookup_table'] + # set graph_number + graph_number = 2 + + # choose solvers to use + solver_list = ['simulated_annealing', 'schrodinger_equation', 'quantum_annealing', 'lookup_table'] precision_digits = 4 # just to clean up print output np.set_printoptions(suppress=True) # remove scientific notation - params = Params() - adjudicator = Adjudicator(params) + args = {'data_dir': os.path.join(os.getcwd(), '..', 'data'), + 'graph_number': graph_number} example_game_state = None # draw; score=0; ferromagnetic ring - if params.GRAPH_NUMBER == 2: + if graph_number == 2: example_game_state = {'num_nodes': 3, 'edges': [(0, 1, 2), (0, 2, 2), (1, 2, 2)], 'player1_id': 'player1', 'player2_id': 'player2', 'turn_count': 5, 'current_player_index': 1, 'player1_node': 1, 'player2_node': 2} @@ -37,10 +38,10 @@ def main(): # red wins, score +2/3; this is one of the states closest to the draw line # note that quantum_annealing in this default uses the D-Wave mock software solver and won't give # the right answer as its samples aren't unbiased -- if you want the quantum_annealing solver to - # run on hardware set 
self.USE_MOCK_DWAVE_SAMPLER = False in /utils/parameters.py and ensure you have + # run on hardware set QAParameters.use_mock = False in /adjudicators/quantum_annealing.py and ensure you have # hardware access and everything is set up - if params.GRAPH_NUMBER == 3: + if graph_number == 3: example_game_state = {'num_nodes': 4, 'edges': [(0, 1, 3), (0, 2, 1), (0, 3, 3), (1, 2, 1), (1, 3, 3), (2, 3, 1)], 'player1_id': 'player1', 'player2_id': 'player2', 'turn_count': 8, @@ -53,8 +54,22 @@ def main(): start = time.time() - # equivalent to e.g. results = adjudicator.simulated_annealing(example_game_state) - results = getattr(adjudicator, solver_to_use)(example_game_state) + adjudicator = None + + if solver_to_use == 'simulated_annealing': + adjudicator = SimulatedAnnealingAdjudicator() + else: + if solver_to_use == 'quantum_annealing': + adjudicator = QuantumAnnealingAdjudicator() + else: + if solver_to_use == 'lookup_table': + adjudicator = LookupTableAdjudicator() + else: + if solver_to_use == 'schrodinger_equation': + adjudicator = SchrodingerEquationAdjudicator() + + adjudicator.setup(**args) + results = adjudicator.adjudicate(example_game_state) print('elapsed time for', solver_to_use, 'was', round(time.time() - start, precision_digits), 'seconds.') @@ -77,8 +92,6 @@ def main(): print('influence vector:', [round(results['influence_vector'][k], precision_digits) for k in range(len(results['influence_vector']))]) - print() - if __name__ == "__main__": sys.exit(main()) diff --git a/tangled_adjudicate/utils/parameters.py b/tangled_adjudicate/utils/parameters.py deleted file mode 100644 index dce9240..0000000 --- a/tangled_adjudicate/utils/parameters.py +++ /dev/null @@ -1,41 +0,0 @@ -""" adjudication and support parameters """ - -class Params(object): - def __init__(self): - self.GRAPH_NUMBER = 2 # this is the index of the graph to use; defined in /utils/game_graph_properties.py - # just a reminder which are which: - # 1 = 2 vertices - # 2 = 3 vertices in triangle - # 3 = 4 vertices, 6 edges - # 4 = 6 vertices, 1 hexagon, 6 edges - # 5 = 10 vertices, 15 edges, petersen graph - # 6 = 16 vertices, 32 edges, non-planar, tesseract - - self.EPSILON = 0.5 # this is the boundary between a draw and a win - - self.NUM_READS_SA = 1000 # this is for simulated annealing - - # These are parameters related to the use of QC hardware, if you're not using QC you can just leave these - # The defaults here are no shimming, no gauge transforms, only use M=1 automorphism, and collect a lot of - # samples (N=1000) - - self.USE_QC = False # set to False if you just want to use e.g. 
simulated annealer - self.USE_MOCK_DWAVE_SAMPLER = False # set to True if you want a software version of the hardware (doesn't sample like the HW tho so don't trust it, just for debugging) - self.QC_SOLVER_TO_USE = 'Advantage2_prototype2.6' # modify if you want to use a different QC - - self.NUMBER_OF_CHIP_RUNS = 1 # this is M - self.NUM_READS_QC = 1000 # this is N - self.ANNEAL_TIME_IN_NS = 5 # this is the fastest the QC can sweep - - self.USE_GAUGE_TRANSFORM = False - self.USE_SHIM = False - - self.ALPHA_PHI = 0.00001 - self.SHIM_ITERATIONS = 10 - - -class MinimalAdjudicationParameters(object): - def __init__(self): - self.EPSILON = 0.5 # this is the boundary between a draw and a win - self.USE_QC = False - self.NUM_READS_SA = 1000 # this is for simulated annealing diff --git a/tangled_adjudicate/utils/utilities.py b/tangled_adjudicate/utils/utilities.py index 48ade0a..4874ebc 100644 --- a/tangled_adjudicate/utils/utilities.py +++ b/tangled_adjudicate/utils/utilities.py @@ -1,52 +1,6 @@ """ a place to put utility functions """ +import ast import gdown -import networkx as nx - - -def game_state_to_ising_model(game_state): - # maps edge state to J value 0, 1 => J = 0; 2 => J = -1 FM; 3 => J = +1 AFM - edge_state_map = {0: 0, 1: 0, 2: -1, 3: 1} - - vertex_count = game_state['num_nodes'] - edge_list = [(each[0], each[1]) for each in game_state['edges']] - - h = {} - jay = {} - - for k in range(vertex_count): - h[k] = 0 - - for k in range(len(edge_list)): - jay[edge_list[k]] = edge_state_map[game_state['edges'][k][2]] - - return h, jay - - -def game_state_is_terminal(game_state): - # a state is terminal if both players have chosen vertices and all edges have been played - # game_state = {'num_nodes': 6, 'edges': [(0, 1, 1), (0, 2, 1), (0, 3, 2), (0, 4, 3), (0, 5, 2), (1, 2, 1), - # (1, 3, 2), (1, 4, 3), (1, 5, 3), (2, 3, 1), (2, 4, 2), (2, 5, 3), (3, 4, 2), (3, 5, 1), (4, 5, 2)], - # 'player1_id': 'player1', 'player2_id': 'player2', 'turn_count': 17, 'current_player_index': 1, - # 'player1_node': 1, 'player2_node': 3} - - edge_states = [each[2] for each in game_state['edges']] - - if edge_states.count(0) == 0 and game_state['player1_node'] != -1 and game_state['player2_node'] != -1: - return True - else: - return False - - -def find_isolated_vertices(n_var, base_jay): - # returns a list of isolated / disconnected vertices if there are any; returns empty list if not - my_graph = nx.Graph() - my_graph.add_nodes_from([k for k in range(n_var)]) - my_graph.add_edges_from([k for k, v in base_jay.items() if v != 0]) - - # Find isolated vertices (vertices with no edges) - isolated_vertices = list(nx.isolates(my_graph)) - - return isolated_vertices def get_tso(graph_number, file_path): @@ -85,9 +39,14 @@ def convert_erik_game_state_to_my_game_state(game_state): return my_state -def convert_to_erik_game_state_for_adjudication(my_state, number_of_vertices, list_of_edge_tuples): +def convert_my_game_state_to_erik_game_state(my_state, number_of_vertices, list_of_edge_tuples): + # extract erik state from geordie state + if isinstance(my_state, str): + my_state = ast.literal_eval(my_state) my_vertices = my_state[:number_of_vertices] + my_edges = my_state[number_of_vertices:] + turn_count = 0 try: @@ -102,22 +61,20 @@ def convert_to_erik_game_state_for_adjudication(my_state, number_of_vertices, li except ValueError: player_2_vertex = -1 - my_edges = my_state[number_of_vertices:] - turn_count += my_edges.count(1) + my_edges.count(2) + my_edges.count(3) # if turn_count is even, it's player 1 (red)'s turn 
current_player_idx = 1 if turn_count % 2 == 0 else 2 - erik_edges = [] - for k in range(len(list_of_edge_tuples)): - erik_edges.append((list_of_edge_tuples[k][0], list_of_edge_tuples[k][1], my_edges[k])) + erik_edges = [(list_of_edge_tuples[k][0], list_of_edge_tuples[k][1], my_edges[k]) for k in range(len(my_edges))] game_state = {'num_nodes': number_of_vertices, - # 'edges': [(0, 1, 3), (0, 2, 1), (0, 3, 3), (1, 2, 1), (1, 3, 3), (2, 3, 1)], 'edges': erik_edges, - 'player1_id': 'player1', 'player2_id': 'player2', 'turn_count': turn_count, - 'current_player_index': current_player_idx, 'player1_node': player_1_vertex, + 'player1_id': 'player1', + 'player2_id': 'player2', + 'turn_count': turn_count, + 'current_player_index': current_player_idx, + 'player1_node': player_1_vertex, 'player2_node': player_2_vertex} return game_state
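# A minimal usage sketch for the converter above, for the 3-vertex triangle graph (graph_number 2).
# The list-form state below is hypothetical but follows this module's convention: vertex entries
# first (0 = unclaimed, 1 = player 1, 2 = player 2), then one entry per edge in edge_list order
# (here every edge is set to 2, i.e. the FM coupling).
from tangled_adjudicate.utils.utilities import convert_my_game_state_to_erik_game_state

triangle_edges = [(0, 1), (0, 2), (1, 2)]
my_state = [0, 1, 2, 2, 2, 2]      # player 1 on vertex 1, player 2 on vertex 2, all three edges played

game_state = convert_my_game_state_to_erik_game_state(my_state, 3, triangle_edges)
print(game_state['edges'])         # [(0, 1, 2), (0, 2, 2), (1, 2, 2)]
print(game_state['turn_count'])    # 5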