forked from NVIDIA/cuda-quantum
-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy pathquantum_platform.h
More file actions
316 lines (256 loc) · 12.3 KB
/
quantum_platform.h
File metadata and controls
316 lines (256 loc) · 12.3 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
/****************************************************************-*- C++ -*-****
* Copyright (c) 2022 - 2026 NVIDIA Corporation & Affiliates. *
* All rights reserved. *
* *
* This source code and the accompanying materials are made available under *
* the terms of the Apache License 2.0 which accompanies this distribution. *
******************************************************************************/
#pragma once

#include "common/CodeGenConfig.h"
#include "common/ExecutionContext.h"
#include "common/NoiseModel.h"
#include "common/ObserveResult.h"
#include "common/ThunkInterface.h"
#include "cudaq/remote_capabilities.h"
#include "cudaq/utils/cudaq_utils.h"
#include "nvqpp_interface.h"
#include <cstdint>
#include <cstdlib>
#include <cstring>
#include <cxxabi.h>
#include <functional>
#include <future>
#include <iosfwd>
#include <memory>
#include <optional>
#include <string>
#include <utility>
#include <vector>
namespace mlir {
class ModuleOp;
}
namespace cudaq {
class QPU;
class gradient;
class optimizer;
struct RuntimeTarget;
class LinkedLibraryHolder;
namespace __internal__ {
class TargetSetter;
}
/// Typedefs for defining the connectivity structure of a QPU
using QubitEdge = std::pair<std::size_t, std::size_t>;
using QubitConnectivity = std::vector<QubitEdge>;
/// A sampling tasks takes no input arguments and returns
/// a sample_result instance.
using KernelExecutionTask = std::function<sample_result()>;
/// An observation tasks takes no input arguments and returns
/// a double expectation value.
using ObserveTask = std::function<observe_result()>;
/// The quantum_platform corresponds to a specific quantum architecture.
/// The quantum_platform exposes a public API for programmers to
/// query specific information about the targeted QPU(s) (e.g. number
/// of qubits, qubit connectivity, etc.). This type is meant to
/// be subclassed for concrete realizations of quantum platforms, which
/// are intended to populate this platformQPUs member of this base class.
class quantum_platform {
public:
quantum_platform() = default;
virtual ~quantum_platform() = default;
/// Fetch the connectivity info
std::optional<QubitConnectivity> connectivity();
/// Get the number of qubits for the QPU with ID qpu_id.
std::size_t get_num_qubits(std::size_t qpu_id = 0) const;
/// @brief Return true if this platform exposes multiple QPUs and
/// supports parallel distribution of quantum tasks.
virtual bool supports_task_distribution() const { return false; }
/// Specify the execution context for the current thread.
// [remove at]: runtime refactor release
[[deprecated("set_exec_ctx is deprecated - please use with_execution_context "
"instead.")]] void
set_exec_ctx(ExecutionContext *ctx, std::size_t qid = 0);
/// Return the current execution context
// [remove at]: runtime refactor release
[[deprecated("get_exec_ctx is deprecated - please use "
"cudaq::getExecutionContext() instead.")]] ExecutionContext *
get_exec_ctx() const {
return getExecutionContext();
}
/// Reset the execution context for the current thread.
// [remove at]: runtime refactor release
[[deprecated("reset_exec_ctx is deprecated - please use "
"with_execution_context instead.")]] void
reset_exec_ctx(std::size_t qid = 0);
void set_current_qpu(const std::size_t device_id);
/// @brief Execute the given function within the given execution context.
template <typename Callable, typename... Args>
auto with_execution_context(ExecutionContext &ctx, Callable &&f,
Args &&...args) {
// Save the outer execution context (if any) so we can restore it after.
auto *outerContext = getExecutionContext();
configureExecutionContext(ctx);
detail::setExecutionContext(&ctx);
beginExecution();
// Cleanup runs after the kernel returns or throws. It finalizes results
// and tears down, then resets the execution context.
// The context reset always runs even if finalization throws.
auto cleanup = [this, &ctx, &outerContext]() {
detail::try_finally(
[this, &ctx] {
finalizeExecutionContext(ctx);
endExecution();
},
[&outerContext] {
detail::resetExecutionContext();
if (outerContext)
detail::setExecutionContext(outerContext);
});
};
if constexpr (std::is_void_v<std::invoke_result_t<Callable, Args...>>) {
detail::try_finally([&] { f(std::forward<Args>(args)...); }, cleanup);
} else {
return detail::try_finally([&] { return f(std::forward<Args>(args)...); },
cleanup);
}
}
/// Get the number of QPUs available with this platform.
std::size_t num_qpus() const { return platformQPUs.size(); }
/// Return whether this platform is a simulator.
// TODO: replace const std::size_t with std::size_t
bool is_simulator(const std::size_t qpu_id = 0) const;
/// @brief Return whether the QPU supports explicit measurements.
bool supports_explicit_measurements(std::size_t qpu_id = 0) const;
/// The name of the platform, which also corresponds to the name of the
/// platform file.
std::string name() const { return platformName; }
/// @brief Return true if the QPU is remote.
// TODO: make is a const member function
bool is_remote(const std::size_t qpu_id = 0);
/// @brief Return true if QPU is locally emulating a remote QPU
// TODO: replace const std::size_t with std::size_t
bool is_emulated(const std::size_t qpu_id = 0) const;
/// @brief Set the noise model for @p qpu_id on this platform.
void set_noise(const noise_model *model, std::size_t qpu_id = 0);
/// @brief Return the noise model for @p qpu_id on this platform.
const noise_model *get_noise(std::size_t qpu_id = 0);
/// @brief Get the remote capabilities (only applicable for remote platforms)
RemoteCapabilities get_remote_capabilities(std::size_t qpu_id = 0) const;
/// Get code generation configuration values
CodeGenConfig get_codegen_config();
/// Get runtime target information
// This includes information about the target configuration (config file) and
// any other user-defined settings (nvq++ target option compile flags or
// `set_target` arguments).
const RuntimeTarget *get_runtime_target() const;
/// @brief Turn off any noise models.
void reset_noise(std::size_t qpu_id = 0);
/// Specify the execution context for this platform.
void configureExecutionContext(ExecutionContext &ctx) const;
/// @brief Post-process the results stored in @p ctx after execution on this
/// platform.
void finalizeExecutionContext(cudaq::ExecutionContext &ctx) const;
/// @brief Begin a new execution on this platform.
void beginExecution();
/// @brief End the current execution on this platform.
void endExecution();
/// Enqueue an asynchronous sampling task.
std::future<sample_result> enqueueAsyncTask(const std::size_t qpu_id,
KernelExecutionTask &t);
/// @brief Enqueue a general task that runs on the specified QPU
void enqueueAsyncTask(const std::size_t qpu_id, std::function<void()> &f);
/// @brief Launch a VQE operation on the platform.
void launchVQE(const std::string kernelName, const void *kernelArgs,
cudaq::gradient *gradient, const cudaq::spin_op &H,
cudaq::optimizer &optimizer, const int n_params,
const std::size_t shots, std::size_t qpu_id = 0);
// This method is the hook for the kernel rewrites to invoke quantum kernels.
[[nodiscard]] KernelThunkResultType
launchKernel(const std::string &kernelName, KernelThunkType kernelFunc,
void *args, std::uint64_t voidStarSize,
std::uint64_t resultOffset, const std::vector<void *> &rawArgs,
std::size_t qpu_id = 0);
void launchKernel(const std::string &kernelName, const std::vector<void *> &,
std::size_t qpu_id = 0);
// This method launches a kernel from a ModuleOp that has already been
// created.
[[nodiscard]] KernelThunkResultType
launchModule(const std::string &kernelName, mlir::ModuleOp module,
const std::vector<void *> &rawArgs, mlir::Type resultTy,
std::size_t qpu_id);
[[nodiscard]] void *
specializeModule(const std::string &kernelName, mlir::ModuleOp module,
const std::vector<void *> &rawArgs, mlir::Type resultTy,
std::optional<cudaq::JitEngine> &cachedEngine,
std::size_t qpu_id, bool isEntryPoint);
/// List all available platforms
static std::vector<std::string> list_platforms();
static std::string demangle(char const *mangled) {
auto ptr = std::unique_ptr<char, decltype(&std::free)>{
abi::__cxa_demangle(mangled, nullptr, nullptr, nullptr), std::free};
return {ptr.get()};
}
/// @brief Called by the runtime to notify that a new random seed value is
/// set.
virtual void onRandomSeedSet(std::size_t seed);
/// @brief Turn off any custom logging stream.
void resetLogStream();
/// @brief Get the stream for info logging.
// Returns null if no specific stream was set.
std::ostream *getLogStream();
/// @brief Set the info logging stream.
void setLogStream(std::ostream &logStream);
protected:
friend class cudaq::LinkedLibraryHolder;
friend class cudaq::__internal__::TargetSetter;
/// @brief Set the target backend, by default do nothing, let subclasses
/// override
/// @param name
virtual void setTargetBackend(const std::string &name) {}
/// The runtime target settings
std::unique_ptr<RuntimeTarget> runtimeTarget;
/// Code generation configuration
std::optional<CodeGenConfig> codeGenConfig;
/// The Platform QPUs, populated by concrete subtypes
std::vector<std::unique_ptr<QPU>> platformQPUs;
/// Name of the platform.
std::string platformName;
/// Optional logging stream for platform output.
// If set, the platform and its QPUs will print info log to this stream.
// Otherwise, default output stream (std::cout) will be used.
std::ostream *platformLogStream = nullptr;
private:
// Helper to validate QPU Id
void validateQpuId(std::size_t qpuId) const;
};
/// Entry point for the auto-generated kernel execution path. TODO: Needs to be
/// tied to the quantum platform instance somehow. Note that the compiler cannot
/// provide that information.
extern "C" {
// Client-server (legacy) interface.
[[nodiscard]] KernelThunkResultType
altLaunchKernel(const char *kernelName, KernelThunkType kernel, void *args,
std::uint64_t argsSize, std::uint64_t resultOffset);
// Streamlined interface for launching kernels. Argument synthesis and JIT
// compilation *must* happen on the local machine.
[[nodiscard]] KernelThunkResultType
streamlinedLaunchKernel(const char *kernelName,
const std::vector<void *> &rawArgs);
// Hybrid of the client-server and streamlined approaches. Letting JIT
// compilation happen either early or late and can handle return values from
// each kernel launch.
[[nodiscard]] KernelThunkResultType
hybridLaunchKernel(const char *kernelName, KernelThunkType kernel, void *args,
std::uint64_t argsSize, std::uint64_t resultOffset,
const std::vector<void *> &rawArgs);
} // extern "C"
} // namespace cudaq
#define CONCAT(a, b) CONCAT_INNER(a, b)
#define CONCAT_INNER(a, b) a##b
#define CUDAQ_REGISTER_PLATFORM(NAME, PRINTED_NAME) \
extern "C" { \
cudaq::quantum_platform *getQuantumPlatform() { \
thread_local static std::unique_ptr<cudaq::quantum_platform> m_platform = \
std::make_unique<NAME>(); \
return m_platform.get(); \
} \
cudaq::quantum_platform *CONCAT(getQuantumPlatform_, PRINTED_NAME)() { \
thread_local static std::unique_ptr<cudaq::quantum_platform> m_platform = \
std::make_unique<NAME>(); \
return m_platform.get(); \
} \
}