from genQC.platform.tokenizer.circuits_tokenizer import CircuitTokenizer

CUDA-Q circuits backend
CUDA-Q based quantum circuit backend.
Utils
ParametrizedCudaqKernel
ParametrizedCudaqKernel (kernel:<function kernel>, params:list[float])
Backend
CircuitsCudaqBackend
CircuitsCudaqBackend (target:str='qpp-cpu')
Backends implement at least these functions.
Test
genqc <-> backend
# Encoded circuit: one row per qubit, one column per gate/time slot.
# The absolute token value selects the gate from `vocabulary`; the sign
# distinguishes roles (e.g. the -2 entries are the controls of the ccx,
# the +2 entry its target — see the decoded instructions below).
tensor = torch.tensor([
    [1, 0, -2, 0, 0, 5],
    [0, 0, 2, 3, 4, 5],
    [0, 6, -2, 3, 0, 0],
], dtype=torch.int32)
# Gate angles in the tokenizer's normalized parameter encoding;
# decode maps them back to radians (0.1, 2.3, 0.7 in the output below).
params_tensor = torch.tensor([[0, 0.1, 0, 0, 2.3, 0.7]])/(2*np.pi) - 1
# Token assignment for each supported gate name.
vocabulary = {"h":1, "ccx":2, "swap":3, "rx":4, "cp": 5, "ry":6}
tokenizer = CircuitTokenizer(vocabulary)
# Decode the token + parameter tensors into genQC circuit instructions.
instructions = tokenizer.decode(tensor, params_tensor)
instructions.print()
CircuitInstruction(name='h', control_nodes=[], target_nodes=[0], params=[0.0])
CircuitInstruction(name='ry', control_nodes=[], target_nodes=[2], params=[0.10000012069940567])
CircuitInstruction(name='ccx', control_nodes=[0, 2], target_nodes=[1], params=[0.0])
CircuitInstruction(name='swap', control_nodes=[], target_nodes=[1, 2], params=[0.0])
CircuitInstruction(name='rx', control_nodes=[], target_nodes=[1], params=[2.299999713897705])
CircuitInstruction(name='cp', control_nodes=[], target_nodes=[0, 1], params=[0.7000001072883606])
# Lower the decoded genQC instructions to a parametrized CUDA-Q kernel.
backend = CircuitsCudaqBackend()
parametrizedCudaqKernel = backend.genqc_to_backend(instructions)

# Dimension of the state space and the initial amplitude vector
# (all weight on the first basis state) passed to the kernel.
N = 2**instructions.num_qubits
c = [1] + [0] * (N - 1)

kernel = parametrizedCudaqKernel.kernel
thetas = parametrizedCudaqKernel.params

# Draw the circuit, then sample its measurement distribution.
print(cudaq.draw(kernel, c, thetas))
results = cudaq.sample(kernel, c, thetas)
print("Measurement distribution:" + str(results))
        ╭───╮
q0 : ───┤ h ├─────●─────────────────────●─────
╰───╯ ╭─┴─╮ ╭─────────╮╭────┴────╮
q1 : ───────────┤ x ├─╳─┤ rx(2.3) ├┤ r1(0.7) ├
╭─────────╮╰─┬─╯ │ ╰─────────╯╰─────────╯
q2 : ┤ ry(0.1) ├──●───╳───────────────────────
╰─────────╯
Measurement distribution:{ 000:85 010:401 100:85 110:429 }
# Full 2^n x 2^n complex unitary matrix of the compiled circuit.
U = backend.get_unitary(parametrizedCudaqKernel, instructions.num_qubits)
print(np.round(U, 2))
[[ 0.29-0.03j  0.29-0.03j  0.  +0.j    0.  +0.j   -0.01-0.64j -0.01-0.64j  0.  +0.j    0.  +0.j  ]
[ 0.29+0.j -0.29+0.j 0. -0.03j 0. +0.03j -0.01+0.j 0.01+0.j 0. -0.64j 0. +0.64j]
[ 0.01-0.64j 0.01-0.64j 0. +0.j 0. +0.j 0.29+0.03j 0.29+0.03j 0. +0.j 0. +0.j ]
[ 0.42-0.49j -0.42+0.49j 0.01+0.01j -0.01-0.01j -0.02+0.02j 0.02-0.02j 0.22+0.19j -0.22-0.19j]
[ 0. +0.j 0. +0.j 0.29-0.03j 0.29-0.03j 0. +0.j 0. +0.j -0.01-0.64j -0.01-0.64j]
[ 0. -0.03j 0. +0.03j 0.29+0.j -0.29+0.j 0. -0.64j 0. +0.64j -0.01+0.j 0.01+0.j ]
[ 0. +0.j 0. +0.j 0.01-0.64j 0.01-0.64j 0. +0.j 0. +0.j 0.29+0.03j 0.29+0.03j]
[ 0.01+0.01j -0.01-0.01j 0.42-0.49j -0.42+0.49j 0.22+0.19j -0.22-0.19j -0.02+0.02j 0.02-0.02j]]
U = np.matrix(U)
assert np.allclose(U.H@U, np.eye(N)) and np.allclose(U@U.H, np.eye(N))Time targets
def time_target(target):
if cudaq.has_target(target):
cudaq.reset_target()
cudaq.set_target(target)
res = %timeit -o -q backend.get_unitary(parametrizedCudaqKernel, instructions.num_qubits)
print(f"Timeit {target=}: {str(res)}")targets = ["qpp-cpu", "nvidia"]
for target in targets:
time_target(target)Timeit target='qpp-cpu': 1.08 ms ± 58.9 μs per loop (mean ± std. dev. of 7 runs, 1,000 loops each)
Timeit target='nvidia': 13.5 ms ± 3.14 ms per loop (mean ± std. dev. of 7 runs, 10 loops each)