import numpy as np
import torch
import cudaq

from genQC.platform.tokenizer.circuits_tokenizer import CircuitTokenizer
CUDA-Q circuits backend
CUDA-Q-based quantum circuit backend.
Utils
ParametrizedCudaqKernel
ParametrizedCudaqKernel (kernel:<function kernel>, params:list[float])
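A kernel and its parameter values travel together in this container. A minimal usage sketch, assuming a hand-written CUDA-Q kernel (demo_kernel and its angle are illustrative, not part of the API):

@cudaq.kernel
def demo_kernel(thetas: list[float]):
    q = cudaq.qvector(2)
    h(q[0])              # Hadamard on qubit 0
    rx(thetas[0], q[1])  # parametrized rotation on qubit 1

pk = ParametrizedCudaqKernel(kernel=demo_kernel, params=[0.5])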
Backend
CircuitsCudaqBackend
CircuitsCudaqBackend (target:str='qpp-cpu')
Backends implement at least these functions.
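For example, to construct the backend on the default CPU simulator (any CUDA-Q target name can be passed; "qpp-cpu" is the default):

backend = CircuitsCudaqBackend(target="qpp-cpu")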
Test
genqc <-> backend
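We first encode a small circuit as a token tensor with matching parameters, decode it into genQC circuit instructions, and then hand those to the CUDA-Q backend.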
tensor = torch.tensor([
    [1, 0, -2, 0, 0, 5],
    [0, 0, 2, 3, 4, 5],
    [0, 6, -2, 3, 0, 0],
], dtype=torch.int32)   # token tensor: rows = qubits, columns = time steps; negative tokens mark control qubits

params_tensor = torch.tensor([[0, 0.1, 0, 0, 2.3, 0.7]])/(2*np.pi) - 1   # gate angles, encoded into the tokenizer's parameter range
= {"h":1, "ccx":2, "swap":3, "rx":4, "cp": 5, "ry":6}
vocabulary = CircuitTokenizer(vocabulary)
tokenizer = tokenizer.decode(tensor, params_tensor)
instructions
print() instructions.
CircuitInstruction(name='h', control_nodes=[], target_nodes=[0], params=[0.0])
CircuitInstruction(name='ry', control_nodes=[], target_nodes=[2], params=[0.10000012069940567])
CircuitInstruction(name='ccx', control_nodes=[0, 2], target_nodes=[1], params=[0.0])
CircuitInstruction(name='swap', control_nodes=[], target_nodes=[1, 2], params=[0.0])
CircuitInstruction(name='rx', control_nodes=[], target_nodes=[1], params=[2.299999713897705])
CircuitInstruction(name='cp', control_nodes=[], target_nodes=[0, 1], params=[0.7000001072883606])
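The decoded angles match the encoded values up to float32 precision. As a quick round-trip check (assuming decode inverts the affine encoding used for params_tensor above):

recovered = (params_tensor + 1) * 2*np.pi
print(recovered)   # approx. tensor([[0.0, 0.1, 0.0, 0.0, 2.3, 0.7]])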
N = 2**instructions.num_qubits   # Hilbert-space dimension (here 2**3 = 8)
backend                 = CircuitsCudaqBackend()
parametrizedCudaqKernel = backend.genqc_to_backend(instructions)

kernel, thetas = parametrizedCudaqKernel.kernel, parametrizedCudaqKernel.params
c    = [0] * N
c[0] = 1   # prepare the initial state |000> as an amplitude vector

print(cudaq.draw(kernel, c, thetas))

results = cudaq.sample(kernel, c, thetas)
print("Measurement distribution:" + str(results))
     ╭───╮                                     
q0 : ┤ h ├─────────●─────────────────────●─────
     ╰───╯       ╭─┴─╮   ╭─────────╮╭────┴────╮
q1 : ────────────┤ x ├─╳─┤ rx(2.3) ├┤ r1(0.7) ├
     ╭─────────╮ ╰─┬─╯ │ ╰─────────╯╰─────────╯
q2 : ┤ ry(0.1) ├───●───╳───────────────────────
     ╰─────────╯                               
Measurement distribution:{ 000:85 010:424 100:83 101:1 110:406 111:1 }
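The sampled counts can be post-processed through the SampleResult API; a small sketch using the documented most_probable and probability methods:

top = results.most_probable()
print(top, results.probability(top))   # e.g. '010' and its empirical probability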
U = backend.get_unitary(parametrizedCudaqKernel, instructions.num_qubits)
print(np.round(U, 2))
[[ 0.29-0.03j 0.29-0.03j 0. +0.j 0. +0.j -0.01-0.64j -0.01-0.64j 0. +0.j 0. +0.j ]
[ 0.29+0.j -0.29+0.j 0. -0.03j 0. +0.03j -0.01+0.j 0.01+0.j 0. -0.64j 0. +0.64j]
[ 0.01-0.64j 0.01-0.64j 0. +0.j 0. +0.j 0.29+0.03j 0.29+0.03j 0. +0.j 0. +0.j ]
[ 0.42-0.49j -0.42+0.49j 0.01+0.01j -0.01-0.01j -0.02+0.02j 0.02-0.02j 0.22+0.19j -0.22-0.19j]
[ 0. +0.j 0. +0.j 0.29-0.03j 0.29-0.03j 0. +0.j 0. +0.j -0.01-0.64j -0.01-0.64j]
[ 0. -0.03j 0. +0.03j 0.29+0.j -0.29+0.j 0. -0.64j 0. +0.64j -0.01+0.j 0.01+0.j ]
[ 0. +0.j 0. +0.j 0.01-0.64j 0.01-0.64j 0. +0.j 0. +0.j 0.29+0.03j 0.29+0.03j]
[ 0.01+0.01j -0.01-0.01j 0.42-0.49j -0.42+0.49j 0.22+0.19j -0.22-0.19j -0.02+0.02j 0.02-0.02j]]
U = np.matrix(U)
assert np.allclose(U.H @ U, np.eye(N)) and np.allclose(U @ U.H, np.eye(N))
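Since np.matrix is a legacy NumPy class, the same unitarity check can be written against a plain ndarray; a minimal equivalent:

U = np.asarray(U)
assert np.allclose(U.conj().T @ U, np.eye(N)) and np.allclose(U @ U.conj().T, np.eye(N))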
Time targets
def time_target(target):
    if cudaq.has_target(target):
        cudaq.reset_target()
        cudaq.set_target(target)
        res = %timeit -o -q backend.get_unitary(parametrizedCudaqKernel, instructions.num_qubits)
        print(f"Timeit {target=}: {str(res)}")

targets = ["qpp-cpu", "nvidia"]
for target in targets:
    time_target(target)
Timeit target='qpp-cpu': 705 μs ± 758 ns per loop (mean ± std. dev. of 7 runs, 1,000 loops each)
Timeit target='nvidia': 4.68 ms ± 23.7 μs per loop (mean ± std. dev. of 7 runs, 100 loops each)
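At this circuit size the CPU simulator wins; the GPU-accelerated nvidia target likely spends most of its time on per-call overhead (target setup, kernel launch, host-device transfers), which only amortizes at larger qubit counts.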