Publishing fixes for single-GPU runner (#1672)
Co-authored-by: Bettina Heim <[email protected]>
bmhowe23 and bettinaheim committed May 14, 2024
1 parent 1926eea · commit 1f8dd79
Showing 2 changed files with 16 additions and 6 deletions.
11 changes: 8 additions & 3 deletions docs/sphinx/examples/python/tutorials/executing_circuits.ipynb
@@ -150,9 +150,14 @@
" mz(qvector)\n",
"\n",
"\n",
"# Asynchronous execution on multiple qpus via nvidia gpus.\n",
"result_1 = cudaq.sample_async(kernel_1, qubit_count, shots_count=1000, qpu_id=0)\n",
"result_2 = cudaq.sample_async(kernel_2, qubit_count, shots_count=1000, qpu_id=1)\n",
"if cudaq.num_available_gpus() > 1:\n",
" # Asynchronous execution on multiple virtual QPUs, each simulated by an NVIDIA GPU.\n",
" result_1 = cudaq.sample_async(kernel_1, qubit_count, shots_count=1000, qpu_id=0)\n",
" result_2 = cudaq.sample_async(kernel_2, qubit_count, shots_count=1000, qpu_id=1)\n",
"else:\n",
" # Schedule for execution on the same virtual QPU.\n",
" result_1 = cudaq.sample_async(kernel_1, qubit_count, shots_count=1000, qpu_id=0)\n",
" result_2 = cudaq.sample_async(kernel_2, qubit_count, shots_count=1000, qpu_id=0)\n",
"\n",
"print(result_1.get())\n",
"print(result_2.get())"
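For reference, here is the pattern this hunk introduces, written out as a standalone script instead of notebook JSON. It is a minimal sketch, not the notebook itself: the two GHZ-style kernels and qubit_count below are hypothetical stand-ins for the notebook's own definitions, and a GPU-backed multi-QPU target (e.g. the NVIDIA multi-QPU backend) is assumed to be selected before sampling.

import cudaq

# Hypothetical stand-ins for the notebook's kernel_1 / kernel_2.
@cudaq.kernel
def kernel_1(qubit_count: int):
    qvector = cudaq.qvector(qubit_count)
    h(qvector[0])
    for i in range(1, qubit_count):
        x.ctrl(qvector[0], qvector[i])
    mz(qvector)

@cudaq.kernel
def kernel_2(qubit_count: int):
    qvector = cudaq.qvector(qubit_count)
    for i in range(qubit_count):
        h(qvector[i])
    mz(qvector)

qubit_count = 2

if cudaq.num_available_gpus() > 1:
    # Asynchronous execution on multiple virtual QPUs, each simulated by an NVIDIA GPU.
    result_1 = cudaq.sample_async(kernel_1, qubit_count, shots_count=1000, qpu_id=0)
    result_2 = cudaq.sample_async(kernel_2, qubit_count, shots_count=1000, qpu_id=1)
else:
    # Single-GPU (or CPU-only) runner: schedule both jobs on the same virtual QPU.
    result_1 = cudaq.sample_async(kernel_1, qubit_count, shots_count=1000, qpu_id=0)
    result_2 = cudaq.sample_async(kernel_2, qubit_count, shots_count=1000, qpu_id=0)

print(result_1.get())
print(result_2.get())

The guard on cudaq.num_available_gpus() is what lets the cell run on a single-GPU CI runner: qpu_id=1 is only requested when a second GPU is actually present.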
11 changes: 8 additions & 3 deletions docs/sphinx/examples/python/tutorials/multi_gpu_workflows.ipynb
@@ -339,15 +339,20 @@
],
"source": [
"%%timeit\n",
"\n",
"# Timing the execution on a single GPU vs 4 GPUs, users will see a 4x performance improvement\n",
"\n",
"# Timing the execution on a single GPU vs 4 GPUs,\n",
"# one will see a 4x performance improvement if 4 GPUs are available.\n",
"\n",
"asyncresults = []\n",
"num_gpus = cudaq.num_available_gpus()\n",
"\n",
"for i in range(len(xi)):\n",
" for j in range(xi[i].shape[0]):\n",
" qpu_id = i * num_gpus // len(xi)\n",
" asyncresults.append(\n",
" cudaq.observe_async(kernel, h, xi[i][j, :], qpu_id=i))"
" cudaq.observe_async(kernel, h, xi[i][j, :], qpu_id=qpu_id))\n",
"\n",
"result = [res.get() for res in asyncresults]"
]
}
],
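Similarly, a standalone sketch of the batched observe_async pattern from this hunk. The ansatz, the observable h, and the xi parameter batches below are hypothetical placeholders for the notebook's objects; a multi-QPU NVIDIA target is assumed, so that qpu_id = i * num_gpus // len(xi) spreads the batches over however many GPUs are available and collapses to qpu_id = 0 on a single-GPU runner.

import cudaq
from cudaq import spin
import numpy as np

# Hypothetical stand-in for the notebook's parameterized ansatz.
@cudaq.kernel
def kernel(params: list[float]):
    qvector = cudaq.qvector(2)
    ry(params[0], qvector[0])
    ry(params[1], qvector[1])

h = spin.z(0) + spin.z(1)  # placeholder observable
# Four parameter batches, each holding five 2-parameter settings.
xi = [np.random.uniform(-np.pi, np.pi, (5, 2)) for _ in range(4)]

num_gpus = cudaq.num_available_gpus()

asyncresults = []
for i in range(len(xi)):
    for j in range(xi[i].shape[0]):
        # Map batch i onto one of the available virtual QPUs;
        # with a single GPU this always resolves to qpu_id = 0.
        qpu_id = i * num_gpus // len(xi)
        asyncresults.append(
            cudaq.observe_async(kernel, h, xi[i][j, :], qpu_id=qpu_id))

result = [res.get() for res in asyncresults]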
