Skip to content
171 changes: 108 additions & 63 deletions qiskit/utils/run_circuits.py
Original file line number Diff line number Diff line change
Expand Up @@ -457,81 +457,126 @@ def run_circuits(
run_config = run_config or {}
with_autorecover = not is_simulator_backend(backend)

job, job_id = _safe_submit_circuits(
circuits,
backend,
qjob_config=qjob_config,
backend_options=backend_options,
noise_config=noise_config,
run_config=run_config,
)
result = None
if MAX_CIRCUITS_PER_JOB is not None:
max_circuits_per_job = int(MAX_CIRCUITS_PER_JOB)
else:
if is_local_backend(backend):
max_circuits_per_job = sys.maxsize
Comment on lines +463 to +464
Copy link
Copy Markdown
Member Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

I just copied this from the qobj path, but I don't think this is actually correct. We should probably defer to the max_experiments value in the configuration because a local backend might not accept the max list size circuits for a single job.

else:
max_circuits_per_job = backend.configuration().max_experiments

if len(circuits) > max_circuits_per_job:
jobs = []
job_ids = []
split_circuits = []
count = 0
while count < len(circuits):
some_circuits = circuits[count : count + max_circuits_per_job]
split_circuits.append(some_circuits)
job, job_id = _safe_submit_circuits(
some_circuits,
backend,
qjob_config=qjob_config,
backend_options=backend_options,
noise_config=noise_config,
run_config=run_config,
)
jobs.append(job)
job_ids.append(job_id)
count += max_circuits_per_job
else:
job, job_id = _safe_submit_circuits(
circuits,
backend,
qjob_config=qjob_config,
backend_options=backend_options,
noise_config=noise_config,
run_config=run_config,
)
jobs = [job]
job_ids = [job_id]
split_circuits = [circuits]
results = []
if with_autorecover:
logger.info("Backend status: %s", backend.status())
logger.info("There is one jobs are submitted: id: %s", job_id)
while True:
logger.info("Running job id: %s", job_id)
# try to get result if possible
logger.info("There are %s jobs are submitted.", len(jobs))
logger.info("All job ids:\n%s", job_ids)
for idx, _ in enumerate(jobs):
result = None
logger.info("Backend status: %s", backend.status())
logger.info("There is one jobs are submitted: id: %s", job_id)
job = jobs[idx]
job_id = job_ids[idx]
while True:
job_status = _safe_get_job_status(job, job_id)
queue_position = 0
if job_status in JOB_FINAL_STATES:
# do callback again after the job is in the final states
logger.info("Running job id: %s", job_id)
# try to get result if possible
while True:
job_status = _safe_get_job_status(job, job_id)
queue_position = 0
if job_status in JOB_FINAL_STATES:
# do callback again after the job is in the final states
if job_callback is not None:
job_callback(job_id, job_status, queue_position, job)
break
if job_status == JobStatus.QUEUED and hasattr(job, "queue_position"):
queue_position = job.queue_position()
logger.info("Job id: %s is queued at position %s", job_id, queue_position)
else:
logger.info("Job id: %s, status: %s", job_id, job_status)
if job_callback is not None:
job_callback(job_id, job_status, queue_position, job)
break
if job_status == JobStatus.QUEUED and hasattr(job, queue_position):
queue_position = job.queue_position()
logger.info("Job id: %s is queued at position %s", job_id, queue_position)
else:
logger.info("Job id: %s, status: %s", job_id, job_status)
if job_callback is not None:
job_callback(job_id, job_status, queue_position, job)
time.sleep(qjob_config["wait"])
time.sleep(qjob_config["wait"])

# get result after the status is DONE
if job_status == JobStatus.DONE:
while True:
result = job.result()
if result.success:
logger.info("COMPLETED: job id: %s", job_id)
break
# get result after the status is DONE
if job_status == JobStatus.DONE:
while True:
result = job.result()
if result.success:
results.append(result)
logger.info("COMPLETED the %s-th job, job id: %s", idx, job_id)
break

logger.warning("FAILURE: Job id: %s", job_id)
logger.warning("FAILURE: Job id: %s", job_id)
logger.warning(
"Job (%s) is completed anyway, retrieve result " "from backend again.",
job_id,
)
job = backend.retrieve_job(job_id)
break
# for other cases, resubmit the circuit until the result is available.
# since if there is no result returned, there is no way algorithm can do any process
if job_status == JobStatus.CANCELLED:
logger.warning(
"FAILURE: Job id: %s is cancelled. Re-submit the circuits.", job_id
)
elif job_status == JobStatus.ERROR:
logger.warning(
"Job (%s) is completed anyway, retrieve result " "from backend again.",
"FAILURE: Job id: %s encounters the error. "
"Error is : %s. Re-submit the circuits.",
job_id,
job.error_message(),
)
else:
logging.warning(
"FAILURE: Job id: %s. Unknown status: %s. " "Re-submit the circuits.",
job_id,
job_status,
)
job = backend.retrieve_job(job_id)
break
# for other cases, resubmit the circuit until the result is available.
# since if there is no result returned, there is no way algorithm can do any process
if job_status == JobStatus.CANCELLED:
logger.warning("FAILURE: Job id: %s is cancelled. Re-submit the circuits.", job_id)
elif job_status == JobStatus.ERROR:
logger.warning(
"FAILURE: Job id: %s encounters the error. "
"Error is : %s. Re-submit the circuits.",
job_id,
job.error_message(),
)
else:
logging.warning(
"FAILURE: Job id: %s. Unknown status: %s. " "Re-submit the circuits.",
job_id,
job_status,
)

job, job_id = _safe_submit_circuits(
circuits,
backend,
qjob_config=qjob_config,
backend_options=backend_options,
noise_config=noise_config,
run_config=run_config,
)
job, job_id = _safe_submit_circuits(
split_circuits[idx],
backend,
qjob_config=qjob_config,
backend_options=backend_options,
noise_config=noise_config,
run_config=run_config,
)
else:
result = job.result()
results = []
for job in jobs:
results.append(job.result())

result = _combine_result_objects(results) if results else None

# If result was not successful then raise an exception with either the status msg or
# extra information if this was an Aer partial result return
Expand Down
Original file line number Diff line number Diff line change
@@ -0,0 +1,9 @@
---
fixes:
- |
Fixed an issue with the :class:`~qiskit.utils.QuantumInstance` with
:class:`~qiskit.providers.BackendV1` backends with the
:attr:`~qiskit.providers.models.BackendConfiguration.max_experiments`
attribute set to a value less than the number of circuits to run. Previously
the :class:`~qiskit.utils.QuantumInstance` would not correctly split the
circuits to run into separate jobs, which has been corrected.
14 changes: 14 additions & 0 deletions test/python/algorithms/test_backendv1.py
Original file line number Diff line number Diff line change
Expand Up @@ -80,6 +80,20 @@ def test_run_circuit_oracle(self):
result = grover.amplify(problem)
self.assertIn(result.top_measurement, ["11"])

def test_run_circuit_oracle_single_experiment_backend(self):
    """Test Grover execution on a backend limited to one experiment per job.

    Setting ``max_experiments = 1`` on the backend configuration forces the
    QuantumInstance to split the circuits into multiple jobs, exercising the
    per-job circuit-splitting path in ``run_circuits``.
    """
    oracle = QuantumCircuit(2)
    oracle.cz(0, 1)
    problem = AmplificationProblem(oracle, is_good_state=["11"])
    backend = self._provider.get_backend("fake_yorktown")
    backend._configuration.max_experiments = 1
    # Use the same backend object whose configuration was just modified;
    # fetching a fresh backend from the provider could hand back an
    # unmodified configuration and silently skip the splitting path.
    qi = QuantumInstance(backend, seed_simulator=12, seed_transpiler=32)
    grover = Grover(quantum_instance=qi)
    result = grover.amplify(problem)
    self.assertIn(result.top_measurement, ["11"])

def test_measurement_error_mitigation_with_vqe(self):
"""measurement error mitigation test with vqe"""
try:
Expand Down