 Composite Experiment Analysis class.
 """

-from typing import List, Dict, Union, Optional, Tuple
+from typing import List, Dict, Union, Optional, Iterator
 import warnings
-import numpy as np
-from qiskit.result import marginal_distribution
-from qiskit.result.postprocess import format_counts_memory
+
+from qiskit.result import marginal_distribution, marginal_memory
 from qiskit_experiments.framework import BaseAnalysis, ExperimentData
 from qiskit_experiments.framework.analysis_result_data import AnalysisResultData
 from qiskit_experiments.framework.base_analysis import _requires_copy
-from qiskit_experiments.exceptions import AnalysisError


 class CompositeAnalysis(BaseAnalysis):
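For orientation, the `marginal_distribution` and `marginal_memory` helpers imported above come from `qiskit.result` and marginalize a counts dictionary or per-shot memory over a subset of clbit indices. A minimal sketch of how they behave, independent of the commit and with made-up counts and memory values:

    from qiskit.result import marginal_distribution, marginal_memory

    # Keep only clbits 0 and 1 of a three-clbit counts dictionary
    # (clbit 0 is the rightmost character of each key).
    counts = {"010": 60, "111": 40}
    reduced_counts = marginal_distribution(counts, indices=[0, 1])

    # Keep only clbit 2 of per-shot classified (level 2) memory.
    memory = ["010", "111", "010"]
    reduced_memory = marginal_memory(memory, indices=[2])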
@@ -112,280 +110,145 @@ def run(
         if not replace_results and _requires_copy(experiment_data):
             experiment_data = experiment_data.copy()

-        if not self._flatten_results:
-            # Initialize child components if they are not initalized
-            # This only needs to be done if results are not being flattened
-            self._add_child_data(experiment_data)
-
         # Run analysis with replace_results = True since we have already
         # created the copy if it was required
         return super().run(experiment_data, replace_results=True, **options)

     def _run_analysis(self, experiment_data: ExperimentData):
-        # Return list of experiment data containers for each component experiment
-        # containing the marginalized data from the composite experiment
-        component_expdata = self._component_experiment_data(experiment_data)

-        # Run the component analysis on each component data
-        for i, sub_expdata in enumerate(component_expdata):
+        component_exp_data = []
+        iter_components = self._initialize_component_experiment_data(experiment_data)
+        for i, sub_exp_data in enumerate(iter_components):
             # Since copy for replace result is handled at the parent level
             # we always run with replace result on component analysis
-            self._analyses[i].run(sub_expdata, replace_results=True)
+            self._analyses[i].run(sub_exp_data, replace_results=True)
+            component_exp_data.append(sub_exp_data)

         # Analysis is running in parallel so we add loop to wait
         # for all component analysis to finish before returning
         # the parent experiment analysis results
-        for sub_expdata in component_expdata:
-            sub_expdata.block_for_results()
-        # Optionally flatten results from all component experiments
-        # for adding to the main experiment data container
-        if self._flatten_results:
-            return self._combine_results(component_expdata)
-
-        return [], []
+        analysis_results = []
+        figures = []
+        for i, sub_exp_data in enumerate(component_exp_data):
+            sub_exp_data.block_for_results()
+
+            if not self._flatten_results:
+                experiment_data.add_child_data(sub_exp_data)
+                continue
+
+            # Convert table to AnalysisResultData lists for backward compatibility.
+            # In principle this is not necessary because data can be directly concatenated to
+            # the table of outer container, i.e. experiment_data._analysis_results, however
+            # some custom composite analysis class, such as TphiAnalysis overrides
+            # the _run_analysis method to perform further analysis on
+            # sub-analysis outcomes. This is indeed an overhead,
+            # and at some point we should restrict such subclass implementation.
+            analysis_table = sub_exp_data.analysis_results(columns="all", dataframe=True)
+            for _, series in analysis_table.iterrows():
+                data = AnalysisResultData.from_table_element(**series.to_dict())
+                data.experiment_id = experiment_data.experiment_id
+                analysis_results.append(data)

-    def _component_experiment_data(self, experiment_data: ExperimentData) -> List[ExperimentData]:
-        """Return a list of marginalized experiment data for component experiments.
+            for fig_key in sub_exp_data.figure_names:
+                figures.append(sub_exp_data.figure(figure_key=fig_key))

-        Args:
-            experiment_data: a composite experiment data container.
+            del sub_exp_data

-        Returns:
-            The list of analysis-ready marginalized experiment data for each
-            component experiment.
+        return analysis_results, figures

-        Raises:
-            AnalysisError: If the component experiment data cannot be extracted.
-        """
-        if not self._flatten_results:
-            # Retrieve child data for component experiments for updating
-            component_index = experiment_data.metadata.get("component_child_index", [])
-            if not component_index:
-                raise AnalysisError("Unable to extract component child experiment data")
-            component_expdata = [experiment_data.child_data(i) for i in component_index]
-        else:
-            # Initialize temporary ExperimentData containers for
-            # each component experiment to analysis on. These will
-            # not be saved but results and figures will be collected
-            # from them
-            component_expdata = self._initialize_component_experiment_data(experiment_data)
-
-        # Compute marginalize data for each component experiment
-        marginalized_data = self._marginalized_component_data(experiment_data.data())
-
-        # Add the marginalized component data and component job metadata
-        # to each component child experiment. Note that this will clear
-        # any currently stored data in the experiment. Since copying of
-        # child data is handled by the `replace_results` kwarg of the
-        # parent container it is safe to always clear and replace the
-        # results of child containers in this step
-        for sub_expdata, sub_data in zip(component_expdata, marginalized_data):
-            # Clear any previously stored data and add marginalized data
-            sub_expdata._result_data.clear()
-            sub_expdata.add_data(sub_data)
-
-        return component_expdata
-
-    def _marginalized_component_data(self, composite_data: List[Dict]) -> List[List[Dict]]:
-        """Return marginalized data for component experiments.
+    def _marginalize_data(
+        self,
+        composite_data: List[Dict],
+        component_index: int,
+    ) -> List[Dict]:
+        """Return marginalized data for component with particular index.

         Args:
             composite_data: a list of composite experiment circuit data.
+            component_index: index of the component to return data for.

         Returns:
-            A List of lists of marginalized circuit data for each component
+            A list of marginalized circuit data for each component
             experiment in the composite experiment.
         """
-        # Marginalize data
-        marginalized_data = {}
+        out = []
         for datum in composite_data:
             metadata = datum.get("metadata", {})

-            # Add marginalized data to sub experiments
+            if component_index not in metadata["composite_index"]:
+                # This circuit is not tied to the component experiment at "component_index".
+                continue
+            index = metadata["composite_index"].index(component_index)
+
             if "composite_clbits" in metadata:
                 composite_clbits = metadata["composite_clbits"]
             else:
                 composite_clbits = None

-            # Pre-process the memory if any to avoid redundant calls to format_counts_memory
-            f_memory = self._format_memory(datum, composite_clbits)
-
-            for i, index in enumerate(metadata["composite_index"]):
-                if index not in marginalized_data:
-                    # Initialize data list for marginalized
-                    marginalized_data[index] = []
-                sub_data = {"metadata": metadata["composite_metadata"][i]}
-                if "counts" in datum:
-                    if composite_clbits is not None:
-                        sub_data["counts"] = marginal_distribution(
-                            counts=datum["counts"],
-                            indices=composite_clbits[i],
-                        )
-                    else:
-                        sub_data["counts"] = datum["counts"]
-                if "memory" in datum:
-                    if composite_clbits is not None:
-                        # level 2
-                        if f_memory is not None:
-                            idx = slice(
-                                -1 - composite_clbits[i][-1], -composite_clbits[i][0] or None
-                            )
-                            sub_data["memory"] = [shot[idx] for shot in f_memory]
-                        # level 1
-                        else:
-                            mem = np.array(datum["memory"])
-
-                            # Averaged level 1 data
-                            if len(mem.shape) == 2:
-                                sub_data["memory"] = mem[composite_clbits[i]].tolist()
-                            # Single-shot level 1 data
-                            if len(mem.shape) == 3:
-                                sub_data["memory"] = mem[:, composite_clbits[i]].tolist()
-                    else:
-                        sub_data["memory"] = datum["memory"]
-                marginalized_data[index].append(sub_data)
-
-        # Sort by index
-        return [marginalized_data[i] for i in sorted(marginalized_data.keys())]
-
-    @staticmethod
-    def _format_memory(datum: Dict, composite_clbits: List):
-        """A helper method to convert level 2 memory (if it exists) to bit-string format."""
-        f_memory = None
-        if (
-            "memory" in datum
-            and composite_clbits is not None
-            and isinstance(datum["memory"][0], str)
-        ):
-            num_cbits = 1 + max(cbit for cbit_list in composite_clbits for cbit in cbit_list)
-            header = {"memory_slots": num_cbits}
-            f_memory = list(format_counts_memory(shot, header) for shot in datum["memory"])
-
-        return f_memory
-
-    def _add_child_data(self, experiment_data: ExperimentData):
-        """Save empty component experiment data as child data.
-
-        This will initialize empty ExperimentData objects for each component
-        experiment and add them as child data to the main composite experiment
-        ExperimentData container container for saving.
-
-        Args:
-            experiment_data: a composite experiment experiment data container.
-        """
-        component_index = experiment_data.metadata.get("component_child_index", [])
-        if component_index:
-            # Child components are already initialized
-            return
-
-        # Initialize the component experiment data containers and add them
-        # as child data to the current experiment data
-        child_components = self._initialize_component_experiment_data(experiment_data)
-        start_index = len(experiment_data.child_data())
-        for i, subdata in enumerate(child_components):
-            experiment_data.add_child_data(subdata)
-            component_index.append(start_index + i)
-
-        # Store the indices of the added child data in metadata
-        experiment_data.metadata["component_child_index"] = component_index
+            component_data = {"metadata": metadata["composite_metadata"][index]}
+
+            # Use terra result marginalization utils.
+            # These functions support parallel execution and are implemented in Rust.
+            if "counts" in datum:
+                if composite_clbits is not None:
+                    component_data["counts"] = marginal_distribution(
+                        counts=datum["counts"],
+                        indices=composite_clbits[index],
+                    )
+                else:
+                    component_data["counts"] = datum["counts"]
+            if "memory" in datum:
+                if composite_clbits is not None:
+                    component_data["memory"] = marginal_memory(
+                        memory=datum["memory"],
+                        indices=composite_clbits[index],
+                    )
+                else:
+                    component_data["memory"] = datum["memory"]
+            out.append(component_data)
+        return out

     def _initialize_component_experiment_data(
-        self, experiment_data: ExperimentData
-    ) -> List[ExperimentData]:
+        self,
+        experiment_data: ExperimentData,
+    ) -> Iterator[ExperimentData]:
         """Initialize empty experiment data containers for component experiments.

         Args:
-            experiment_data: a composite experiment experiment data container.
+            experiment_data: a composite experiment data container.

-        Returns:
-            The list of experiment data containers for each component experiment
-            containing the component metadata, and tags, share level, and
-            auto save settings of the composite experiment.
+        Yields:
+            Experiment data containers for each component experiment
+            containing the component metadata, tags, and share level.
         """
+        metadata = experiment_data.metadata
+
         # Extract component experiment types and metadata so they can be
         # added to the component experiment data containers
-        metadata = experiment_data.metadata
         num_components = len(self._analyses)
         experiment_types = metadata.get("component_types", [None] * num_components)
         component_metadata = metadata.get("component_metadata", [{}] * num_components)

         # Create component experiments and set the backend and
         # metadata for the components
-        component_expdata = []
-        for i, _ in enumerate(self._analyses):
-            subdata = ExperimentData(backend=experiment_data.backend)
-            subdata.experiment_type = experiment_types[i]
-            subdata.metadata.update(component_metadata[i])
-
-            if self._flatten_results:
-                # Explicitly set auto_save to false so the temporary
-                # data can't accidentally be saved
-                subdata.auto_save = False
-            else:
-                # Copy tags, share_level and auto_save from the parent
-                # experiment data if results are not being flattened.
-                subdata.tags = experiment_data.tags
-                subdata.share_level = experiment_data.share_level
-                subdata.auto_save = experiment_data.auto_save
+        composite_data = experiment_data.data()
+        child_data_ids = []
+        for i in range(num_components):
+            # Create empty container with metadata
+            sub_exp_data = ExperimentData(backend=experiment_data.backend)
+            sub_exp_data.experiment_type = experiment_types[i]
+            sub_exp_data.metadata.update(component_metadata[i])
+            sub_exp_data.auto_save = False

-            component_expdata.append(subdata)
+            # Add marginalized experiment data
+            sub_exp_data.add_data(self._marginalize_data(composite_data, i))
+            child_data_ids.append(sub_exp_data.experiment_id)

-        return component_expdata
+            yield sub_exp_data

     def _set_flatten_results(self):
         """Recursively set flatten_results to True for all composite components."""
         self._flatten_results = True
         for analysis in self._analyses:
             if isinstance(analysis, CompositeAnalysis):
                 analysis._set_flatten_results()
-
-    def _combine_results(
-        self, component_experiment_data: List[ExperimentData]
-    ) -> Tuple[List[AnalysisResultData], List["matplotlib.figure.Figure"]]:
-        """Combine analysis results from component experiment data.
-
-        Args:
-            component_experiment_data: list of experiment data containers containing the
-                analysis results for each component experiment.
-
-        Returns:
-            A pair of the combined list of all analysis results from each of the
-            component experiments, and a list of all figures from each component
-            experiment.
-        """
-        analysis_results = []
-        figures = []
-        for sub_expdata in component_experiment_data:
-            figures += sub_expdata._figures.values()
-
-            # Convert Dataframe Series back into AnalysisResultData
-            # This is due to limitation that _run_analysis must return List[AnalysisResultData],
-            # and some composite analysis such as TphiAnalysis overrides this method to
-            # return extra quantity computed from sub analysis results.
-            # This produces unnecessary data conversion.
-            # The _run_analysis mechanism seems just complicating the entire logic.
-            # Since it's impossible to deprecate the usage of this protected method,
-            # we should implement new CompositeAnalysis class with much more efficient
-            # internal logic. Note that the child data structure is no longer necessary
-            # because dataframe offers more efficient data filtering mechanisms.
-            analysis_table = sub_expdata.analysis_results(verbosity=3, dataframe=True)
-            for _, series in analysis_table.iterrows():
-                data_dict = series.to_dict()
-                primary_info = {
-                    "name": data_dict.pop("name"),
-                    "value": data_dict.pop("value"),
-                    "quality": data_dict.pop("quality"),
-                    "device_components": data_dict.pop("components"),
-                }
-                chisq = data_dict.pop("chisq", np.nan)
-                if chisq:
-                    primary_info["chisq"] = chisq
-                data_dict["experiment"] = sub_expdata.experiment_type
-                if "experiment_id" in data_dict:
-                    # Use experiment ID of parent experiment data.
-                    # Sub experiment data is merged and discarded.
-                    del data_dict["experiment_id"]
-                analysis_result = AnalysisResultData(**primary_info, extra=data_dict)
-                analysis_results.append(analysis_result)
-
-        return analysis_results, figures
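For context, the refactored flow relies on each composite circuit datum carrying `composite_index`, `composite_metadata`, and (for measured circuits) `composite_clbits` in its metadata: `_marginalize_data` locates the requested component in `composite_index` and marginalizes the counts or memory over that component's clbits. A standalone sketch of that lookup, with all data and metadata values made up for illustration:

    from qiskit.result import marginal_distribution

    # One circuit datum as returned by experiment_data.data() (values made up).
    datum = {
        "counts": {"011": 70, "110": 30},
        "metadata": {
            "composite_index": [0, 1],                      # components measured by this circuit
            "composite_metadata": [{"xval": 0.1}, {"xval": 0.2}],
            "composite_clbits": [[0, 1], [2]],              # clbits belonging to each component
        },
    }

    # Recover the data belonging to component 1 only.
    component_index = 1
    metadata = datum["metadata"]
    pos = metadata["composite_index"].index(component_index)
    component_data = {
        "metadata": metadata["composite_metadata"][pos],
        "counts": marginal_distribution(
            counts=datum["counts"],
            indices=metadata["composite_clbits"][pos],
        ),
    }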