diff --git a/docs/api/dataset/data_set.rst b/docs/api/dataset/data_set.rst deleted file mode 100644 index abba9f2cdb9..00000000000 --- a/docs/api/dataset/data_set.rst +++ /dev/null @@ -1,8 +0,0 @@ -qcodes.dataset.data_set ------------------------ - -.. automodule:: qcodes.dataset.data_set - :members: - -.. automodule:: qcodes.dataset.data_set_cache - :members: diff --git a/docs/api/dataset/database_extract_runs.rst b/docs/api/dataset/database_extract_runs.rst deleted file mode 100644 index bd52199dbd6..00000000000 --- a/docs/api/dataset/database_extract_runs.rst +++ /dev/null @@ -1,5 +0,0 @@ -qcodes.dataset.database_extract_runs ------------------------------------- - -.. automodule:: qcodes.dataset.database_extract_runs - :members: diff --git a/docs/api/dataset/index.rst b/docs/api/dataset/index.rst index 439e72dd2d6..7081de14b06 100644 --- a/docs/api/dataset/index.rst +++ b/docs/api/dataset/index.rst @@ -4,25 +4,5 @@ qcodes.dataset ============== -.. autosummary:: - - qcodes.dataset - qcodes.dataset.measurements - qcodes.dataset.plotting - qcodes.dataset.data_set - qcodes.dataset.database_extract_runs - qcodes.dataset.legacy_import - - .. automodule:: qcodes.dataset - - -.. toctree:: - :maxdepth: 4 - :hidden: - - measurements - plotting - data_set - database_extract_runs - legacy_import + :autosummary: diff --git a/docs/api/dataset/legacy_import.rst b/docs/api/dataset/legacy_import.rst deleted file mode 100644 index e0301d93c86..00000000000 --- a/docs/api/dataset/legacy_import.rst +++ /dev/null @@ -1,5 +0,0 @@ -qcodes.dataset.legacy_import ----------------------------- - -.. automodule:: qcodes.dataset.legacy_import - :members: diff --git a/docs/api/dataset/measurements.rst b/docs/api/dataset/measurements.rst deleted file mode 100644 index 5d2db602e32..00000000000 --- a/docs/api/dataset/measurements.rst +++ /dev/null @@ -1,5 +0,0 @@ -qcodes.dataset.measurements ---------------------------- - -.. automodule:: qcodes.dataset.measurements - :members: diff --git a/docs/api/dataset/plotting.rst b/docs/api/dataset/plotting.rst deleted file mode 100644 index f19d8e60aed..00000000000 --- a/docs/api/dataset/plotting.rst +++ /dev/null @@ -1,5 +0,0 @@ -qcodes.dataset.plotting ------------------------ - -.. automodule:: qcodes.dataset.plotting - :members: diff --git a/docs/api/extensions/index.rst b/docs/api/extensions/index.rst new file mode 100644 index 00000000000..6e4d6714fac --- /dev/null +++ b/docs/api/extensions/index.rst @@ -0,0 +1,7 @@ +.. _extensions_api : + +qcodes.extensions +================= + +.. automodule:: qcodes.extensions + :autosummary: diff --git a/docs/api/index.rst b/docs/api/index.rst index 54ba9c3706b..dd2fb784a6c 100644 --- a/docs/api/index.rst +++ b/docs/api/index.rst @@ -18,6 +18,7 @@ QCoDeS API configuration/index dataset/index + extensions/index instrument/index parameters/index logger/index diff --git a/docs/api/utils/index.rst b/docs/api/utils/index.rst index e16246c5908..1b3d48ff8df 100644 --- a/docs/api/utils/index.rst +++ b/docs/api/utils/index.rst @@ -29,6 +29,5 @@ qcodes.utils magic metadata plotting - slack threading validators diff --git a/docs/api/utils/slack.rst b/docs/api/utils/slack.rst deleted file mode 100644 index f16caf967c7..00000000000 --- a/docs/api/utils/slack.rst +++ /dev/null @@ -1,5 +0,0 @@ -qcodes.utils.slack ------------------- - -.. 
automodule:: qcodes.utils.slack - :members: diff --git a/pyproject.toml b/pyproject.toml index c704cb37677..3bb91c63357 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -55,7 +55,7 @@ module = [ "qcodes.plots.*", "qcodes.tests.*", "qcodes.utils.magic", - "qcodes.utils.slack", + "qcodes.extensions.slack", ] disallow_untyped_defs = false diff --git a/qcodes/dataset/__init__.py b/qcodes/dataset/__init__.py index 81978223550..97289cde5ac 100644 --- a/qcodes/dataset/__init__.py +++ b/qcodes/dataset/__init__.py @@ -11,7 +11,9 @@ ) from .data_set_in_memory import load_from_netcdf from .data_set_protocol import DataSetProtocol, DataSetType +from .database_extract_runs import extract_runs_into_db from .descriptions.param_spec import ParamSpec +from .do_nd import AbstractSweep, ArraySweep, LinSweep, LogSweep, do0d, do1d, do2d, dond from .experiment_container import ( experiments, load_experiment, @@ -20,7 +22,10 @@ load_or_create_experiment, new_experiment, ) +from .experiment_settings import get_default_experiment_id, reset_default_experiment_id +from .legacy_import import import_dat_file from .measurements import Measurement +from .plotting import plot_by_id, plot_dataset from .sqlite.database import ( initialise_database, initialise_or_create_database_at, @@ -29,24 +34,38 @@ from .sqlite.settings import SQLiteSettings __all__ = [ - "load_by_counter", - "load_by_guid", - "load_by_id", - "load_by_run_spec", - "new_data_set", - "load_from_netcdf", + "AbstractSweep", + "ArraySweep", "DataSetProtocol", "DataSetType", + "LinSweep", + "LogSweep", + "Measurement", "ParamSpec", + "SQLiteSettings", + "do0d", + "do1d", + "do2d", + "dond", "experiments", + "extract_runs_into_db", + "get_default_experiment_id", + "import_dat_file", + "initialise_database", + "initialise_or_create_database_at", + "initialised_database_at", + "load_by_counter", + "load_by_guid", + "load_by_id", + "load_by_run_spec", "load_experiment", "load_experiment_by_name", + "load_from_netcdf", "load_last_experiment", "load_or_create_experiment", + "new_data_set", "new_experiment", - "Measurement", - "initialise_database", - "initialise_or_create_database_at", - "initialised_database_at", - "SQLiteSettings", + "plot_by_id", + "plot_dataset", + "reset_default_experiment_id", ] diff --git a/qcodes/dataset/do_nd.py b/qcodes/dataset/do_nd.py new file mode 100644 index 00000000000..abceabe2d64 --- /dev/null +++ b/qcodes/dataset/do_nd.py @@ -0,0 +1,1019 @@ +import logging +import sys +import time +from abc import ABC, abstractmethod +from contextlib import ExitStack, contextmanager +from typing import Callable, Dict, Iterator, List, Optional, Sequence, Tuple, Union + +import matplotlib.axes +import matplotlib.colorbar +import numpy as np +from tqdm.auto import tqdm +from typing_extensions import TypedDict + +from qcodes import config +from qcodes.dataset.data_set_protocol import DataSetProtocol, res_type +from qcodes.dataset.descriptions.detect_shapes import detect_shape_of_measurement +from qcodes.dataset.descriptions.versioning.rundescribertypes import Shapes +from qcodes.dataset.experiment_container import Experiment +from qcodes.dataset.measurements import Measurement +from qcodes.dataset.plotting import plot_and_save_image +from qcodes.instrument.parameter import _BaseParameter +from qcodes.utils.threading import ( + SequentialParamsCaller, + ThreadPoolParamsCaller, + process_params_meas, +) + +ActionsT = Sequence[Callable[[], None]] +BreakConditionT = Callable[[], bool] + +ParamMeasT = Union[_BaseParameter, Callable[[], None]] + 
+AxesTuple = Tuple[matplotlib.axes.Axes, matplotlib.colorbar.Colorbar] +AxesTupleList = Tuple[ + List[matplotlib.axes.Axes], List[Optional[matplotlib.colorbar.Colorbar]] +] +AxesTupleListWithDataSet = Tuple[ + DataSetProtocol, + List[matplotlib.axes.Axes], + List[Optional[matplotlib.colorbar.Colorbar]], +] +MultiAxesTupleListWithDataSet = Tuple[ + Tuple[DataSetProtocol, ...], + Tuple[List[matplotlib.axes.Axes], ...], + Tuple[List[Optional[matplotlib.colorbar.Colorbar]], ...], +] + +LOG = logging.getLogger(__name__) + + +class ParameterGroup(TypedDict): + params: Tuple[ParamMeasT, ...] + meas_name: str + measured_params: List[res_type] + + +class UnsafeThreadingException(Exception): + pass + + +class BreakConditionInterrupt(Exception): + pass + + +MeasInterruptT = Union[KeyboardInterrupt, BreakConditionInterrupt, None] + + +def _register_parameters( + meas: Measurement, + param_meas: Sequence[ParamMeasT], + setpoints: Optional[Sequence[_BaseParameter]] = None, + shapes: Shapes = None, +) -> None: + for parameter in param_meas: + if isinstance(parameter, _BaseParameter): + meas.register_parameter(parameter, setpoints=setpoints) + meas.set_shapes(shapes=shapes) + + +def _register_actions( + meas: Measurement, enter_actions: ActionsT, exit_actions: ActionsT +) -> None: + for action in enter_actions: + # this omits the possibility of passing + # argument to enter and exit actions. + # Do we want that? + meas.add_before_run(action, ()) + for action in exit_actions: + meas.add_after_run(action, ()) + + +def _set_write_period(meas: Measurement, write_period: Optional[float] = None) -> None: + if write_period is not None: + meas.write_period = write_period + + +@contextmanager +def _catch_interrupts() -> Iterator[Callable[[], MeasInterruptT]]: + interrupt_exception = None + + def get_interrupt_exception() -> MeasInterruptT: + nonlocal interrupt_exception + return interrupt_exception + + try: + yield get_interrupt_exception + except (KeyboardInterrupt, BreakConditionInterrupt) as e: + interrupt_exception = e + + +def do0d( + *param_meas: ParamMeasT, + write_period: Optional[float] = None, + measurement_name: str = "", + exp: Optional[Experiment] = None, + do_plot: Optional[bool] = None, + use_threads: Optional[bool] = None, + log_info: Optional[str] = None, +) -> AxesTupleListWithDataSet: + """ + Perform a measurement of a single parameter. This is probably most + useful for an ArrayParameter that already returns an array of data points + + Args: + *param_meas: Parameter(s) to measure at each step or functions that + will be called at each step. The function should take no arguments. + The parameters and functions are called in the order they are + supplied. + write_period: The time after which the data is actually written to the + database. + measurement_name: Name of the measurement. This will be passed down to + the dataset produced by the measurement. If not given, a default + value of 'results' is used for the dataset. + exp: The experiment to use for this measurement. + do_plot: should png and pdf versions of the images be saved after the + run. If None the setting will be read from ``qcodesrc.json`` + use_threads: If True measurements from each instrument will be done on + separate threads. If you are measuring from several instruments + this may give a significant speedup. + log_info: Message that is logged during the measurement. If None a default + message is used. + + Returns: + The QCoDeS dataset. 
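+
+    Example (an illustrative sketch only; ``dmm.v1`` and ``dmm.v2`` are
+    placeholder parameters, not defined by this module):
+
+    .. code-block:: python
+
+        # measure two parameters once and get the dataset plus plot handles
+        dataset, axes, cbars = do0d(dmm.v1, dmm.v2)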
+ """ + if do_plot is None: + do_plot = config.dataset.dond_plot + meas = Measurement(name=measurement_name, exp=exp) + if log_info is not None: + meas._extra_log_info = log_info + else: + meas._extra_log_info = "Using 'qcodes.dataset.do0d'" + + measured_parameters = tuple( + param for param in param_meas if isinstance(param, _BaseParameter) + ) + + try: + shapes: Shapes = detect_shape_of_measurement( + measured_parameters, + ) + except TypeError: + LOG.exception( + f"Could not detect shape of {measured_parameters} " + f"falling back to unknown shape." + ) + shapes = None + + _register_parameters(meas, param_meas, shapes=shapes) + _set_write_period(meas, write_period) + + with meas.run() as datasaver: + datasaver.add_result(*process_params_meas(param_meas, use_threads=use_threads)) + dataset = datasaver.dataset + + return _handle_plotting(dataset, do_plot) + + +def do1d( + param_set: _BaseParameter, + start: float, + stop: float, + num_points: int, + delay: float, + *param_meas: ParamMeasT, + enter_actions: ActionsT = (), + exit_actions: ActionsT = (), + write_period: Optional[float] = None, + measurement_name: str = "", + exp: Optional[Experiment] = None, + do_plot: Optional[bool] = None, + use_threads: Optional[bool] = None, + additional_setpoints: Sequence[_BaseParameter] = tuple(), + show_progress: Optional[None] = None, + log_info: Optional[str] = None, + break_condition: Optional[BreakConditionT] = None, +) -> AxesTupleListWithDataSet: + """ + Perform a 1D scan of ``param_set`` from ``start`` to ``stop`` in + ``num_points`` measuring param_meas at each step. In case param_meas is + an ArrayParameter this is effectively a 2d scan. + + Args: + param_set: The QCoDeS parameter to sweep over + start: Starting point of sweep + stop: End point of sweep + num_points: Number of points in sweep + delay: Delay after setting parameter before measurement is performed + param_meas: Parameter(s) to measure at each step or functions that + will be called at each step. The function should take no arguments. + The parameters and functions are called in the order they are + supplied. + enter_actions: A list of functions taking no arguments that will be + called before the measurements start + exit_actions: A list of functions taking no arguments that will be + called after the measurements ends + write_period: The time after which the data is actually written to the + database. + additional_setpoints: A list of setpoint parameters to be registered in + the measurement but not scanned. + measurement_name: Name of the measurement. This will be passed down to + the dataset produced by the measurement. If not given, a default + value of 'results' is used for the dataset. + exp: The experiment to use for this measurement. + do_plot: should png and pdf versions of the images be saved after the + run. If None the setting will be read from ``qcodesrc.json`` + use_threads: If True measurements from each instrument will be done on + separate threads. If you are measuring from several instruments + this may give a significant speedup. + show_progress: should a progress bar be displayed during the + measurement. If None the setting will be read from ``qcodesrc.json`` + log_info: Message that is logged during the measurement. If None a default + message is used. + break_condition: Callable that takes no arguments. If returned True, + measurement is interrupted. + + Returns: + The QCoDeS dataset. 
+ """ + if do_plot is None: + do_plot = config.dataset.dond_plot + if show_progress is None: + show_progress = config.dataset.dond_show_progress + + meas = Measurement(name=measurement_name, exp=exp) + if log_info is not None: + meas._extra_log_info = log_info + else: + meas._extra_log_info = "Using 'qcodes.dataset.do1d'" + + all_setpoint_params = (param_set,) + tuple(s for s in additional_setpoints) + + measured_parameters = tuple( + param for param in param_meas if isinstance(param, _BaseParameter) + ) + try: + loop_shape = (num_points,) + tuple(1 for _ in additional_setpoints) + shapes: Shapes = detect_shape_of_measurement(measured_parameters, loop_shape) + except TypeError: + LOG.exception( + f"Could not detect shape of {measured_parameters} " + f"falling back to unknown shape." + ) + shapes = None + + _register_parameters(meas, all_setpoint_params) + _register_parameters(meas, param_meas, setpoints=all_setpoint_params, shapes=shapes) + _set_write_period(meas, write_period) + _register_actions(meas, enter_actions, exit_actions) + + if use_threads is None: + use_threads = config.dataset.use_threads + + param_meas_caller = ( + ThreadPoolParamsCaller(*param_meas) + if use_threads + else SequentialParamsCaller(*param_meas) + ) + + # do1D enforces a simple relationship between measured parameters + # and set parameters. For anything more complicated this should be + # reimplemented from scratch + with _catch_interrupts() as interrupted, meas.run() as datasaver, param_meas_caller as call_param_meas: + dataset = datasaver.dataset + additional_setpoints_data = process_params_meas(additional_setpoints) + setpoints = np.linspace(start, stop, num_points) + + # flush to prevent unflushed print's to visually interrupt tqdm bar + # updates + sys.stdout.flush() + sys.stderr.flush() + + for set_point in tqdm(setpoints, disable=not show_progress): + param_set.set(set_point) + time.sleep(delay) + datasaver.add_result( + (param_set, set_point), *call_param_meas(), *additional_setpoints_data + ) + + if callable(break_condition): + if break_condition(): + raise BreakConditionInterrupt("Break condition was met.") + + return _handle_plotting(dataset, do_plot, interrupted()) + + +def do2d( + param_set1: _BaseParameter, + start1: float, + stop1: float, + num_points1: int, + delay1: float, + param_set2: _BaseParameter, + start2: float, + stop2: float, + num_points2: int, + delay2: float, + *param_meas: ParamMeasT, + set_before_sweep: Optional[bool] = True, + enter_actions: ActionsT = (), + exit_actions: ActionsT = (), + before_inner_actions: ActionsT = (), + after_inner_actions: ActionsT = (), + write_period: Optional[float] = None, + measurement_name: str = "", + exp: Optional[Experiment] = None, + flush_columns: bool = False, + do_plot: Optional[bool] = None, + use_threads: Optional[bool] = None, + additional_setpoints: Sequence[_BaseParameter] = tuple(), + show_progress: Optional[None] = None, + log_info: Optional[str] = None, + break_condition: Optional[BreakConditionT] = None, +) -> AxesTupleListWithDataSet: + """ + Perform a 1D scan of ``param_set1`` from ``start1`` to ``stop1`` in + ``num_points1`` and ``param_set2`` from ``start2`` to ``stop2`` in + ``num_points2`` measuring param_meas at each step. 
+ + Args: + param_set1: The QCoDeS parameter to sweep over in the outer loop + start1: Starting point of sweep in outer loop + stop1: End point of sweep in the outer loop + num_points1: Number of points to measure in the outer loop + delay1: Delay after setting parameter in the outer loop + param_set2: The QCoDeS parameter to sweep over in the inner loop + start2: Starting point of sweep in inner loop + stop2: End point of sweep in the inner loop + num_points2: Number of points to measure in the inner loop + delay2: Delay after setting parameter before measurement is performed + param_meas: Parameter(s) to measure at each step or functions that + will be called at each step. The function should take no arguments. + The parameters and functions are called in the order they are + supplied. + set_before_sweep: if True the outer parameter is set to its first value + before the inner parameter is swept to its next value. + enter_actions: A list of functions taking no arguments that will be + called before the measurements start + exit_actions: A list of functions taking no arguments that will be + called after the measurements ends + before_inner_actions: Actions executed before each run of the inner loop + after_inner_actions: Actions executed after each run of the inner loop + write_period: The time after which the data is actually written to the + database. + measurement_name: Name of the measurement. This will be passed down to + the dataset produced by the measurement. If not given, a default + value of 'results' is used for the dataset. + exp: The experiment to use for this measurement. + flush_columns: The data is written after a column is finished + independent of the passed time and write period. + additional_setpoints: A list of setpoint parameters to be registered in + the measurement but not scanned. + do_plot: should png and pdf versions of the images be saved after the + run. If None the setting will be read from ``qcodesrc.json`` + use_threads: If True measurements from each instrument will be done on + separate threads. If you are measuring from several instruments + this may give a significant speedup. + show_progress: should a progress bar be displayed during the + measurement. If None the setting will be read from ``qcodesrc.json`` + log_info: Message that is logged during the measurement. If None a default + message is used. + break_condition: Callable that takes no arguments. If returned True, + measurement is interrupted. + + Returns: + The QCoDeS dataset. + """ + + if do_plot is None: + do_plot = config.dataset.dond_plot + if show_progress is None: + show_progress = config.dataset.dond_show_progress + + meas = Measurement(name=measurement_name, exp=exp) + if log_info is not None: + meas._extra_log_info = log_info + else: + meas._extra_log_info = "Using 'qcodes.dataset.do2d'" + all_setpoint_params = ( + param_set1, + param_set2, + ) + tuple(s for s in additional_setpoints) + + measured_parameters = tuple( + param for param in param_meas if isinstance(param, _BaseParameter) + ) + + try: + loop_shape = (num_points1, num_points2) + tuple(1 for _ in additional_setpoints) + shapes: Shapes = detect_shape_of_measurement(measured_parameters, loop_shape) + except TypeError: + LOG.exception( + f"Could not detect shape of {measured_parameters} " + f"falling back to unknown shape." 
+ ) + shapes = None + + _register_parameters(meas, all_setpoint_params) + _register_parameters(meas, param_meas, setpoints=all_setpoint_params, shapes=shapes) + _set_write_period(meas, write_period) + _register_actions(meas, enter_actions, exit_actions) + + if use_threads is None: + use_threads = config.dataset.use_threads + + param_meas_caller = ( + ThreadPoolParamsCaller(*param_meas) + if use_threads + else SequentialParamsCaller(*param_meas) + ) + + with _catch_interrupts() as interrupted, meas.run() as datasaver, param_meas_caller as call_param_meas: + dataset = datasaver.dataset + additional_setpoints_data = process_params_meas(additional_setpoints) + setpoints1 = np.linspace(start1, stop1, num_points1) + for set_point1 in tqdm(setpoints1, disable=not show_progress): + if set_before_sweep: + param_set2.set(start2) + + param_set1.set(set_point1) + + for action in before_inner_actions: + action() + + time.sleep(delay1) + + setpoints2 = np.linspace(start2, stop2, num_points2) + + # flush to prevent unflushed print's to visually interrupt tqdm bar + # updates + sys.stdout.flush() + sys.stderr.flush() + for set_point2 in tqdm(setpoints2, disable=not show_progress, leave=False): + # skip first inner set point if `set_before_sweep` + if set_point2 == start2 and set_before_sweep: + pass + else: + param_set2.set(set_point2) + time.sleep(delay2) + + datasaver.add_result( + (param_set1, set_point1), + (param_set2, set_point2), + *call_param_meas(), + *additional_setpoints_data, + ) + + if callable(break_condition): + if break_condition(): + raise BreakConditionInterrupt("Break condition was met.") + + for action in after_inner_actions: + action() + if flush_columns: + datasaver.flush_data_to_database() + + return _handle_plotting(dataset, do_plot, interrupted()) + + +class AbstractSweep(ABC): + """ + Abstract sweep class that defines an interface for concrete sweep classes. + """ + + @abstractmethod + def get_setpoints(self) -> np.ndarray: + """ + Returns an array of setpoint values for this sweep. + """ + pass + + @property + @abstractmethod + def param(self) -> _BaseParameter: + """ + Returns the Qcodes sweep parameter. + """ + pass + + @property + @abstractmethod + def delay(self) -> float: + """ + Delay between two consecutive sweep points. + """ + pass + + @property + @abstractmethod + def num_points(self) -> int: + """ + Number of sweep points. + """ + pass + + @property + @abstractmethod + def post_actions(self) -> ActionsT: + """ + actions to be performed after setting param to its setpoint. + """ + pass + + +class LinSweep(AbstractSweep): + """ + Linear sweep. + + Args: + param: Qcodes parameter to sweep. + start: Sweep start value. + stop: Sweep end value. + num_points: Number of sweep points. + delay: Time in seconds between two consequtive sweep points + """ + + def __init__( + self, + param: _BaseParameter, + start: float, + stop: float, + num_points: int, + delay: float = 0, + post_actions: ActionsT = (), + ): + self._param = param + self._start = start + self._stop = stop + self._num_points = num_points + self._delay = delay + self._post_actions = post_actions + + def get_setpoints(self) -> np.ndarray: + """ + Linear (evenly spaced) numpy array for supplied start, stop and + num_points. 
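+
+        For example, ``LinSweep(param, 0, 1, 5).get_setpoints()`` (with a
+        placeholder ``param``) yields the values 0.0, 0.25, 0.5, 0.75, 1.0.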
+ """ + return np.linspace(self._start, self._stop, self._num_points) + + @property + def param(self) -> _BaseParameter: + return self._param + + @property + def delay(self) -> float: + return self._delay + + @property + def num_points(self) -> int: + return self._num_points + + @property + def post_actions(self) -> ActionsT: + return self._post_actions + + +class LogSweep(AbstractSweep): + """ + Logarithmic sweep. + + Args: + param: Qcodes parameter for sweep. + start: Sweep start value. + stop: Sweep end value. + num_points: Number of sweep points. + delay: Time in seconds between two consequtive sweep points. + """ + + def __init__( + self, + param: _BaseParameter, + start: float, + stop: float, + num_points: int, + delay: float = 0, + post_actions: ActionsT = (), + ): + self._param = param + self._start = start + self._stop = stop + self._num_points = num_points + self._delay = delay + self._post_actions = post_actions + + def get_setpoints(self) -> np.ndarray: + """ + Logarithmically spaced numpy array for supplied start, stop and + num_points. + """ + return np.logspace(self._start, self._stop, self._num_points) + + @property + def param(self) -> _BaseParameter: + return self._param + + @property + def delay(self) -> float: + return self._delay + + @property + def num_points(self) -> int: + return self._num_points + + @property + def post_actions(self) -> ActionsT: + return self._post_actions + + +class ArraySweep(AbstractSweep): + """ + Sweep the values of a given array. + + Args: + param: Qcodes parameter for sweep. + array: array with values to sweep. + delay: Time in seconds between two consecutive sweep points. + post_actions: Actions to do after each sweep point. + """ + + def __init__( + self, + param: _BaseParameter, + array: Union[Sequence[float], np.ndarray], + delay: float = 0, + post_actions: ActionsT = (), + ): + self._param = param + self._array = np.array(array) + self._delay = delay + self._post_actions = post_actions + + def get_setpoints(self) -> np.ndarray: + return self._array + + @property + def param(self) -> _BaseParameter: + return self._param + + @property + def delay(self) -> float: + return self._delay + + @property + def num_points(self) -> int: + return len(self._array) + + @property + def post_actions(self) -> ActionsT: + return self._post_actions + + +def dond( + *params: Union[AbstractSweep, Union[ParamMeasT, Sequence[ParamMeasT]]], + write_period: Optional[float] = None, + measurement_name: str = "", + exp: Optional[Experiment] = None, + enter_actions: ActionsT = (), + exit_actions: ActionsT = (), + do_plot: Optional[bool] = None, + show_progress: Optional[bool] = None, + use_threads: Optional[bool] = None, + additional_setpoints: Sequence[_BaseParameter] = tuple(), + log_info: Optional[str] = None, + break_condition: Optional[BreakConditionT] = None, +) -> Union[AxesTupleListWithDataSet, MultiAxesTupleListWithDataSet]: + """ + Perform n-dimentional scan from slowest (first) to the fastest (last), to + measure m measurement parameters. The dimensions should be specified + as sweep objects, and after them the parameters to measure should be passed. + + Args: + params: Instances of n sweep classes and m measurement parameters, + e.g. if linear sweep is considered: + + .. 
code-block:: + + LinSweep(param_set_1, start_1, stop_1, num_points_1, delay_1), ..., + LinSweep(param_set_n, start_n, stop_n, num_points_n, delay_n), + param_meas_1, param_meas_2, ..., param_meas_m + + If multiple DataSets creation is needed, measurement parameters should + be grouped, so one dataset will be created for each group. e.g.: + + .. code-block:: + + LinSweep(param_set_1, start_1, stop_1, num_points_1, delay_1), ..., + LinSweep(param_set_n, start_n, stop_n, num_points_n, delay_n), + [param_meas_1, param_meas_2], ..., [param_meas_m] + + write_period: The time after which the data is actually written to the + database. + measurement_name: Name of the measurement. This will be passed down to + the dataset produced by the measurement. If not given, a default + value of 'results' is used for the dataset. + exp: The experiment to use for this measurement. + enter_actions: A list of functions taking no arguments that will be + called before the measurements start. + exit_actions: A list of functions taking no arguments that will be + called after the measurements ends. + do_plot: should png and pdf versions of the images be saved and plots + are shown after the run. If None the setting will be read from + ``qcodesrc.json`` + show_progress: should a progress bar be displayed during the + measurement. If None the setting will be read from ``qcodesrc.json`` + use_threads: If True, measurements from each instrument will be done on + separate threads. If you are measuring from several instruments + this may give a significant speedup. + additional_setpoints: A list of setpoint parameters to be registered in + the measurement but not scanned/swept-over. + log_info: Message that is logged during the measurement. If None a default + message is used. + break_condition: Callable that takes no arguments. If returned True, + measurement is interrupted. + + Returns: + A tuple of QCoDeS DataSet, Matplotlib axis, Matplotlib colorbar. If + more than one group of measurement parameters is supplied, the output + will be a tuple of tuple(QCoDeS DataSet), tuple(Matplotlib axis), + tuple(Matplotlib colorbar), in which each element of each sub-tuple + belongs to one group, and the order of elements is the order of + the supplied groups. + """ + if do_plot is None: + do_plot = config.dataset.dond_plot + if show_progress is None: + show_progress = config.dataset.dond_show_progress + + sweep_instances, params_meas = _parse_dond_arguments(*params) + nested_setpoints = _make_nested_setpoints(sweep_instances) + + all_setpoint_params = tuple(sweep.param for sweep in sweep_instances) + tuple( + s for s in additional_setpoints + ) + + ( + all_meas_parameters, + grouped_parameters, + measured_parameters, + ) = _extract_paramters_by_type_and_group(measurement_name, params_meas) + + try: + loop_shape = tuple(sweep.num_points for sweep in sweep_instances) + tuple( + 1 for _ in additional_setpoints + ) + shapes: Shapes = detect_shape_of_measurement(measured_parameters, loop_shape) + except TypeError: + LOG.exception( + f"Could not detect shape of {measured_parameters} " + f"falling back to unknown shape." 
+ ) + shapes = None + meas_list = _create_measurements( + all_setpoint_params, + enter_actions, + exit_actions, + exp, + grouped_parameters, + shapes, + write_period, + log_info, + ) + + post_delays: List[float] = [] + params_set: List[_BaseParameter] = [] + post_actions: List[ActionsT] = [] + for sweep in sweep_instances: + post_delays.append(sweep.delay) + params_set.append(sweep.param) + post_actions.append(sweep.post_actions) + + datasets = [] + plots_axes = [] + plots_colorbar = [] + if use_threads is None: + use_threads = config.dataset.use_threads + + params_meas_caller = ( + ThreadPoolParamsCaller(*all_meas_parameters) + if use_threads + else SequentialParamsCaller(*all_meas_parameters) + ) + + try: + with _catch_interrupts() as interrupted, ExitStack() as stack, params_meas_caller as call_params_meas: + datasavers = [stack.enter_context(measure.run()) for measure in meas_list] + additional_setpoints_data = process_params_meas(additional_setpoints) + previous_setpoints = np.empty(len(sweep_instances)) + for setpoints in tqdm(nested_setpoints, disable=not show_progress): + + active_actions, delays = _select_active_actions_delays( + post_actions, + post_delays, + setpoints, + previous_setpoints, + ) + previous_setpoints = setpoints + + param_set_list = [] + param_value_action_delay = zip( + params_set, + setpoints, + active_actions, + delays, + ) + for setpoint_param, setpoint, action, delay in param_value_action_delay: + _conditional_parameter_set(setpoint_param, setpoint) + param_set_list.append((setpoint_param, setpoint)) + for act in action: + act() + time.sleep(delay) + + meas_value_pair = call_params_meas() + for group in grouped_parameters.values(): + group["measured_params"] = [] + for measured in meas_value_pair: + if measured[0] in group["params"]: + group["measured_params"].append(measured) + for ind, datasaver in enumerate(datasavers): + datasaver.add_result( + *param_set_list, + *grouped_parameters[f"group_{ind}"]["measured_params"], + *additional_setpoints_data, + ) + + if callable(break_condition): + if break_condition(): + raise BreakConditionInterrupt("Break condition was met.") + finally: + + for datasaver in datasavers: + ds, plot_axis, plot_color = _handle_plotting( + datasaver.dataset, do_plot, interrupted() + ) + datasets.append(ds) + plots_axes.append(plot_axis) + plots_colorbar.append(plot_color) + + if len(grouped_parameters) == 1: + return datasets[0], plots_axes[0], plots_colorbar[0] + else: + return tuple(datasets), tuple(plots_axes), tuple(plots_colorbar) + + +def _parse_dond_arguments( + *params: Union[AbstractSweep, Union[ParamMeasT, Sequence[ParamMeasT]]] +) -> Tuple[List[AbstractSweep], List[Union[ParamMeasT, Sequence[ParamMeasT]]]]: + """ + Parse supplied arguments into sweep objects and measurement parameters + and their callables. + """ + sweep_instances: List[AbstractSweep] = [] + params_meas: List[Union[ParamMeasT, Sequence[ParamMeasT]]] = [] + for par in params: + if isinstance(par, AbstractSweep): + sweep_instances.append(par) + else: + params_meas.append(par) + return sweep_instances, params_meas + + +def _conditional_parameter_set( + parameter: _BaseParameter, + value: Union[float, complex], +) -> None: + """ + Reads the cache value of the given parameter and set the parameter to + the given value if the value is different from the cache value. 
+ """ + if value != parameter.cache.get(): + parameter.set(value) + + +def _make_nested_setpoints(sweeps: List[AbstractSweep]) -> np.ndarray: + """Create the cartesian product of all the setpoint values.""" + if len(sweeps) == 0: + return np.array([[]]) # 0d sweep (do0d) + setpoint_values = [sweep.get_setpoints() for sweep in sweeps] + setpoint_grids = np.meshgrid(*setpoint_values, indexing="ij") + flat_setpoint_grids = [np.ravel(grid, order="C") for grid in setpoint_grids] + return np.vstack(flat_setpoint_grids).T + + +def _select_active_actions_delays( + actions: Sequence[ActionsT], + delays: Sequence[float], + setpoints: np.ndarray, + previous_setpoints: np.ndarray, +) -> Tuple[List[ActionsT], List[float]]: + """ + Select ActionT (Sequence[Callable]) and delays(Sequence[float]) from + a Sequence of ActionsT and delays, respectively, if the corresponding + setpoint has changed. Otherwise, select an empty Sequence for actions + and zero for delays. + """ + actions_list: List[ActionsT] = [()] * len(setpoints) + setpoints_delay: List[float] = [0] * len(setpoints) + for ind, (new_setpoint, old_setpoint) in enumerate( + zip(setpoints, previous_setpoints) + ): + if new_setpoint != old_setpoint: + actions_list[ind] = actions[ind] + setpoints_delay[ind] = delays[ind] + return (actions_list, setpoints_delay) + + +def _create_measurements( + all_setpoint_params: Sequence[_BaseParameter], + enter_actions: ActionsT, + exit_actions: ActionsT, + exp: Optional[Experiment], + grouped_parameters: Dict[str, ParameterGroup], + shapes: Shapes, + write_period: Optional[float], + log_info: Optional[str], +) -> Tuple[Measurement, ...]: + meas_list: List[Measurement] = [] + if log_info is not None: + _extra_log_info = log_info + else: + _extra_log_info = "Using 'qcodes.dataset.dond'" + for group in grouped_parameters.values(): + meas_name = group["meas_name"] + meas_params = group["params"] + meas = Measurement(name=meas_name, exp=exp) + meas._extra_log_info = _extra_log_info + _register_parameters(meas, all_setpoint_params) + _register_parameters( + meas, meas_params, setpoints=all_setpoint_params, shapes=shapes + ) + _set_write_period(meas, write_period) + _register_actions(meas, enter_actions, exit_actions) + meas_list.append(meas) + return tuple(meas_list) + + +def _extract_paramters_by_type_and_group( + measurement_name: str, + params_meas: Sequence[Union[ParamMeasT, Sequence[ParamMeasT]]], +) -> Tuple[ + Tuple[ParamMeasT, ...], Dict[str, ParameterGroup], Tuple[_BaseParameter, ...] 
+]:
+    measured_parameters: List[_BaseParameter] = []
+    all_meas_parameters: List[ParamMeasT] = []
+    single_group: List[ParamMeasT] = []
+    multi_group: List[Sequence[ParamMeasT]] = []
+    grouped_parameters: Dict[str, ParameterGroup] = {}
+    for param in params_meas:
+        if not isinstance(param, Sequence):
+            single_group.append(param)
+            all_meas_parameters.append(param)
+            if isinstance(param, _BaseParameter):
+                measured_parameters.append(param)
+        elif not isinstance(param, str):
+            multi_group.append(param)
+            for nested_param in param:
+                all_meas_parameters.append(nested_param)
+                if isinstance(nested_param, _BaseParameter):
+                    measured_parameters.append(nested_param)
+    if single_group:
+        pg: ParameterGroup = {
+            "params": tuple(single_group),
+            "meas_name": measurement_name,
+            "measured_params": [],
+        }
+        grouped_parameters["group_0"] = pg
+    if multi_group:
+        for index, par in enumerate(multi_group):
+            pg = {
+                "params": tuple(par),
+                "meas_name": measurement_name,
+                "measured_params": [],
+            }
+            grouped_parameters[f"group_{index}"] = pg
+    return tuple(all_meas_parameters), grouped_parameters, tuple(measured_parameters)
+
+
+def _handle_plotting(
+    data: DataSetProtocol,
+    do_plot: bool = True,
+    interrupted: MeasInterruptT = None,
+) -> AxesTupleListWithDataSet:
+    """
+    Save the plots created for the given dataset as pdf and png.
+
+    Args:
+        data: The dataset to plot and save.
+        do_plot: Whether to create and save the plots; if False, no figures
+            are produced.
+        interrupted: An interrupt (if any) to re-raise after plotting.
+    """
+    if do_plot:
+        res = plot_and_save_image(data)
+    else:
+        res = data, [None], [None]
+
+    if interrupted:
+        raise interrupted
+
+    return res
diff --git a/qcodes/dataset/plotting.py b/qcodes/dataset/plotting.py
index 2c7c8d6d1e5..b8dd0ba8c68 100644
--- a/qcodes/dataset/plotting.py
+++ b/qcodes/dataset/plotting.py
@@ -5,6 +5,7 @@
 
 import inspect
 import logging
+import os
 from contextlib import contextmanager
 from functools import partial
 from typing import Any, List, Optional, Sequence, Tuple, Union, cast
@@ -306,6 +307,46 @@ def plot_dataset(
     return axeslist, new_colorbars
 
+
+def plot_and_save_image(
+    data: DataSetProtocol, save_pdf: bool = True, save_png: bool = True
+) -> Tuple[
+    DataSetProtocol,
+    List[matplotlib.axes.Axes],
+    List[Optional[matplotlib.colorbar.Colorbar]],
+]:
+    """
+    Utility function to plot a dataset and save the resulting figures in pdf
+    and/or png format.
+
+    Args:
+        data: The QCoDeS dataset to be plotted.
+        save_pdf: Save figure in pdf format.
+        save_png: Save figure in png format.
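+
+    Returns:
+        A tuple of the dataset and the matplotlib axes and colorbars
+        produced by ``plot_dataset``.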
+ """ + from qcodes import config + + dataid = data.captured_run_id + axes, cbs = plot_dataset(data) + mainfolder = config.user.mainfolder + experiment_name = data.exp_name + sample_name = data.sample_name + storage_dir = os.path.join(mainfolder, experiment_name, sample_name) + os.makedirs(storage_dir, exist_ok=True) + png_dir = os.path.join(storage_dir, "png") + pdf_dif = os.path.join(storage_dir, "pdf") + os.makedirs(png_dir, exist_ok=True) + os.makedirs(pdf_dif, exist_ok=True) + for i, ax in enumerate(axes): + if save_pdf: + full_path = os.path.join(pdf_dif, f"{dataid}_{i}.pdf") + ax.figure.savefig(full_path, dpi=500) + if save_png: + full_path = os.path.join(png_dir, f"{dataid}_{i}.png") + ax.figure.savefig(full_path, dpi=500) + res = data, axes, cbs + return res + + def plot_by_id( run_id: int, axes: Optional[Union[matplotlib.axes.Axes, Sequence[matplotlib.axes.Axes]]] = None, diff --git a/qcodes/extensions/__init__.py b/qcodes/extensions/__init__.py new file mode 100644 index 00000000000..4cc004be837 --- /dev/null +++ b/qcodes/extensions/__init__.py @@ -0,0 +1,8 @@ +""" +The extensions module contains smaller modules that extend the functionality of QCoDeS. +These modules may import from all of QCoDeS but do not themselves get imported into QCoDeS. +""" +from .installation import register_station_schema_with_vscode +from .slack import Slack, SlackTimeoutWarning + +__all__ = ["Slack", "SlackTimeoutWarning", "register_station_schema_with_vscode"] diff --git a/qcodes/extensions/installation.py b/qcodes/extensions/installation.py new file mode 100644 index 00000000000..1334a052d64 --- /dev/null +++ b/qcodes/extensions/installation.py @@ -0,0 +1,58 @@ +"""This module contains helper scripts to make certain installation tasks +easier.""" + +import json +import os +import sys + +from qcodes.station import SCHEMA_PATH, STATION_YAML_EXT, update_config_schema + + +def register_station_schema_with_vscode() -> None: + """This function registeres the qcodes station schema with vscode. + + Run this function to add the user generated station schema to the list of + associated schemas for the Red Hat YAML schema extension for vscode. + (https://marketplace.visualstudio.com/items?itemName=redhat.vscode-yaml) + + This function will effectively add an entry to `yaml.schemas` in the user + config file of vscode, which is located under + `%APPDATA/Code/User/settings.json`, or will be created there. + + You can alternatively access this + setting via File->Preferences->Settings and search for `yaml.schemas`. + + To enable autocompletinon of QCoDeS instrument from additional packages run + `qcodes.station.update_config_schema`. + + For more information consult `qcodes/docs/examples/Station.ipynb`. + """ + if sys.platform != "win32": + raise RuntimeError( + "This script is only supported on Windows platforms.\n " + "Please consult docstring for more information." + ) + if not os.path.exists(SCHEMA_PATH): + update_config_schema() + + config_path = os.path.expandvars( + os.path.join("%APPDATA%", "Code", "User", "settings.json") + ) + config_backup_path = config_path + "_backup" + + if not os.path.exists(config_path): + raise RuntimeError( + "Could not find the user settings file of vscode. \n" + "Please refer to the station.ipynb notebook to learn how to " + "set the settings manually." 
+        )
+    with open(config_path, "r+") as f:
+        data = json.load(f)
+    data.setdefault("yaml.schemas", {})[
+        r"file:\\" + os.path.splitdrive(SCHEMA_PATH)[1]
+    ] = STATION_YAML_EXT
+
+    os.replace(config_path, config_backup_path)
+
+    with open(config_path, "w") as f:
+        json.dump(data, f, indent=4)
diff --git a/qcodes/extensions/slack.py b/qcodes/extensions/slack.py
new file mode 100644
index 00000000000..ffb6bcd18fb
--- /dev/null
+++ b/qcodes/extensions/slack.py
@@ -0,0 +1,459 @@
+"""
+Slack bot is used to send information about qcodes via Slack IMs.
+Some default commands are provided, and custom commands/tasks can be
+attached (see below).
+
+To set up the Slack bot, a bot first has to be created via Slack
+by clicking 'Create New App' on https://api.slack.com/apps.
+Once created, the bot will have a name and unique token.
+These and other settings have to be saved in a config dict (see the
+``config`` argument of :class:`Slack`).
+
+The App containing your bot needs to have the following bot token scopes to
+perform all methods successfully:
+
+- channels:history
+- channels:read
+- chat:write
+- files:write
+- users:read
+
+These can be set after clicking OAuth & Permissions in the left menubar after
+selecting your bot at https://api.slack.com/apps (or during creation).
+
+Communication with the Slack bot is performed via instant messaging.
+When an IM is sent to the Slack bot, it will be processed during the next
+`update()` call (provided the username is registered in the config).
+Standard commands provided to the Slack bot are:
+
+- plot: Upload latest qcodes plot.
+- msmt/measurement: Print information about latest measurement.
+- notify finished: Send message once measurement is finished.
+
+Custom commands can be added as (cmd, func) key-value pairs to
+`self.commands`. When `cmd` is sent to the bot, `func` is evaluated.
+
+Custom tasks can be added as well. These are functions that are performed
+every time an update is called. The function must return a boolean that
+indicates if the task should be removed from the list of tasks.
+A custom task can be added as a (cmd, func) key-value pair to
+`self.task_commands`.
+They can then be called through Slack IM via:
+
+``notify/task {cmd} *args:`` register a task with name `cmd` that is
+performed every time `update()` is called.
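+
+A minimal setup sketch (the token and username below are placeholders, not
+working values):
+
+.. code-block:: python
+
+    from qcodes.extensions import Slack
+
+    slack_config = {'token': 'xoxb-your-bot-token',
+                    'names': ['your_slack_username']}
+    slack = Slack(config=slack_config)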
+""" + +import inspect +import logging +import os +import tempfile +import threading +import traceback +import warnings +from functools import partial +from time import sleep + +from requests.exceptions import ConnectTimeout, HTTPError, ReadTimeout +from requests.packages.urllib3.exceptions import ReadTimeoutError +from slack_sdk import WebClient + +from qcodes import config as qc_config +from qcodes.instrument.parameter import _BaseParameter +from qcodes.loops import active_data_set, active_loop +from qcodes.plots.base import BasePlot + + +class SlackTimeoutWarning(UserWarning): + pass + + +def convert_command(text): + def try_convert_str(string): + try: + val = int(string) + return val + except ValueError: + pass + try: + val = float(string) + return val + except ValueError: + pass + + return string + + # Format text to lowercase, and remove trailing whitespaces + text = text.lower().rstrip(" ") + command, *args_str = text.split(" ") + + # Convert string args to floats/kwargs + args = [] + kwargs = {} + for arg in args_str: + if "=" in arg: + # arg is a kwarg + key, val = arg.split("=") + # Try to convert into a float + val = try_convert_str(val) + kwargs[key] = val + else: + # arg is not a kwarg + # Try to convert into a float + val = try_convert_str(arg) + args.append(val) + return command, args, kwargs + + +class Slack(threading.Thread): + def __init__(self, interval=3, config=None, auto_start=True, **commands): + """ + Initializes Slack bot, including auto-updating widget if in notebook + and using multiprocessing. + + Args: + interval (int): Update interval for widget (must be over 1s). + config (Optional[dict]): Config dict + If not given, uses qc.config['user']['slack'] + The config dict must contain the following keys: + + - 'bot_name': Name of the bot + - 'bot_token': Token from bot (obtained from slack website) + - 'names': Usernames to periodically check for IM messages + + auto_start (bool): Defaults to True. + + """ + if config is not None: + self.config = config + else: + self.config = qc_config.user.slack + + self.slack = WebClient(token=self.config["token"]) + self.users = self.get_users(self.config["names"]) + self.get_im_ids(self.users) + + self.commands = { + "plot": self.upload_latest_plot, + "msmt": self.print_measurement_information, + "measurement": self.print_measurement_information, + "notify": self.add_task, + "help": self.help_message, + "task": self.add_task, + **commands, + } + self.task_commands = {"finished": self.check_msmt_finished} + + self.interval = interval + self.tasks = [] + + # Flag that exits loop when set to True (called via self.exit()) + self._exit = False + + # Flag that enables actions to be performed in the event loop + # Enabled via self.start(), disabled via self.stop() + self._is_active = False + + # Call Thread init + super().__init__() + + if auto_start: + self.start() + + def start(self): + self._is_active = True + try: + # Start thread, can only be called once + super().start() + except RuntimeError: + # Thread already started, ignoring + pass + + def run(self): + """ + Thread event loop that periodically checks for updates. + Can be stopped via :meth:`stop` , after which the Thread is stopped. + Returns: + None. + """ + while not self._exit: + # Continue event loop + if self._is_active: + # check for updates + self.update() + sleep(self.interval) + + def stop(self): + """ + Stop checking for updates. Can be started again via :meth:`start`. + Returns: + None. 
+ """ + self._is_active = False + + def exit(self): + """ + Exit event loop, stop Thread. + Returns: + None + """ + self._stop = True + + def user_from_id(self, user_id): + """ + Retrieve user from user id. + Args: + user_id: Id from which to retrieve user information. + + Returns: + dict: User information. + """ + return self.slack.users_info(user=user_id)["user"] + + def get_users(self, usernames): + """ + Extracts user information for users. + Args: + usernames: Slack usernames of users. + + Returns: + dict: {username: user} + """ + users = {} + response = self.slack.users_list() + for member in response["members"]: + if member["name"] in usernames: + users[member["name"]] = member + if len(users) != len(usernames): + remaining_names = [name for name in usernames if name not in users] + raise RuntimeError(f"Could not find names {remaining_names}") + return users + + def get_im_ids(self, users): + """ + Adds IM ids of users to users dict. + Also adds `last_ts` to the latest IM message + Args: + users (dict): {username: user} + + Returns: + None. + """ + response = self.slack.conversations_list(types="im") + user_ids = {username: user["id"] for username, user in users.items()} + im_ids = {chan["user"]: chan["id"] for chan in response["channels"]} + for username, user_id in user_ids.items(): + if user_id in im_ids.keys(): + users[username]["im_id"] = im_ids[user_id] + # update last ts + messages = self.get_im_messages(username=username, limit=1) + if messages: + users[username]["last_ts"] = float(messages[0]["ts"]) + else: + users[username]["last_ts"] = None + + def get_im_messages(self, username, **kwargs): + """ + Retrieves IM messages from username. + Args: + username: Name of user. + **kwargs: Additional kwargs for retrieving IM messages. + + Returns: + List of IM messages. + """ + # provide backward compatibility with 'count' keyword. It still works, + # but is undocumented. 'count' likely does the same as 'limit', but + # 'limit' takes precedence + if "limit" not in kwargs.keys(): + kwargs["limit"] = kwargs.pop("count", None) + + channel = self.users[username].get("im_id", None) + if channel is None: + return [] + else: + response = self.slack.conversations_history(channel=channel, **kwargs) + return response["messages"] + + def get_new_im_messages(self): + """ + Retrieves new IM messages for each user in self.users. + Updates user['last_ts'] to ts of newest message. + Returns: + im_messages (Dict): {username: [messages list]} newer than last_ts. + """ + im_messages = {} + for username, user in self.users.items(): + last_ts = user.get("last_ts", None) + new_messages = self.get_im_messages(username=username, oldest=last_ts) + # Kwarg 'oldest' sometimes also returns message with ts==last_ts + new_messages = [m for m in new_messages if float(m["ts"]) != last_ts] + im_messages[username] = new_messages + if new_messages: + self.users[username]["last_ts"] = float(new_messages[0]["ts"]) + return im_messages + + def update(self): + """ + Performs tasks, and checks for new messages. + Periodically called from widget update. + Returns: + None. 
+ """ + new_tasks = [] + for task in self.tasks: + task_finished = task() + if not task_finished: + new_tasks.append(task) + self.tasks = new_tasks + + new_messages = {} + try: + new_messages = self.get_new_im_messages() + except (ReadTimeout, HTTPError, ConnectTimeout, ReadTimeoutError) as e: + # catch any timeouts caused by network delays + warnings.warn("error retrieving slack messages", SlackTimeoutWarning) + logging.info(e) + self.handle_messages(new_messages) + + def help_message(self): + """Return simple help message""" + cc = ", ".join("`" + str(k) + "`" for k in self.commands.keys()) + return "\nAvailable commands: %s" % cc + + def handle_messages(self, messages): + """ + Performs commands depending on messages. + This includes adding tasks to be performed during each update. + """ + for user, user_messages in messages.items(): + for message in user_messages: + if message.get("user", None) != self.users[user]["id"]: + # Filter out bot messages + continue + channel = self.users[user]["im_id"] + # Extract command (first word) and possible args + command, args, kwargs = convert_command(message["text"]) + if command in self.commands: + msg = f"Executing {command}" + if args: + msg += f" {args}" + if kwargs: + msg += f" {kwargs}" + self.slack.chat_postMessage(text=msg, channel=channel) + + func = self.commands[command] + try: + if isinstance(func, _BaseParameter): + results = func(*args, **kwargs) + else: + # Only add channel and Slack if they are explicit + # kwargs + func_sig = inspect.signature(func) + if "channel" in func_sig.parameters: + kwargs["channel"] = channel + if "slack" in func_sig.parameters: + kwargs["slack"] = self + results = func(*args, **kwargs) + + if results is not None: + self.slack.chat_postMessage( + text=f"Results: {results}", channel=channel + ) + + except Exception: + self.slack.chat_postMessage( + text=f"Error: {traceback.format_exc()}", channel=channel + ) + else: + self.slack.chat_postMessage( + text=f"Command {command} not understood. Try `help`", + channel=channel, + ) + + def add_task(self, command, *args, channel, **kwargs): + """ + Add a task to self.tasks, which will be executed during each update + Args: + command: Task command. + *args: Additional args for command. + channel: Slack channel (can also be IM channel). + **kwargs: Additional kwargs for particular. + + Returns: + None. + """ + if command in self.task_commands: + self.slack.chat_postMessage(text=f'Added task "{command}"', channel=channel) + func = self.task_commands[command] + self.tasks.append(partial(func, *args, channel=channel, **kwargs)) + else: + self.slack.chat_postMessage( + text=f"Task command {command} not understood", channel=channel + ) + + def upload_latest_plot(self, channel, **kwargs): + """ + Uploads latest plot (if any) to slack channel. + The latest plot is retrieved from + :class:`qcodes.plots.base.BasePlot`, which is updated + every time a new qcodes plot is instantiated. + Args: + channel: Slack channel (can also be IM channel). + **kwargs: Not used. + + Returns: + None. 
+ """ + + # Create temporary filename + temp_filename = tempfile.mktemp(suffix=".jpg") + # Retrieve latest plot + latest_plot = BasePlot.latest_plot + if latest_plot is not None: + # Saves latest plot to filename + latest_plot.save(filename=temp_filename) + # Upload plot to slack + self.slack.files_upload(file=temp_filename, channels=channel) + os.remove(temp_filename) + else: + self.slack.chat_postMessage(text="No latest plot", channel=channel) + + def print_measurement_information(self, channel, **kwargs): + """ + Prints information about the current measurement. + Information printed is percentage complete, and dataset representation. + Dataset is retrieved from DataSet.latest_dataset, which updates itself + every time a new dataset is created + Args: + channel: Slack channel (can also be IM channel). + **kwargs: Not used. + + Returns: + None. + """ + dataset = active_data_set() + if dataset is not None: + self.slack.chat_postMessage( + text="Measurement is {:.0f}% complete".format( + 100 * dataset.fraction_complete() + ), + channel=channel, + ) + self.slack.chat_postMessage(text=repr(dataset), channel=channel) + else: + self.slack.chat_postMessage(text="No latest dataset found", channel=channel) + + def check_msmt_finished(self, channel, **kwargs): + """ + Checks if the latest measurement is completed. + Args: + channel: Slack channel (can also be IM channel). + **kwargs: Not used. + + Returns: + bool: True if measurement is finished, False otherwise. + """ + if active_loop() is None: + self.slack.chat_postMessage(text="Measurement complete", channel=channel) + return True + else: + return False diff --git a/qcodes/tests/dataset/test_database_extract_runs.py b/qcodes/tests/dataset/test_database_extract_runs.py index 14a5ffdbf51..cad6b3d9f0b 100644 --- a/qcodes/tests/dataset/test_database_extract_runs.py +++ b/qcodes/tests/dataset/test_database_extract_runs.py @@ -22,6 +22,7 @@ load_by_run_spec, ) from qcodes.dataset.database_extract_runs import extract_runs_into_db +from qcodes.dataset.do_nd import do1d, do2d from qcodes.dataset.experiment_container import ( Experiment, load_experiment_by_name, @@ -34,7 +35,6 @@ from qcodes.dataset.sqlite.queries import get_experiments from qcodes.tests.common import error_caused_by from qcodes.tests.instrument_mocks import DummyInstrument -from qcodes.utils.dataset.doNd import do1d, do2d @contextmanager diff --git a/qcodes/tests/dataset/test_doNd.py b/qcodes/tests/dataset/test_doNd.py index 8227af3c51f..84851741197 100644 --- a/qcodes/tests/dataset/test_doNd.py +++ b/qcodes/tests/dataset/test_doNd.py @@ -11,6 +11,7 @@ from qcodes import config from qcodes.dataset import new_experiment from qcodes.dataset.data_set import DataSet +from qcodes.dataset.do_nd import ArraySweep, LinSweep, LogSweep, do0d, do1d, do2d, dond from qcodes.instrument.parameter import Parameter, _BaseParameter from qcodes.tests.instrument_mocks import ( ArraySetPointParam, @@ -19,7 +20,6 @@ MultiSetPointParam, ) from qcodes.utils import validators -from qcodes.utils.dataset.doNd import ArraySweep, LinSweep, LogSweep, do0d, do1d, do2d, dond from qcodes.utils.validators import Arrays from .conftest import ArrayshapedParam diff --git a/qcodes/tests/test_slack.py b/qcodes/tests/test_slack.py index ba833b08eb1..a1cc359a769 100644 --- a/qcodes/tests/test_slack.py +++ b/qcodes/tests/test_slack.py @@ -45,16 +45,18 @@ def setup_slack(): 'token': '123', 'names': ['dummyuser'] } - import qcodes.utils.slack # pylint: disable=import-outside-toplevel - slack = 
qcodes.utils.slack.Slack(config=slack_config, auto_start=False) + import qcodes.extensions.slack # pylint: disable=import-outside-toplevel + + slack = qcodes.extensions.slack.Slack(config=slack_config, auto_start=False) return slack def test_convert_command_should_convert_floats(): - import qcodes.utils.slack # pylint: disable=import-outside-toplevel - cmd, arg, kwarg = qcodes.utils.slack.convert_command('comm 0.234 key=0.1') - assert cmd == 'comm' + import qcodes.extensions.slack # pylint: disable=import-outside-toplevel + + cmd, arg, kwarg = qcodes.extensions.slack.convert_command("comm 0.234 key=0.1") + assert cmd == "comm" assert arg == [pytest.approx(0.234)] assert kwarg == {'key': pytest.approx(0.1)} @@ -71,8 +73,9 @@ def test_slack_instance_should_get_config_from_qc_config(): 'names': ['dummyuser'] } cf.add(key='slack', value=slack_config) - import qcodes.utils.slack # pylint: disable=import-outside-toplevel - slack = qcodes.utils.slack.Slack(config=None, auto_start=False) + import qcodes.extensions.slack # pylint: disable=import-outside-toplevel + + slack = qcodes.extensions.slack.Slack(config=None, auto_start=False) assert 'dummyuser' in slack.users.keys() @@ -83,8 +86,9 @@ def test_slack_instance_should_start(mocker): 'names': ['dummyuser'] } mock_thread_start = mocker.patch('threading.Thread.start') - import qcodes.utils.slack # pylint: disable=import-outside-toplevel - _ = qcodes.utils.slack.Slack(config=slack_config) + import qcodes.extensions.slack # pylint: disable=import-outside-toplevel + + _ = qcodes.extensions.slack.Slack(config=slack_config) mock_thread_start.assert_called() @@ -98,8 +102,9 @@ def test_slack_instance_should_not_start_when_already_started(mocker): mock_thread_start = mocker.patch('threading.Thread.start') mock_thread_start.side_effect = RuntimeError - import qcodes.utils.slack # pylint: disable=import-outside-toplevel - _ = qcodes.utils.slack.Slack(config=slack_config) + import qcodes.extensions.slack # pylint: disable=import-outside-toplevel + + _ = qcodes.extensions.slack.Slack(config=slack_config) mock_thread_start.assert_called() @@ -112,8 +117,9 @@ def test_slack_instance_should_start_and_stop(mocker): } mocker.patch('threading.Thread.start') - import qcodes.utils.slack # pylint: disable=import-outside-toplevel - slack = qcodes.utils.slack.Slack(config=slack_config, interval=0) + import qcodes.extensions.slack # pylint: disable=import-outside-toplevel + + slack = qcodes.extensions.slack.Slack(config=slack_config, interval=0) slack.stop() assert not slack._is_active @@ -244,7 +250,7 @@ def test_slack_instance_should_update_with_task_returning_false(slack): def test_slack_instance_should_update_with_task_returning_true(slack, mocker): - mocker.patch('qcodes.utils.slack.active_loop', return_value=not None) + mocker.patch("qcodes.extensions.slack.active_loop", return_value=not None) slack.add_task('finished', channel='CH234') slack.update() @@ -254,7 +260,7 @@ def test_slack_instance_should_update_with_task_returning_true(slack, mocker): def test_slack_instance_should_update_with_exception(slack, mocker): - method_name = 'qcodes.utils.slack.Slack.get_new_im_messages' + method_name = "qcodes.extensions.slack.Slack.get_new_im_messages" mock_get_new_im_messages = mocker.patch(method_name) mocker.patch('warnings.warn') mocker.patch('logging.info') @@ -324,7 +330,7 @@ def test_slack_inst_should_add_unknown_task_command(mock_webclient, slack): def test_slack_inst_should_upload_latest_plot(mock_webclient, slack, mocker): - method_name = 
'qcodes.utils.slack.BasePlot.latest_plot' + method_name = "qcodes.extensions.slack.BasePlot.latest_plot" mocker.patch(method_name, return_value=not None) mocker.patch('os.remove') slack.upload_latest_plot(channel='CH234') @@ -341,7 +347,7 @@ def test_slack_inst_should_not_fail_upl_latest_wo_plot(mock_webclient, slack): def test_slack_inst_should_print_measurement(mock_webclient, slack, mocker): dataset = mocker.MagicMock() dataset.fraction_complete.return_value = 0.123 - mocker.patch('qcodes.utils.slack.active_data_set', return_value=dataset) + mocker.patch("qcodes.extensions.slack.active_data_set", return_value=dataset) slack.print_measurement_information(channel='CH234') diff --git a/qcodes/utils/dataset/doNd.py b/qcodes/utils/dataset/doNd.py index 8b812a1680c..1aa0c40031e 100644 --- a/qcodes/utils/dataset/doNd.py +++ b/qcodes/utils/dataset/doNd.py @@ -1,1054 +1,75 @@ -import logging -import os -import sys -import time -from abc import ABC, abstractmethod -from contextlib import ExitStack, contextmanager -from typing import Callable, Dict, Iterator, List, Optional, Sequence, Tuple, Union - -import matplotlib -import numpy as np -from tqdm.auto import tqdm -from typing_extensions import TypedDict - -from qcodes import config -from qcodes.dataset.data_set_protocol import DataSetProtocol, res_type -from qcodes.dataset.descriptions.detect_shapes import detect_shape_of_measurement -from qcodes.dataset.descriptions.versioning.rundescribertypes import Shapes -from qcodes.dataset.experiment_container import Experiment -from qcodes.dataset.measurements import Measurement -from qcodes.dataset.plotting import plot_dataset -from qcodes.instrument.parameter import _BaseParameter -from qcodes.utils.threading import ( - SequentialParamsCaller, - ThreadPoolParamsCaller, - process_params_meas, +import warnings + +from qcodes.dataset.do_nd import ( + AbstractSweep, + ActionsT, + ArraySweep, + AxesTuple, + AxesTupleList, + AxesTupleListWithDataSet, + BreakConditionInterrupt, + BreakConditionT, + LinSweep, + LogSweep, + MeasInterruptT, + MultiAxesTupleListWithDataSet, + ParameterGroup, + ParamMeasT, + UnsafeThreadingException, + _catch_interrupts, + _conditional_parameter_set, + _create_measurements, + _extract_paramters_by_type_and_group, + _handle_plotting, + _make_nested_setpoints, + _parse_dond_arguments, + _register_actions, + _register_parameters, + _select_active_actions_delays, + _set_write_period, + do0d, + do1d, + do2d, + dond, ) - -ActionsT = Sequence[Callable[[], None]] -BreakConditionT = Callable[[], bool] - -ParamMeasT = Union[_BaseParameter, Callable[[], None]] - -AxesTuple = Tuple[matplotlib.axes.Axes, matplotlib.colorbar.Colorbar] -AxesTupleList = Tuple[ - List[matplotlib.axes.Axes], List[Optional[matplotlib.colorbar.Colorbar]] +from qcodes.dataset.plotting import plot_and_save_image as plot + +# todo enable warning once new api is in release +# warnings.warn( +# "qcodes.utils.dataset.doNd module is deprecated. 
" +# "Please update to import from qcodes.dataset" +# ) + +__all__ = [ + "AbstractSweep", + "ActionsT", + "ArraySweep", + "AxesTuple", + "AxesTupleList", + "AxesTupleListWithDataSet", + "BreakConditionInterrupt", + "BreakConditionT", + "LinSweep", + "LogSweep", + "MeasInterruptT", + "MultiAxesTupleListWithDataSet", + "ParamMeasT", + "ParameterGroup", + "UnsafeThreadingException", + "_catch_interrupts", + "_conditional_parameter_set", + "_create_measurements", + "_extract_paramters_by_type_and_group", + "_handle_plotting", + "_make_nested_setpoints", + "_parse_dond_arguments", + "_register_actions", + "_register_parameters", + "_select_active_actions_delays", + "_set_write_period", + "do0d", + "do1d", + "do2d", + "dond", + "plot", ] -AxesTupleListWithDataSet = Tuple[ - DataSetProtocol, - List[matplotlib.axes.Axes], - List[Optional[matplotlib.colorbar.Colorbar]], -] -MultiAxesTupleListWithDataSet = Tuple[ - Tuple[DataSetProtocol, ...], - Tuple[List[matplotlib.axes.Axes], ...], - Tuple[List[Optional[matplotlib.colorbar.Colorbar]], ...], -] - -LOG = logging.getLogger(__name__) - - -class ParameterGroup(TypedDict): - params: Tuple[ParamMeasT, ...] - meas_name: str - measured_params: List[res_type] - - -class UnsafeThreadingException(Exception): - pass - - -class BreakConditionInterrupt(Exception): - pass - - -MeasInterruptT = Union[KeyboardInterrupt, BreakConditionInterrupt, None] - - -def _register_parameters( - meas: Measurement, - param_meas: Sequence[ParamMeasT], - setpoints: Optional[Sequence[_BaseParameter]] = None, - shapes: Shapes = None, -) -> None: - for parameter in param_meas: - if isinstance(parameter, _BaseParameter): - meas.register_parameter(parameter, setpoints=setpoints) - meas.set_shapes(shapes=shapes) - - -def _register_actions( - meas: Measurement, enter_actions: ActionsT, exit_actions: ActionsT -) -> None: - for action in enter_actions: - # this omits the possibility of passing - # argument to enter and exit actions. - # Do we want that? - meas.add_before_run(action, ()) - for action in exit_actions: - meas.add_after_run(action, ()) - - -def _set_write_period(meas: Measurement, write_period: Optional[float] = None) -> None: - if write_period is not None: - meas.write_period = write_period - - -@contextmanager -def _catch_interrupts() -> Iterator[Callable[[], MeasInterruptT]]: - interrupt_exception = None - - def get_interrupt_exception() -> MeasInterruptT: - nonlocal interrupt_exception - return interrupt_exception - - try: - yield get_interrupt_exception - except (KeyboardInterrupt, BreakConditionInterrupt) as e: - interrupt_exception = e - - -def do0d( - *param_meas: ParamMeasT, - write_period: Optional[float] = None, - measurement_name: str = "", - exp: Optional[Experiment] = None, - do_plot: Optional[bool] = None, - use_threads: Optional[bool] = None, - log_info: Optional[str] = None, -) -> AxesTupleListWithDataSet: - """ - Perform a measurement of a single parameter. This is probably most - useful for an ArrayParameter that already returns an array of data points - - Args: - *param_meas: Parameter(s) to measure at each step or functions that - will be called at each step. The function should take no arguments. - The parameters and functions are called in the order they are - supplied. - write_period: The time after which the data is actually written to the - database. - measurement_name: Name of the measurement. This will be passed down to - the dataset produced by the measurement. If not given, a default - value of 'results' is used for the dataset. 
- exp: The experiment to use for this measurement. - do_plot: should png and pdf versions of the images be saved after the - run. If None the setting will be read from ``qcodesrc.json`` - use_threads: If True measurements from each instrument will be done on - separate threads. If you are measuring from several instruments - this may give a significant speedup. - log_info: Message that is logged during the measurement. If None a default - message is used. - - Returns: - The QCoDeS dataset. - """ - if do_plot is None: - do_plot = config.dataset.dond_plot - meas = Measurement(name=measurement_name, exp=exp) - if log_info is not None: - meas._extra_log_info = log_info - else: - meas._extra_log_info = "Using 'qcodes.utils.dataset.doNd.do0d'" - - measured_parameters = tuple( - param for param in param_meas if isinstance(param, _BaseParameter) - ) - - try: - shapes: Shapes = detect_shape_of_measurement( - measured_parameters, - ) - except TypeError: - LOG.exception( - f"Could not detect shape of {measured_parameters} " - f"falling back to unknown shape." - ) - shapes = None - - _register_parameters(meas, param_meas, shapes=shapes) - _set_write_period(meas, write_period) - - with meas.run() as datasaver: - datasaver.add_result(*process_params_meas(param_meas, use_threads=use_threads)) - dataset = datasaver.dataset - - return _handle_plotting(dataset, do_plot) - - -def do1d( - param_set: _BaseParameter, - start: float, - stop: float, - num_points: int, - delay: float, - *param_meas: ParamMeasT, - enter_actions: ActionsT = (), - exit_actions: ActionsT = (), - write_period: Optional[float] = None, - measurement_name: str = "", - exp: Optional[Experiment] = None, - do_plot: Optional[bool] = None, - use_threads: Optional[bool] = None, - additional_setpoints: Sequence[_BaseParameter] = tuple(), - show_progress: Optional[None] = None, - log_info: Optional[str] = None, - break_condition: Optional[BreakConditionT] = None, -) -> AxesTupleListWithDataSet: - """ - Perform a 1D scan of ``param_set`` from ``start`` to ``stop`` in - ``num_points`` measuring param_meas at each step. In case param_meas is - an ArrayParameter this is effectively a 2d scan. - - Args: - param_set: The QCoDeS parameter to sweep over - start: Starting point of sweep - stop: End point of sweep - num_points: Number of points in sweep - delay: Delay after setting parameter before measurement is performed - *param_meas: Parameter(s) to measure at each step or functions that - will be called at each step. The function should take no arguments. - The parameters and functions are called in the order they are - supplied. - enter_actions: A list of functions taking no arguments that will be - called before the measurements start - exit_actions: A list of functions taking no arguments that will be - called after the measurements ends - write_period: The time after which the data is actually written to the - database. - additional_setpoints: A list of setpoint parameters to be registered in - the measurement but not scanned. - measurement_name: Name of the measurement. This will be passed down to - the dataset produced by the measurement. If not given, a default - value of 'results' is used for the dataset. - exp: The experiment to use for this measurement. - do_plot: should png and pdf versions of the images be saved after the - run. If None the setting will be read from ``qcodesrc.json` - use_threads: If True measurements from each instrument will be done on - separate threads. 
If you are measuring from several instruments - this may give a significant speedup. - show_progress: should a progress bar be displayed during the - measurement. If None the setting will be read from ``qcodesrc.json` - log_info: Message that is logged during the measurement. If None a default - message is used. - break_condition: Callable that takes no arguments. If returned True, - measurement is interrupted. - - Returns: - The QCoDeS dataset. - """ - if do_plot is None: - do_plot = config.dataset.dond_plot - if show_progress is None: - show_progress = config.dataset.dond_show_progress - - meas = Measurement(name=measurement_name, exp=exp) - if log_info is not None: - meas._extra_log_info = log_info - else: - meas._extra_log_info = "Using 'qcodes.utils.dataset.doNd.do1d'" - - all_setpoint_params = (param_set,) + tuple(s for s in additional_setpoints) - - measured_parameters = tuple( - param for param in param_meas if isinstance(param, _BaseParameter) - ) - try: - loop_shape = (num_points,) + tuple(1 for _ in additional_setpoints) - shapes: Shapes = detect_shape_of_measurement(measured_parameters, loop_shape) - except TypeError: - LOG.exception( - f"Could not detect shape of {measured_parameters} " - f"falling back to unknown shape." - ) - shapes = None - - _register_parameters(meas, all_setpoint_params) - _register_parameters(meas, param_meas, setpoints=all_setpoint_params, shapes=shapes) - _set_write_period(meas, write_period) - _register_actions(meas, enter_actions, exit_actions) - - - if use_threads is None: - use_threads = config.dataset.use_threads - - param_meas_caller = ( - ThreadPoolParamsCaller(*param_meas) - if use_threads - else SequentialParamsCaller(*param_meas) - ) - - # do1D enforces a simple relationship between measured parameters - # and set parameters. 
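A sketch of a basic one-dimensional sweep with the relocated ``do1d`` (parameter names and values are illustrative; a database and experiment are assumed to be initialised as in the ``do0d`` sketch above)::

    from qcodes.dataset.do_nd import do1d
    from qcodes.instrument.parameter import Parameter

    gate = Parameter("gate", set_cmd=None, get_cmd=None, unit="V")
    current = Parameter("current", get_cmd=lambda: 0.0, unit="A")

    # sweep gate from 0 V to 1 V in 11 points, waiting 10 ms after each set
    dataset, axes, cbs = do1d(
        gate, 0.0, 1.0, 11, 0.01, current, do_plot=False, show_progress=False
    )
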
For anything more complicated this should be - # reimplemented from scratch - with _catch_interrupts() as interrupted, meas.run() as datasaver, param_meas_caller as call_param_meas: - dataset = datasaver.dataset - additional_setpoints_data = process_params_meas(additional_setpoints) - setpoints = np.linspace(start, stop, num_points) - - # flush to prevent unflushed print's to visually interrupt tqdm bar - # updates - sys.stdout.flush() - sys.stderr.flush() - - for set_point in tqdm(setpoints, disable=not show_progress): - param_set.set(set_point) - time.sleep(delay) - datasaver.add_result( - (param_set, set_point), *call_param_meas(), *additional_setpoints_data - ) - - if callable(break_condition): - if break_condition(): - raise BreakConditionInterrupt("Break condition was met.") - - return _handle_plotting(dataset, do_plot, interrupted()) - - -def do2d( - param_set1: _BaseParameter, - start1: float, - stop1: float, - num_points1: int, - delay1: float, - param_set2: _BaseParameter, - start2: float, - stop2: float, - num_points2: int, - delay2: float, - *param_meas: ParamMeasT, - set_before_sweep: Optional[bool] = True, - enter_actions: ActionsT = (), - exit_actions: ActionsT = (), - before_inner_actions: ActionsT = (), - after_inner_actions: ActionsT = (), - write_period: Optional[float] = None, - measurement_name: str = "", - exp: Optional[Experiment] = None, - flush_columns: bool = False, - do_plot: Optional[bool] = None, - use_threads: Optional[bool] = None, - additional_setpoints: Sequence[_BaseParameter] = tuple(), - show_progress: Optional[None] = None, - log_info: Optional[str] = None, - break_condition: Optional[BreakConditionT] = None, -) -> AxesTupleListWithDataSet: - """ - Perform a 1D scan of ``param_set1`` from ``start1`` to ``stop1`` in - ``num_points1`` and ``param_set2`` from ``start2`` to ``stop2`` in - ``num_points2`` measuring param_meas at each step. - - Args: - param_set1: The QCoDeS parameter to sweep over in the outer loop - start1: Starting point of sweep in outer loop - stop1: End point of sweep in the outer loop - num_points1: Number of points to measure in the outer loop - delay1: Delay after setting parameter in the outer loop - param_set2: The QCoDeS parameter to sweep over in the inner loop - start2: Starting point of sweep in inner loop - stop2: End point of sweep in the inner loop - num_points2: Number of points to measure in the inner loop - delay2: Delay after setting parameter before measurement is performed - *param_meas: Parameter(s) to measure at each step or functions that - will be called at each step. The function should take no arguments. - The parameters and functions are called in the order they are - supplied. - set_before_sweep: if True the outer parameter is set to its first value - before the inner parameter is swept to its next value. - enter_actions: A list of functions taking no arguments that will be - called before the measurements start - exit_actions: A list of functions taking no arguments that will be - called after the measurements ends - before_inner_actions: Actions executed before each run of the inner loop - after_inner_actions: Actions executed after each run of the inner loop - write_period: The time after which the data is actually written to the - database. - measurement_name: Name of the measurement. This will be passed down to - the dataset produced by the measurement. If not given, a default - value of 'results' is used for the dataset. - exp: The experiment to use for this measurement. 
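The two-dimensional variant follows the same pattern; a sketch with stand-in parameters (outer loop over ``outer_gate``, inner loop over ``inner_gate``)::

    from qcodes.dataset.do_nd import do2d
    from qcodes.instrument.parameter import Parameter

    outer_gate = Parameter("outer_gate", set_cmd=None, get_cmd=None, unit="V")
    inner_gate = Parameter("inner_gate", set_cmd=None, get_cmd=None, unit="V")
    current = Parameter("current", get_cmd=lambda: 0.0, unit="A")

    dataset, axes, cbs = do2d(
        outer_gate, -1.0, 1.0, 21, 0.01,
        inner_gate, 0.0, 0.5, 11, 0.001,
        current,
        do_plot=False,
        show_progress=False,
    )
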
- flush_columns: The data is written after a column is finished - independent of the passed time and write period. - additional_setpoints: A list of setpoint parameters to be registered in - the measurement but not scanned. - do_plot: should png and pdf versions of the images be saved after the - run. If None the setting will be read from ``qcodesrc.json`` - use_threads: If True measurements from each instrument will be done on - separate threads. If you are measuring from several instruments - this may give a significant speedup. - show_progress: should a progress bar be displayed during the - measurement. If None the setting will be read from ``qcodesrc.json` - log_info: Message that is logged during the measurement. If None a default - message is used. - break_condition: Callable that takes no arguments. If returned True, - measurement is interrupted. - - Returns: - The QCoDeS dataset. - """ - - if do_plot is None: - do_plot = config.dataset.dond_plot - if show_progress is None: - show_progress = config.dataset.dond_show_progress - - meas = Measurement(name=measurement_name, exp=exp) - if log_info is not None: - meas._extra_log_info = log_info - else: - meas._extra_log_info = "Using 'qcodes.utils.dataset.doNd.do2d'" - all_setpoint_params = ( - param_set1, - param_set2, - ) + tuple(s for s in additional_setpoints) - - measured_parameters = tuple( - param for param in param_meas if isinstance(param, _BaseParameter) - ) - - try: - loop_shape = (num_points1, num_points2) + tuple(1 for _ in additional_setpoints) - shapes: Shapes = detect_shape_of_measurement(measured_parameters, loop_shape) - except TypeError: - LOG.exception( - f"Could not detect shape of {measured_parameters} " - f"falling back to unknown shape." - ) - shapes = None - - _register_parameters(meas, all_setpoint_params) - _register_parameters(meas, param_meas, setpoints=all_setpoint_params, shapes=shapes) - _set_write_period(meas, write_period) - _register_actions(meas, enter_actions, exit_actions) - - if use_threads is None: - use_threads = config.dataset.use_threads - - param_meas_caller = ( - ThreadPoolParamsCaller(*param_meas) - if use_threads - else SequentialParamsCaller(*param_meas) - ) - - with _catch_interrupts() as interrupted, meas.run() as datasaver, param_meas_caller as call_param_meas: - dataset = datasaver.dataset - additional_setpoints_data = process_params_meas(additional_setpoints) - setpoints1 = np.linspace(start1, stop1, num_points1) - for set_point1 in tqdm(setpoints1, disable=not show_progress): - if set_before_sweep: - param_set2.set(start2) - - param_set1.set(set_point1) - - for action in before_inner_actions: - action() - - time.sleep(delay1) - - setpoints2 = np.linspace(start2, stop2, num_points2) - - # flush to prevent unflushed print's to visually interrupt tqdm bar - # updates - sys.stdout.flush() - sys.stderr.flush() - for set_point2 in tqdm(setpoints2, disable=not show_progress, leave=False): - # skip first inner set point if `set_before_sweep` - if set_point2 == start2 and set_before_sweep: - pass - else: - param_set2.set(set_point2) - time.sleep(delay2) - - datasaver.add_result( - (param_set1, set_point1), - (param_set2, set_point2), - *call_param_meas(), - *additional_setpoints_data, - ) - - if callable(break_condition): - if break_condition(): - raise BreakConditionInterrupt("Break condition was met.") - - for action in after_inner_actions: - action() - if flush_columns: - datasaver.flush_data_to_database() - - return _handle_plotting(dataset, do_plot, interrupted()) - - -class 
AbstractSweep(ABC): - """ - Abstract sweep class that defines an interface for concrete sweep classes. - """ - - @abstractmethod - def get_setpoints(self) -> np.ndarray: - """ - Returns an array of setpoint values for this sweep. - """ - pass - - @property - @abstractmethod - def param(self) -> _BaseParameter: - """ - Returns the Qcodes sweep parameter. - """ - pass - - @property - @abstractmethod - def delay(self) -> float: - """ - Delay between two consecutive sweep points. - """ - pass - - @property - @abstractmethod - def num_points(self) -> int: - """ - Number of sweep points. - """ - pass - - @property - @abstractmethod - def post_actions(self) -> ActionsT: - """ - actions to be performed after setting param to its setpoint. - """ - pass - - -class LinSweep(AbstractSweep): - """ - Linear sweep. - - Args: - param: Qcodes parameter to sweep. - start: Sweep start value. - stop: Sweep end value. - num_points: Number of sweep points. - delay: Time in seconds between two consequtive sweep points - """ - - def __init__( - self, - param: _BaseParameter, - start: float, - stop: float, - num_points: int, - delay: float = 0, - post_actions: ActionsT = (), - ): - self._param = param - self._start = start - self._stop = stop - self._num_points = num_points - self._delay = delay - self._post_actions = post_actions - - def get_setpoints(self) -> np.ndarray: - """ - Linear (evenly spaced) numpy array for supplied start, stop and - num_points. - """ - return np.linspace(self._start, self._stop, self._num_points) - - @property - def param(self) -> _BaseParameter: - return self._param - - @property - def delay(self) -> float: - return self._delay - - @property - def num_points(self) -> int: - return self._num_points - - @property - def post_actions(self) -> ActionsT: - return self._post_actions - - -class LogSweep(AbstractSweep): - """ - Logarithmic sweep. - - Args: - param: Qcodes parameter for sweep. - start: Sweep start value. - stop: Sweep end value. - num_points: Number of sweep points. - delay: Time in seconds between two consequtive sweep points. - """ - - def __init__( - self, - param: _BaseParameter, - start: float, - stop: float, - num_points: int, - delay: float = 0, - post_actions: ActionsT = (), - ): - self._param = param - self._start = start - self._stop = stop - self._num_points = num_points - self._delay = delay - self._post_actions = post_actions - - def get_setpoints(self) -> np.ndarray: - """ - Logarithmically spaced numpy array for supplied start, stop and - num_points. - """ - return np.logspace(self._start, self._stop, self._num_points) - - @property - def param(self) -> _BaseParameter: - return self._param - - @property - def delay(self) -> float: - return self._delay - - @property - def num_points(self) -> int: - return self._num_points - - @property - def post_actions(self) -> ActionsT: - return self._post_actions - - -class ArraySweep(AbstractSweep): - """ - Sweep the values of a given array. - - Args: - param: Qcodes parameter for sweep. - array: array with values to sweep. - delay: Time in seconds between two consecutive sweep points. - post_actions: Actions to do after each sweep point. 
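A sketch of composing these sweep objects with ``dond`` (defined further below); reusing the stand-in parameters from the earlier sketches, the outer axis is a linear sweep and the inner axis an explicit array of setpoints::

    from qcodes.dataset.do_nd import ArraySweep, LinSweep, dond
    from qcodes.instrument.parameter import Parameter

    outer_gate = Parameter("outer_gate", set_cmd=None, get_cmd=None, unit="V")
    inner_gate = Parameter("inner_gate", set_cmd=None, get_cmd=None, unit="V")
    current = Parameter("current", get_cmd=lambda: 0.0, unit="A")

    outer_sweep = LinSweep(outer_gate, -1.0, 1.0, 21, delay=0.01)
    inner_sweep = ArraySweep(inner_gate, [0.0, 0.1, 0.3, 0.6], delay=0.001)

    # a single group of measured parameters, so a single dataset comes back
    dataset, axes, cbs = dond(
        outer_sweep, inner_sweep, current, do_plot=False, show_progress=False
    )
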
- """ - - def __init__( - self, - param: _BaseParameter, - array: Union[Sequence[float], np.ndarray], - delay: float = 0, - post_actions: ActionsT = (), - ): - self._param = param - self._array = np.array(array) - self._delay = delay - self._post_actions = post_actions - - def get_setpoints(self) -> np.ndarray: - return self._array - - @property - def param(self) -> _BaseParameter: - return self._param - - @property - def delay(self) -> float: - return self._delay - - @property - def num_points(self) -> int: - return len(self._array) - - @property - def post_actions(self) -> ActionsT: - return self._post_actions - - -def dond( - *params: Union[AbstractSweep, Union[ParamMeasT, Sequence[ParamMeasT]]], - write_period: Optional[float] = None, - measurement_name: str = "", - exp: Optional[Experiment] = None, - enter_actions: ActionsT = (), - exit_actions: ActionsT = (), - do_plot: Optional[bool] = None, - show_progress: Optional[bool] = None, - use_threads: Optional[bool] = None, - additional_setpoints: Sequence[_BaseParameter] = tuple(), - log_info: Optional[str] = None, - break_condition: Optional[BreakConditionT] = None, -) -> Union[AxesTupleListWithDataSet, MultiAxesTupleListWithDataSet]: - """ - Perform n-dimentional scan from slowest (first) to the fastest (last), to - measure m measurement parameters. The dimensions should be specified - as sweep objects, and after them the parameters to measure should be passed. - - Args: - *params: Instances of n sweep classes and m measurement parameters, - e.g. if linear sweep is considered: - - .. code-block:: - - LinSweep(param_set_1, start_1, stop_1, num_points_1, delay_1), ..., - LinSweep(param_set_n, start_n, stop_n, num_points_n, delay_n), - param_meas_1, param_meas_2, ..., param_meas_m - - If multiple DataSets creation is needed, measurement parameters should - be grouped, so one dataset will be created for each group. e.g.: - - .. code-block:: - - LinSweep(param_set_1, start_1, stop_1, num_points_1, delay_1), ..., - LinSweep(param_set_n, start_n, stop_n, num_points_n, delay_n), - [param_meas_1, param_meas_2], ..., [param_meas_m] - - write_period: The time after which the data is actually written to the - database. - measurement_name: Name of the measurement. This will be passed down to - the dataset produced by the measurement. If not given, a default - value of 'results' is used for the dataset. - exp: The experiment to use for this measurement. - enter_actions: A list of functions taking no arguments that will be - called before the measurements start. - exit_actions: A list of functions taking no arguments that will be - called after the measurements ends. - do_plot: should png and pdf versions of the images be saved and plots - are shown after the run. If None the setting will be read from - ``qcodesrc.json`` - show_progress: should a progress bar be displayed during the - measurement. If None the setting will be read from ``qcodesrc.json` - use_threads: If True, measurements from each instrument will be done on - separate threads. If you are measuring from several instruments - this may give a significant speedup. - additional_setpoints: A list of setpoint parameters to be registered in - the measurement but not scanned/swept-over. - log_info: Message that is logged during the measurement. If None a default - message is used. - break_condition: Callable that takes no arguments. If returned True, - measurement is interrupted. - - Returns: - A tuple of QCoDeS DataSet, Matplotlib axis, Matplotlib colorbar. 
If - more than one group of measurement parameters is supplied, the output - will be a tuple of tuple(QCoDeS DataSet), tuple(Matplotlib axis), - tuple(Matplotlib colorbar), in which each element of each sub-tuple - belongs to one group, and the order of elements is the order of - the supplied groups. - """ - if do_plot is None: - do_plot = config.dataset.dond_plot - if show_progress is None: - show_progress = config.dataset.dond_show_progress - - sweep_instances, params_meas = _parse_dond_arguments(*params) - nested_setpoints = _make_nested_setpoints(sweep_instances) - - all_setpoint_params = tuple(sweep.param for sweep in sweep_instances) + tuple( - s for s in additional_setpoints - ) - - ( - all_meas_parameters, - grouped_parameters, - measured_parameters, - ) = _extract_paramters_by_type_and_group(measurement_name, params_meas) - - try: - loop_shape = tuple(sweep.num_points for sweep in sweep_instances) + tuple( - 1 for _ in additional_setpoints - ) - shapes: Shapes = detect_shape_of_measurement(measured_parameters, loop_shape) - except TypeError: - LOG.exception( - f"Could not detect shape of {measured_parameters} " - f"falling back to unknown shape." - ) - shapes = None - meas_list = _create_measurements( - all_setpoint_params, - enter_actions, - exit_actions, - exp, - grouped_parameters, - shapes, - write_period, - log_info, - ) - - post_delays: List[float] = [] - params_set: List[_BaseParameter] = [] - post_actions: List[ActionsT] = [] - for sweep in sweep_instances: - post_delays.append(sweep.delay) - params_set.append(sweep.param) - post_actions.append(sweep.post_actions) - - datasets = [] - plots_axes = [] - plots_colorbar = [] - if use_threads is None: - use_threads = config.dataset.use_threads - - params_meas_caller = ( - ThreadPoolParamsCaller(*all_meas_parameters) - if use_threads - else SequentialParamsCaller(*all_meas_parameters) - ) - - try: - with _catch_interrupts() as interrupted, ExitStack() as stack, params_meas_caller as call_params_meas: - datasavers = [stack.enter_context(measure.run()) for measure in meas_list] - additional_setpoints_data = process_params_meas(additional_setpoints) - previous_setpoints = np.empty(len(sweep_instances)) - for setpoints in tqdm(nested_setpoints, disable=not show_progress): - - active_actions, delays = _select_active_actions_delays( - post_actions, post_delays, setpoints, previous_setpoints, - ) - previous_setpoints = setpoints - - param_set_list = [] - param_value_action_delay = zip( - params_set, - setpoints, - active_actions, - delays, - ) - for setpoint_param, setpoint, action, delay in param_value_action_delay: - _conditional_parameter_set(setpoint_param, setpoint) - param_set_list.append((setpoint_param, setpoint)) - for act in action: - act() - time.sleep(delay) - - meas_value_pair = call_params_meas() - for group in grouped_parameters.values(): - group["measured_params"] = [] - for measured in meas_value_pair: - if measured[0] in group["params"]: - group["measured_params"].append(measured) - for ind, datasaver in enumerate(datasavers): - datasaver.add_result( - *param_set_list, - *grouped_parameters[f"group_{ind}"]["measured_params"], - *additional_setpoints_data, - ) - - if callable(break_condition): - if break_condition(): - raise BreakConditionInterrupt("Break condition was met.") - finally: - - for datasaver in datasavers: - ds, plot_axis, plot_color = _handle_plotting( - datasaver.dataset, do_plot, interrupted() - ) - datasets.append(ds) - plots_axes.append(plot_axis) - plots_colorbar.append(plot_color) - - if 
len(grouped_parameters) == 1: - return datasets[0], plots_axes[0], plots_colorbar[0] - else: - return tuple(datasets), tuple(plots_axes), tuple(plots_colorbar) - - -def _parse_dond_arguments( - *params: Union[AbstractSweep, Union[ParamMeasT, Sequence[ParamMeasT]]] - ) -> Tuple[List[AbstractSweep], List[Union[ParamMeasT, Sequence[ParamMeasT]]]]: - """ - Parse supplied arguments into sweep objects and measurement parameters - and their callables. - """ - sweep_instances: List[AbstractSweep] = [] - params_meas: List[Union[ParamMeasT, Sequence[ParamMeasT]]] = [] - for par in params: - if isinstance(par, AbstractSweep): - sweep_instances.append(par) - else: - params_meas.append(par) - return sweep_instances, params_meas - - -def _conditional_parameter_set( - parameter: _BaseParameter, value: Union[float, complex], - ) -> None: - """ - Reads the cache value of the given parameter and set the parameter to - the given value if the value is different from the cache value. - """ - if value != parameter.cache.get(): - parameter.set(value) - - -def _make_nested_setpoints(sweeps: List[AbstractSweep]) -> np.ndarray: - """Create the cartesian product of all the setpoint values.""" - if len(sweeps) == 0: - return np.array([[]]) # 0d sweep (do0d) - setpoint_values = [sweep.get_setpoints() for sweep in sweeps] - setpoint_grids = np.meshgrid(*setpoint_values, indexing="ij") - flat_setpoint_grids = [np.ravel(grid, order="C") for grid in setpoint_grids] - return np.vstack(flat_setpoint_grids).T - - -def _select_active_actions_delays( - actions: Sequence[ActionsT], - delays: Sequence[float], - setpoints: np.ndarray, - previous_setpoints: np.ndarray, -) -> Tuple[List[ActionsT], List[float]]: - """ - Select ActionT (Sequence[Callable]) and delays(Sequence[float]) from - a Sequence of ActionsT and delays, respectively, if the corresponding - setpoint has changed. Otherwise, select an empty Sequence for actions - and zero for delays. - """ - actions_list: List[ActionsT] = [()] * len(setpoints) - setpoints_delay: List[float] = [0] * len(setpoints) - for ind, (new_setpoint, old_setpoint) in enumerate( - zip(setpoints, previous_setpoints) - ): - if new_setpoint != old_setpoint: - actions_list[ind] = actions[ind] - setpoints_delay[ind] = delays[ind] - return (actions_list, setpoints_delay) - - -def _create_measurements( - all_setpoint_params: Sequence[_BaseParameter], - enter_actions: ActionsT, - exit_actions: ActionsT, - exp: Optional[Experiment], - grouped_parameters: Dict[str, ParameterGroup], - shapes: Shapes, - write_period: Optional[float], - log_info: Optional[str], -) -> Tuple[Measurement, ...]: - meas_list: List[Measurement] = [] - if log_info is not None: - _extra_log_info = log_info - else: - _extra_log_info = "Using 'qcodes.utils.dataset.doNd.dond'" - for group in grouped_parameters.values(): - meas_name = group["meas_name"] - meas_params = group["params"] - meas = Measurement(name=meas_name, exp=exp) - meas._extra_log_info = _extra_log_info - _register_parameters(meas, all_setpoint_params) - _register_parameters( - meas, meas_params, setpoints=all_setpoint_params, shapes=shapes - ) - _set_write_period(meas, write_period) - _register_actions(meas, enter_actions, exit_actions) - meas_list.append(meas) - return tuple(meas_list) - - -def _extract_paramters_by_type_and_group( - measurement_name: str, - params_meas: Sequence[Union[ParamMeasT, Sequence[ParamMeasT]]], -) -> Tuple[ - Tuple[ParamMeasT, ...], Dict[str, ParameterGroup], Tuple[_BaseParameter, ...] 
-]: - measured_parameters: List[_BaseParameter] = [] - all_meas_parameters: List[ParamMeasT] = [] - single_group: List[ParamMeasT] = [] - multi_group: List[Sequence[ParamMeasT]] = [] - grouped_parameters: Dict[str, ParameterGroup] = {} - for param in params_meas: - if not isinstance(param, Sequence): - single_group.append(param) - all_meas_parameters.append(param) - if isinstance(param, _BaseParameter): - measured_parameters.append(param) - elif not isinstance(param, str): - multi_group.append(param) - for nested_param in param: - all_meas_parameters.append(nested_param) - if isinstance(nested_param, _BaseParameter): - measured_parameters.append(nested_param) - if single_group: - pg: ParameterGroup = { - "params": tuple(single_group), - "meas_name": measurement_name, - "measured_params": [], - } - grouped_parameters["group_0"] = pg - if multi_group: - for index, par in enumerate(multi_group): - pg = { - "params": tuple(par), - "meas_name": measurement_name, - "measured_params": [], - } - grouped_parameters[f"group_{index}"] = pg - return tuple(all_meas_parameters), grouped_parameters, tuple(measured_parameters) - - -def _handle_plotting( - data: DataSetProtocol, - do_plot: bool = True, - interrupted: MeasInterruptT = None, -) -> AxesTupleListWithDataSet: - """ - Save the plots created by datasaver as pdf and png - - Args: - datasaver: a measurement datasaver that contains a dataset to be saved - as plot. - :param do_plot: - - """ - if do_plot: - res = plot(data) - else: - res = data, [None], [None] - - if interrupted: - raise interrupted - - return res - - -def plot( - data: DataSetProtocol, save_pdf: bool = True, save_png: bool = True -) -> Tuple[ - DataSetProtocol, - List[matplotlib.axes.Axes], - List[Optional[matplotlib.colorbar.Colorbar]], -]: - """ - The utility function to plot results and save the figures either in pdf or - png or both formats. - - Args: - data: The QCoDeS dataset to be plotted. - save_pdf: Save figure in pdf format. - save_png: Save figure in png format. - """ - dataid = data.captured_run_id - axes, cbs = plot_dataset(data) - mainfolder = config.user.mainfolder - experiment_name = data.exp_name - sample_name = data.sample_name - storage_dir = os.path.join(mainfolder, experiment_name, sample_name) - os.makedirs(storage_dir, exist_ok=True) - png_dir = os.path.join(storage_dir, "png") - pdf_dif = os.path.join(storage_dir, "pdf") - os.makedirs(png_dir, exist_ok=True) - os.makedirs(pdf_dif, exist_ok=True) - for i, ax in enumerate(axes): - if save_pdf: - full_path = os.path.join(pdf_dif, f"{dataid}_{i}.pdf") - ax.figure.savefig(full_path, dpi=500) - if save_png: - full_path = os.path.join(png_dir, f"{dataid}_{i}.png") - ax.figure.savefig(full_path, dpi=500) - res = data, axes, cbs - return res diff --git a/qcodes/utils/installation.py b/qcodes/utils/installation.py index 6b23cd2661e..5d35b7765d1 100644 --- a/qcodes/utils/installation.py +++ b/qcodes/utils/installation.py @@ -1,54 +1,11 @@ -"""This module contains helper scripts to make certain installation tasks -easier.""" +import warnings -import os -import sys -import json -from qcodes.station import SCHEMA_PATH, STATION_YAML_EXT, update_config_schema +from qcodes.extensions.installation import register_station_schema_with_vscode +# todo enable warning once new api is in release +# warnings.warn( +# "qcodes.utils.installation module is deprecated. 
" +# "Please update to import from qcodes.extensions" +# ) -def register_station_schema_with_vscode() -> None: - """This function registeres the qcodes station schema with vscode. - - Run this function to add the user generated station schema to the list of - associated schemas for the Red Hat YAML schema extension for vscode. - (https://marketplace.visualstudio.com/items?itemName=redhat.vscode-yaml) - - This function will effectively add an entry to `yaml.schemas` in the user - config file of vscode, which is located under - `%APPDATA/Code/User/settings.json`, or will be created there. - - You can alternatively access this - setting via File->Preferences->Settings and search for `yaml.schemas`. - - To enable autocompletinon of QCoDeS instrument from additional packages run - `qcodes.station.update_config_schema`. - - For more information consult `qcodes/docs/examples/Station.ipynb`. - """ - if sys.platform != 'win32': - raise RuntimeError( - 'This script is only supported on Windows platforms.\n ' - 'Please consult docstring for more information.') - if not os.path.exists(SCHEMA_PATH): - update_config_schema() - - config_path = os.path.expandvars( - os.path.join('%APPDATA%', 'Code', 'User', 'settings.json')) - config_backup_path = config_path + '_backup' - - if not os.path.exists(config_path): - raise RuntimeError( - 'Could not find the user settings file of vscode. \n' - 'Please refer to the station.ipynb notebook to learn how to ' - 'set the settings manually.') - with open(config_path, 'r+') as f: - data = json.load(f) - data.setdefault( - 'yaml.schemas', {} - )[r'file:\\' + os.path.splitdrive(SCHEMA_PATH)[1]] = STATION_YAML_EXT - - os.replace(config_path, config_backup_path) - - with open(config_path, 'w') as f: - json.dump(data, f, indent=4) +__all__ = ["register_station_schema_with_vscode"] diff --git a/qcodes/utils/plotting.py b/qcodes/utils/plotting.py index 311c8f79e8b..b246d247c2a 100644 --- a/qcodes/utils/plotting.py +++ b/qcodes/utils/plotting.py @@ -6,13 +6,13 @@ """ import copy import logging -from typing import Tuple, Union, Optional, Any, cast, Set, Dict from collections import OrderedDict -import numpy as np +from typing import Any, Dict, Optional, Set, Tuple, Union, cast + import matplotlib -import matplotlib.colorbar import matplotlib.collections -import qcodes +import matplotlib.colorbar +import numpy as np log = logging.getLogger(__name__) @@ -253,6 +253,7 @@ def auto_color_scale_from_config(colorbar: matplotlib.colorbar.Colorbar, by the lower limit. Default value is read from ``config.plotting.auto_color_scale.color_under``. """ + import qcodes if colorbar is None: log.warning('"auto_color_scale_from_config" did not receive a colorbar ' 'for scaling. Are you trying to scale a plot without ' diff --git a/qcodes/utils/slack.py b/qcodes/utils/slack.py index 4a93d79d729..1c3b371cbeb 100644 --- a/qcodes/utils/slack.py +++ b/qcodes/utils/slack.py @@ -1,468 +1,11 @@ -""" -Slack bot is used to send information about qcodes via Slack IMs. -Some default commands are provided, and custom commands/tasks can be -attached (see below). - -To setup the Slack bot, a bot first has to be created via Slack -by clicking 'Create New App' on https://api.slack.com/apps. -Once created, the bot will have a name and unique token. -These and other settings have to be saved in a config dict (see init( or -Parameters) in :class:`Slack`). 
- -The App containing your bot needs to have the following bot token scopes to -perform all methods successfully: -- channels:history -- channels:read -- chat:write -- files:write -- users:read -These can be set after clicking OAuth & Permissions in the left menubar after -selecting your bot at https://api.slack.com/apps (or during creation). - -Communication with the Slack bot is performed via instant messaging. -When an IM is sent to the Slack bot, it will be processed during the next -`update()` call (provided the username is registered in the config). -Standard commands provided to the Slack bot are: - -- plot: Upload latest qcodes plot. -- msmt/measurement: Print information about latest measurement. -- notify finished: Send message once measurement is finished. - -Custom commands can be added as (cmd, func) key-value pairs to -`self.commands`. When `cmd` is sent to the bot, `func` is evaluated. - -Custom tasks can be added as well. These are functions that are performed -every time an update is called. The function must return a boolean that -indicates if the task should be removed from the list of tasks. -A custom task can be added as a (cmd, func) key-value pair to -`self.task_commands`. -They can then be called through Slack IM via: - -``notify/task {cmd} *args:`` register task with name `cmd` that is -performed every time `update()` is called. -""" - -import inspect -import logging -import os -import tempfile -import threading -import traceback import warnings -from functools import partial -from time import sleep - -from requests.exceptions import ConnectTimeout, HTTPError, ReadTimeout -from requests.packages.urllib3.exceptions import ReadTimeoutError -from slack_sdk import WebClient - -from qcodes import config as qc_config -from qcodes.instrument.parameter import _BaseParameter -from qcodes.loops import active_data_set, active_loop -from qcodes.plots.base import BasePlot - - -class SlackTimeoutWarning(UserWarning): - pass - - -def convert_command(text): - def try_convert_str(string): - try: - val = int(string) - return val - except ValueError: - pass - try: - val = float(string) - return val - except ValueError: - pass - - return string - - # Format text to lowercase, and remove trailing whitespaces - text = text.lower().rstrip(' ') - command, *args_str = text.split(' ') - - # Convert string args to floats/kwargs - args = [] - kwargs = {} - for arg in args_str: - if '=' in arg: - # arg is a kwarg - key, val = arg.split('=') - # Try to convert into a float - val = try_convert_str(val) - kwargs[key] = val - else: - # arg is not a kwarg - # Try to convert into a float - val = try_convert_str(arg) - args.append(val) - return command, args, kwargs - - -class Slack(threading.Thread): - - def __init__(self, interval=3, config=None, auto_start=True, **commands): - """ - Initializes Slack bot, including auto-updating widget if in notebook - and using multiprocessing. - - Args: - interval (int): Update interval for widget (must be over 1s). - config (Optional[dict]): Config dict - If not given, uses qc.config['user']['slack'] - The config dict must contain the following keys: - - - 'bot_name': Name of the bot - - 'bot_token': Token from bot (obtained from slack website) - - 'names': Usernames to periodically check for IM messages - - auto_start (bool): Defaults to True. 
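A small illustration of how ``convert_command`` (shown above) splits an IM into a command plus positional and keyword arguments, matching the unit test earlier in this diff::

    from qcodes.extensions.slack import convert_command

    command, args, kwargs = convert_command("comm 0.234 key=0.1")
    # command == "comm", args == [0.234], kwargs == {"key": 0.1}
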
- - """ - if config is not None: - self.config = config - else: - self.config = qc_config.user.slack - - self.slack = WebClient(token=self.config['token']) - self.users = self.get_users(self.config['names']) - self.get_im_ids(self.users) - - self.commands = {'plot': self.upload_latest_plot, - 'msmt': self.print_measurement_information, - 'measurement': self.print_measurement_information, - 'notify': self.add_task, - 'help': self.help_message, - 'task': self.add_task, - **commands} - self.task_commands = {'finished': self.check_msmt_finished} - - self.interval = interval - self.tasks = [] - - # Flag that exits loop when set to True (called via self.exit()) - self._exit = False - - # Flag that enables actions to be performed in the event loop - # Enabled via self.start(), disabled via self.stop() - self._is_active = False - - # Call Thread init - super().__init__() - - if auto_start: - self.start() - - def start(self): - self._is_active = True - try: - # Start thread, can only be called once - super().start() - except RuntimeError: - # Thread already started, ignoring - pass - - def run(self): - """ - Thread event loop that periodically checks for updates. - Can be stopped via :meth:`stop` , after which the Thread is stopped. - Returns: - None. - """ - while not self._exit: - # Continue event loop - if self._is_active: - # check for updates - self.update() - sleep(self.interval) - - def stop(self): - """ - Stop checking for updates. Can be started again via :meth:`start`. - Returns: - None. - """ - self._is_active = False - - def exit(self): - """ - Exit event loop, stop Thread. - Returns: - None - """ - self._stop = True - - def user_from_id(self, user_id): - """ - Retrieve user from user id. - Args: - user_id: Id from which to retrieve user information. - - Returns: - dict: User information. - """ - return self.slack.users_info(user=user_id)['user'] - - def get_users(self, usernames): - """ - Extracts user information for users. - Args: - usernames: Slack usernames of users. - - Returns: - dict: {username: user} - """ - users = {} - response = self.slack.users_list() - for member in response['members']: - if member['name'] in usernames: - users[member['name']] = member - if len(users) != len(usernames): - remaining_names = [name for name in usernames if name not in users] - raise RuntimeError( - f'Could not find names {remaining_names}') - return users - - def get_im_ids(self, users): - """ - Adds IM ids of users to users dict. - Also adds `last_ts` to the latest IM message - Args: - users (dict): {username: user} - - Returns: - None. - """ - response = self.slack.conversations_list(types='im') - user_ids = {username: user['id'] for username, user in users.items()} - im_ids = {chan['user']: chan['id'] for chan in response['channels']} - for username, user_id in user_ids.items(): - if user_id in im_ids.keys(): - users[username]['im_id'] = im_ids[user_id] - # update last ts - messages = self.get_im_messages(username=username, limit=1) - if messages: - users[username]['last_ts'] = float(messages[0]['ts']) - else: - users[username]['last_ts'] = None - - def get_im_messages(self, username, **kwargs): - """ - Retrieves IM messages from username. - Args: - username: Name of user. - **kwargs: Additional kwargs for retrieving IM messages. - - Returns: - List of IM messages. - """ - # provide backward compatibility with 'count' keyword. It still works, - # but is undocumented. 
'count' likely does the same as 'limit', but - # 'limit' takes precedence - if 'limit' not in kwargs.keys(): - kwargs['limit'] = kwargs.pop('count', None) - - channel = self.users[username].get('im_id', None) - if channel is None: - return [] - else: - response = self.slack.conversations_history(channel=channel, - **kwargs) - return response['messages'] - - def get_new_im_messages(self): - """ - Retrieves new IM messages for each user in self.users. - Updates user['last_ts'] to ts of newest message. - Returns: - im_messages (Dict): {username: [messages list]} newer than last_ts. - """ - im_messages = {} - for username, user in self.users.items(): - last_ts = user.get('last_ts', None) - new_messages = self.get_im_messages(username=username, - oldest=last_ts) - # Kwarg 'oldest' sometimes also returns message with ts==last_ts - new_messages = [m for m in new_messages if - float(m['ts']) != last_ts] - im_messages[username] = new_messages - if new_messages: - self.users[username]['last_ts'] = float(new_messages[0]['ts']) - return im_messages - - def update(self): - """ - Performs tasks, and checks for new messages. - Periodically called from widget update. - Returns: - None. - """ - new_tasks = [] - for task in self.tasks: - task_finished = task() - if not task_finished: - new_tasks.append(task) - self.tasks = new_tasks - - new_messages = {} - try: - new_messages = self.get_new_im_messages() - except (ReadTimeout, HTTPError, ConnectTimeout, ReadTimeoutError) as e: - # catch any timeouts caused by network delays - warnings.warn('error retrieving slack messages', - SlackTimeoutWarning) - logging.info(e) - self.handle_messages(new_messages) - - def help_message(self): - """Return simple help message""" - cc = ", ".join("`" + str(k) + "`" for k in self.commands.keys()) - return "\nAvailable commands: %s" % cc - - def handle_messages(self, messages): - """ - Performs commands depending on messages. - This includes adding tasks to be performed during each update. - """ - for user, user_messages in messages.items(): - for message in user_messages: - if message.get('user', None) != self.users[user]['id']: - # Filter out bot messages - continue - channel = self.users[user]['im_id'] - # Extract command (first word) and possible args - command, args, kwargs = convert_command(message['text']) - if command in self.commands: - msg = f'Executing {command}' - if args: - msg += f' {args}' - if kwargs: - msg += f' {kwargs}' - self.slack.chat_postMessage(text=msg, channel=channel) - - func = self.commands[command] - try: - if isinstance(func, _BaseParameter): - results = func(*args, **kwargs) - else: - # Only add channel and Slack if they are explicit - # kwargs - func_sig = inspect.signature(func) - if 'channel' in func_sig.parameters: - kwargs['channel'] = channel - if 'slack' in func_sig.parameters: - kwargs['slack'] = self - results = func(*args, **kwargs) - - if results is not None: - self.slack.chat_postMessage( - text=f'Results: {results}', - channel=channel) - - except Exception: - self.slack.chat_postMessage( - text=f'Error: {traceback.format_exc()}', - channel=channel) - else: - self.slack.chat_postMessage( - text='Command {} not understood. Try `help`'.format( - command), - channel=channel) - - def add_task(self, command, *args, channel, **kwargs): - """ - Add a task to self.tasks, which will be executed during each update - Args: - command: Task command. - *args: Additional args for command. - channel: Slack channel (can also be IM channel). - **kwargs: Additional kwargs for particular. 
- - Returns: - None. - """ - if command in self.task_commands: - self.slack.chat_postMessage( - text=f'Added task "{command}"', - channel=channel) - func = self.task_commands[command] - self.tasks.append(partial(func, *args, channel=channel, **kwargs)) - else: - self.slack.chat_postMessage( - text=f'Task command {command} not understood', - channel=channel) - - def upload_latest_plot(self, channel, **kwargs): - """ - Uploads latest plot (if any) to slack channel. - The latest plot is retrieved from - :class:`qcodes.plots.base.BasePlot`, which is updated - every time a new qcodes plot is instantiated. - Args: - channel: Slack channel (can also be IM channel). - **kwargs: Not used. - - Returns: - None. - """ - # Create temporary filename - temp_filename = tempfile.mktemp(suffix='.jpg') - # Retrieve latest plot - latest_plot = BasePlot.latest_plot - if latest_plot is not None: - # Saves latest plot to filename - latest_plot.save(filename=temp_filename) - # Upload plot to slack - self.slack.files_upload(file=temp_filename, channels=channel) - os.remove(temp_filename) - else: - self.slack.chat_postMessage(text='No latest plot', - channel=channel) - - def print_measurement_information(self, channel, **kwargs): - """ - Prints information about the current measurement. - Information printed is percentage complete, and dataset representation. - Dataset is retrieved from DataSet.latest_dataset, which updates itself - every time a new dataset is created - Args: - channel: Slack channel (can also be IM channel). - **kwargs: Not used. - Returns: - None. - """ - dataset = active_data_set() - if dataset is not None: - self.slack.chat_postMessage( - text='Measurement is {:.0f}% complete'.format( - 100 * dataset.fraction_complete()), - channel=channel) - self.slack.chat_postMessage( - text=repr(dataset), channel=channel) - else: - self.slack.chat_postMessage( - text='No latest dataset found', - channel=channel) +from qcodes.extensions.slack import Slack, SlackTimeoutWarning, convert_command - def check_msmt_finished(self, channel, **kwargs): - """ - Checks if the latest measurement is completed. - Args: - channel: Slack channel (can also be IM channel). - **kwargs: Not used. +# todo enable warning once new api is in release +# warnings.warn( +# "qcodes.utils.slack module is deprecated. " +# "Please update to import from qcodes.extensions" +# ) - Returns: - bool: True if measurement is finished, False otherwise. 
- """ - if active_loop() is None: - self.slack.chat_postMessage( - text='Measurement complete', - channel=channel) - return True - else: - return False +__all__ = ["Slack", "SlackTimeoutWarning", "convert_command"] diff --git a/qcodes/utils/threading.py b/qcodes/utils/threading.py index 4b9ae9ec999..4451897af4b 100644 --- a/qcodes/utils/threading.py +++ b/qcodes/utils/threading.py @@ -11,6 +11,7 @@ from functools import partial from types import TracebackType from typing import ( + TYPE_CHECKING, Any, Callable, Dict, @@ -25,13 +26,12 @@ from typing_extensions import Protocol -from qcodes import config -from qcodes.dataset.measurements import res_type -from qcodes.instrument.parameter import ParamDataType, _BaseParameter +if TYPE_CHECKING: + from qcodes.dataset.measurements import res_type + from qcodes.instrument.parameter import ParamDataType, _BaseParameter -ParamMeasT = Union[_BaseParameter, Callable[[], None]] - -OutType = List[res_type] +ParamMeasT = Union["_BaseParameter", Callable[[], None]] +OutType = List["res_type"] T = TypeVar("T") @@ -121,11 +121,11 @@ def thread_map( class _ParamCaller: - def __init__(self, *parameters: _BaseParameter): + def __init__(self, *parameters: "_BaseParameter"): self._parameters = parameters - def __call__(self) -> Tuple[Tuple[_BaseParameter, ParamDataType], ...]: + def __call__(self) -> Tuple[Tuple["_BaseParameter", "ParamDataType"], ...]: output = [] for param in self._parameters: output.append((param, param.get())) @@ -138,12 +138,12 @@ def __repr__(self) -> str: def _instrument_to_param( params: Sequence[ParamMeasT] -) -> Dict[Optional[str], Tuple[_BaseParameter, ...]]: - +) -> Dict[Optional[str], Tuple["_BaseParameter", ...]]: + from qcodes.instrument.parameter import _BaseParameter real_parameters = [param for param in params if isinstance(param, _BaseParameter)] - output: Dict[Optional[str], Tuple[_BaseParameter, ...]] = defaultdict(tuple) + output: Dict[Optional[str], Tuple["_BaseParameter", ...]] = defaultdict(tuple) for param in real_parameters: if param.underlying_instrument: output[param.underlying_instrument.full_name] += (param,) @@ -185,7 +185,7 @@ def call_params_threaded(param_meas: Sequence[ParamMeasT]) -> OutType: def _call_params(param_meas: Sequence[ParamMeasT]) -> OutType: - + from qcodes.instrument.parameter import _BaseParameter output: OutType = [] for parameter in param_meas: @@ -201,7 +201,7 @@ def process_params_meas( param_meas: Sequence[ParamMeasT], use_threads: Optional[bool] = None ) -> OutType: - + from qcodes import config if use_threads is None: use_threads = config.dataset.use_threads