From a944e97221a35244963e1cbb88cec496dc4ba531 Mon Sep 17 00:00:00 2001 From: "Jens H. Nielsen" Date: Mon, 6 Nov 2023 11:07:15 +0100 Subject: [PATCH 1/9] move external fixture out of src --- .gitmodules | 2 +- src/qcodes/tests/dataset/fixtures/db_files => db_files | 0 2 files changed, 1 insertion(+), 1 deletion(-) rename src/qcodes/tests/dataset/fixtures/db_files => db_files (100%) diff --git a/.gitmodules b/.gitmodules index 9260dea4a4e..8f672b56124 100644 --- a/.gitmodules +++ b/.gitmodules @@ -1,5 +1,5 @@ [submodule "qcodes/tests/dataset/fixtures/db_files"] - path = src/qcodes/tests/dataset/fixtures/db_files + path = db_files url = https://github.com/QCoDeS/qcodes_db_fixtures.git branch = main [submodule "typings"] diff --git a/src/qcodes/tests/dataset/fixtures/db_files b/db_files similarity index 100% rename from src/qcodes/tests/dataset/fixtures/db_files rename to db_files From ca350ead58b934ba32face49967be085fa7346fc Mon Sep 17 00:00:00 2001 From: "Jens H. Nielsen" Date: Mon, 6 Nov 2023 11:17:22 +0100 Subject: [PATCH 2/9] Move tests outside of src package --- .../helpers/test_compare_dictionaries.py | 85 --- tests/__init__.py | 0 tests/common.py | 162 ++++ tests/conftest.py | 188 +++++ tests/dataset/__init__.py | 3 + tests/dataset/conftest.py | 708 ++++++++++++++++++ tests/dataset/dond/__init__.py | 0 tests/dataset/dond/conftest.py | 92 +++ .../tests => tests}/dataset/dond/test_do0d.py | 0 .../tests => tests}/dataset/dond/test_do1d.py | 0 .../tests => tests}/dataset/dond/test_do2d.py | 0 .../tests => tests}/dataset/dond/test_doNd.py | 0 tests/dataset/fixtures/.gitignore | 2 + tests/dataset/fixtures/__init__.py | 0 .../fixtures/data_2018_01_17/__init__.py | 0 .../data_001_testsweep_15_42_57/__init__.py | 0 .../dac_ch1_set.dat | 204 +++++ .../data_001_testsweep_15_42_57/snapshot.json | 230 ++++++ .../data_002_2D_test_15_43_14/__init__.py | 0 .../dac_ch1_set_dac_ch2_set.dat | 44 ++ .../data_002_2D_test_15_43_14/snapshot.json | 286 +++++++ tests/dataset/helper_functions.py | 131 ++++ tests/dataset/measurement/__init__.py | 0 .../measurement/test_load_legacy_data.py | 0 .../test_measurement_context_manager.py | 0 .../dataset/measurement/test_register_name.py | 0 .../dataset/measurement/test_shapes.py | 0 .../dataset/test__get_data_from_ds.py | 0 .../dataset/test_concurrent_datasets.py | 0 .../dataset/test_converters.py | 0 .../dataset/test_data_set_cache.py | 0 .../test_database_creation_and_upgrading.py | 0 .../dataset/test_database_extract_runs.py | 0 .../tests => tests}/dataset/test_datasaver.py | 0 .../dataset/test_dataset_basic.py | 0 .../dataset/test_dataset_export.py | 0 .../dataset/test_dataset_in_mem_import.py | 0 .../dataset/test_dataset_in_memory.py | 0 .../dataset/test_dataset_in_memory_bacis.py | 0 .../dataset/test_dataset_loading.py | 0 .../dataset/test_dependencies.py | 0 .../dataset/test_descriptions.py | 0 .../dataset/test_detect_shape.py | 0 .../dataset/test_experiment_container.py | 0 .../dataset/test_export_info.py | 0 .../dataset/test_fix_functions.py | 0 .../dataset/test_guid_helpers.py | 0 .../tests => tests}/dataset/test_guids.py | 0 .../tests => tests}/dataset/test_links.py | 0 .../dataset/test_measurement_extensions.py | 0 .../tests => tests}/dataset/test_metadata.py | 0 .../dataset/test_nested_measurements.py | 0 .../tests => tests}/dataset/test_paramspec.py | 0 .../tests => tests}/dataset/test_plotting.py | 0 .../tests => tests}/dataset/test_snapshot.py | 0 .../dataset/test_sqlite_base.py | 0 .../dataset/test_sqlite_connection.py | 0 
.../dataset/test_sqlitesettings.py | 0 .../dataset/test_string_data.py | 0 .../dataset/test_subscribing.py | 0 tests/dataset_generators.py | 33 + tests/delegate/__init__.py | 0 tests/delegate/conftest.py | 56 ++ tests/delegate/data/__init__.py | 0 tests/delegate/data/chip.yml | 77 ++ tests/delegate/data/chip_typo.yml | 42 ++ .../delegate/test_delegate_instrument.py | 0 .../tests => tests}/delegate/test_device.py | 0 tests/driver_test_case.py | 51 ++ tests/drivers/AlazarTech/__init__.py | 1 + .../drivers/AlazarTech/test_alazar_api.py | 0 .../drivers/AlazarTech/test_alazar_buffer.py | 0 .../drivers/AlazarTech/test_dll_wrapper.py | 0 tests/drivers/__init__.py | 1 + tests/drivers/auxiliary_files/__init__.py | 1 + .../auxiliary_files/awgSeqDataSets.xsd | 147 ++++ tests/drivers/keysight_b1500/__init__.py | 0 .../b1500_driver_tests/__init__.py | 0 .../b1500_driver_tests/conftest.py | 42 ++ .../b1500_driver_tests/test_b1500.py | 0 .../b1500_driver_tests/test_b1500_module.py | 0 .../b1500_driver_tests/test_b1511b_smu.py | 0 .../b1500_driver_tests/test_b1517a_smu.py | 0 .../b1500_driver_tests/test_b1520a_cmu.py | 0 .../test_sampling_measurement.py | 0 .../keysight_b1500/test_MessageBuilder.py | 0 .../keysight_b1500/test_commandList.py | 0 .../drivers/test_Agilent_E8257D.py | 0 .../drivers/test_AimTTi_PL601P.py | 0 .../drivers/test_Keithley_2450.py | 0 .../drivers/test_Keysight_33XXX.py | 0 .../drivers/test_Keysight_N6705B.py | 0 .../drivers/test_MercuryiPS.py | 0 .../drivers/test_Rigol_DS1074Z.py | 0 .../tests => tests}/drivers/test_ami430.py | 0 .../drivers/test_ami430_visa.py | 0 .../drivers/test_keithley_26xx.py | 0 .../drivers/test_keithley_3706A.py | 0 .../drivers/test_keithley_7510.py | 0 .../drivers/test_keithley_s46.py | 0 .../drivers/test_keysight_34465a.py | 0 .../drivers/test_keysight_34934a.py | 0 .../drivers/test_keysight_34980a.py | 0 .../drivers/test_keysight_b220x.py | 0 .../drivers/test_keysight_e4980a.py | 0 .../drivers/test_keysight_n9030b.py | 0 .../tests => tests}/drivers/test_lakeshore.py | 0 .../drivers/test_lakeshore_325.py | 0 .../drivers/test_lakeshore_325_legacy.py | 0 .../drivers/test_lakeshore_335.py | 0 .../drivers/test_lakeshore_336.py | 0 .../drivers/test_lakeshore_336_legacy.py | 0 .../drivers/test_lakeshore_file_parser.py | 0 .../tests => tests}/drivers/test_rto_1000.py | 0 .../tests => tests}/drivers/test_stahl.py | 0 .../drivers/test_tektronix_AWG5014C.py | 0 .../drivers/test_tektronix_AWG5208.py | 0 .../drivers/test_tektronix_AWG70000A.py | 0 .../drivers/test_tektronix_dpo7200xx.py | 0 .../tests => tests}/drivers/test_weinchel.py | 0 .../drivers/test_yokogawa_gs200.py | 0 tests/helpers/__init__.py | 0 .../helpers/test_delegate_attribues.py | 0 .../helpers/test_json_encoder.py | 0 .../helpers/test_strip_attrs.py | 0 tests/instrument_mocks.py | 52 ++ tests/mockers/__init__.py | 0 .../mockers/test_simulated_ats_api.py | 0 tests/parameter/__init__.py | 0 tests/parameter/conftest.py | 208 +++++ .../parameter/test_array_parameter.py | 0 .../parameter/test_combined_par.py | 0 .../parameter/test_delegate_parameter.py | 0 .../parameter/test_elapsed_time_parameter.py | 0 .../parameter/test_function.py | 0 .../parameter/test_get_latest.py | 0 .../parameter/test_get_set_parser.py | 0 .../parameter/test_get_set_wrapping.py | 0 .../parameter/test_group_parameter.py | 0 .../test_instrument_ref_parameter.py | 0 .../parameter/test_issequence.py | 0 .../parameter/test_issequenceof.py | 0 .../parameter/test_make_sweep.py | 0 .../parameter/test_multi_parameter.py | 0 
.../parameter/test_non_gettable_parameter.py | 0 .../parameter/test_on_off_mapping.py | 0 .../parameter/test_parameter_basics.py | 0 .../parameter/test_parameter_cache.py | 0 .../test_parameter_context_manager.py | 0 .../parameter/test_parameter_ramp.py | 0 .../parameter/test_parameter_registration.py | 0 .../parameter/test_parameter_scale_offset.py | 0 .../parameter/test_parameter_validation.py | 0 .../test_parameter_with_setpoints.py | 0 .../parameter/test_permissive_range.py | 0 .../parameter/test_scaled_parameter.py | 0 .../parameter/test_snapshot.py | 0 .../parameter/test_val_mapping.py | 0 .../parameter/test_validators.py | 0 tests/sphinx_extension/__init__.py | 0 .../test_parse_parameter_attr.py | 0 .../test_abstract_instrument.py | 0 .../test_autoloadable_channels.py | 0 {src/qcodes/tests => tests}/test_channels.py | 0 {src/qcodes/tests => tests}/test_command.py | 0 {src/qcodes/tests => tests}/test_config.py | 0 {src/qcodes/tests => tests}/test_deprecate.py | 0 .../tests => tests}/test_field_vector.py | 0 .../tests => tests}/test_installation_info.py | 0 .../qcodes/tests => tests}/test_instrument.py | 0 .../test_interactive_widget.py | 0 {src/qcodes/tests => tests}/test_logger.py | 0 {src/qcodes/tests => tests}/test_metadata.py | 0 {src/qcodes/tests => tests}/test_monitor.py | 0 .../qcodes/tests => tests}/test_plot_utils.py | 0 {src/qcodes/tests => tests}/test_snapshot.py | 0 {src/qcodes/tests => tests}/test_station.py | 0 .../tests => tests}/test_sweep_values.py | 0 {src/qcodes/tests => tests}/test_testutils.py | 0 {src/qcodes/tests => tests}/test_threading.py | 0 {src/qcodes/tests => tests}/test_visa.py | 0 tests/utils/__init__.py | 0 .../test_attributes_set_to_context_manager.py | 0 .../utils/test_class_strings.py | 0 .../tests => tests}/utils/test_isfunction.py | 0 .../utils/test_partial_with_docstring.py | 0 tests/validators/__init__.py | 0 tests/validators/conftest.py | 8 + .../tests => tests}/validators/test_arrays.py | 0 .../tests => tests}/validators/test_basic.py | 0 .../tests => tests}/validators/test_bool.py | 0 .../validators/test_callable.py | 0 .../validators/test_complex.py | 0 .../tests => tests}/validators/test_dict.py | 0 .../tests => tests}/validators/test_enum.py | 0 .../tests => tests}/validators/test_ints.py | 0 .../tests => tests}/validators/test_lists.py | 0 .../validators/test_multi_type.py | 0 .../validators/test_multi_type_and.py | 0 .../validators/test_multi_type_or.py | 0 .../validators/test_multiples.py | 0 .../validators/test_numbers.py | 0 .../validators/test_permissive_ints.py | 0 .../validators/test_permissive_multiples.py | 0 .../validators/test_sequence.py | 0 .../tests => tests}/validators/test_string.py | 0 206 files changed, 2769 insertions(+), 85 deletions(-) delete mode 100644 src/qcodes/tests/helpers/test_compare_dictionaries.py create mode 100644 tests/__init__.py create mode 100644 tests/common.py create mode 100644 tests/conftest.py create mode 100644 tests/dataset/__init__.py create mode 100644 tests/dataset/conftest.py create mode 100644 tests/dataset/dond/__init__.py create mode 100644 tests/dataset/dond/conftest.py rename {src/qcodes/tests => tests}/dataset/dond/test_do0d.py (100%) rename {src/qcodes/tests => tests}/dataset/dond/test_do1d.py (100%) rename {src/qcodes/tests => tests}/dataset/dond/test_do2d.py (100%) rename {src/qcodes/tests => tests}/dataset/dond/test_doNd.py (100%) create mode 100644 tests/dataset/fixtures/.gitignore create mode 100644 tests/dataset/fixtures/__init__.py create mode 100644 
tests/dataset/fixtures/data_2018_01_17/__init__.py create mode 100644 tests/dataset/fixtures/data_2018_01_17/data_001_testsweep_15_42_57/__init__.py create mode 100644 tests/dataset/fixtures/data_2018_01_17/data_001_testsweep_15_42_57/dac_ch1_set.dat create mode 100644 tests/dataset/fixtures/data_2018_01_17/data_001_testsweep_15_42_57/snapshot.json create mode 100644 tests/dataset/fixtures/data_2018_01_17/data_002_2D_test_15_43_14/__init__.py create mode 100644 tests/dataset/fixtures/data_2018_01_17/data_002_2D_test_15_43_14/dac_ch1_set_dac_ch2_set.dat create mode 100644 tests/dataset/fixtures/data_2018_01_17/data_002_2D_test_15_43_14/snapshot.json create mode 100644 tests/dataset/helper_functions.py create mode 100644 tests/dataset/measurement/__init__.py rename {src/qcodes/tests => tests}/dataset/measurement/test_load_legacy_data.py (100%) rename {src/qcodes/tests => tests}/dataset/measurement/test_measurement_context_manager.py (100%) rename {src/qcodes/tests => tests}/dataset/measurement/test_register_name.py (100%) rename {src/qcodes/tests => tests}/dataset/measurement/test_shapes.py (100%) rename {src/qcodes/tests => tests}/dataset/test__get_data_from_ds.py (100%) rename {src/qcodes/tests => tests}/dataset/test_concurrent_datasets.py (100%) rename {src/qcodes/tests => tests}/dataset/test_converters.py (100%) rename {src/qcodes/tests => tests}/dataset/test_data_set_cache.py (100%) rename {src/qcodes/tests => tests}/dataset/test_database_creation_and_upgrading.py (100%) rename {src/qcodes/tests => tests}/dataset/test_database_extract_runs.py (100%) rename {src/qcodes/tests => tests}/dataset/test_datasaver.py (100%) rename {src/qcodes/tests => tests}/dataset/test_dataset_basic.py (100%) rename {src/qcodes/tests => tests}/dataset/test_dataset_export.py (100%) rename {src/qcodes/tests => tests}/dataset/test_dataset_in_mem_import.py (100%) rename {src/qcodes/tests => tests}/dataset/test_dataset_in_memory.py (100%) rename {src/qcodes/tests => tests}/dataset/test_dataset_in_memory_bacis.py (100%) rename {src/qcodes/tests => tests}/dataset/test_dataset_loading.py (100%) rename {src/qcodes/tests => tests}/dataset/test_dependencies.py (100%) rename {src/qcodes/tests => tests}/dataset/test_descriptions.py (100%) rename {src/qcodes/tests => tests}/dataset/test_detect_shape.py (100%) rename {src/qcodes/tests => tests}/dataset/test_experiment_container.py (100%) rename {src/qcodes/tests => tests}/dataset/test_export_info.py (100%) rename {src/qcodes/tests => tests}/dataset/test_fix_functions.py (100%) rename {src/qcodes/tests => tests}/dataset/test_guid_helpers.py (100%) rename {src/qcodes/tests => tests}/dataset/test_guids.py (100%) rename {src/qcodes/tests => tests}/dataset/test_links.py (100%) rename {src/qcodes/tests => tests}/dataset/test_measurement_extensions.py (100%) rename {src/qcodes/tests => tests}/dataset/test_metadata.py (100%) rename {src/qcodes/tests => tests}/dataset/test_nested_measurements.py (100%) rename {src/qcodes/tests => tests}/dataset/test_paramspec.py (100%) rename {src/qcodes/tests => tests}/dataset/test_plotting.py (100%) rename {src/qcodes/tests => tests}/dataset/test_snapshot.py (100%) rename {src/qcodes/tests => tests}/dataset/test_sqlite_base.py (100%) rename {src/qcodes/tests => tests}/dataset/test_sqlite_connection.py (100%) rename {src/qcodes/tests => tests}/dataset/test_sqlitesettings.py (100%) rename {src/qcodes/tests => tests}/dataset/test_string_data.py (100%) rename {src/qcodes/tests => tests}/dataset/test_subscribing.py (100%) create mode 100644 
tests/dataset_generators.py create mode 100644 tests/delegate/__init__.py create mode 100644 tests/delegate/conftest.py create mode 100644 tests/delegate/data/__init__.py create mode 100644 tests/delegate/data/chip.yml create mode 100644 tests/delegate/data/chip_typo.yml rename {src/qcodes/tests => tests}/delegate/test_delegate_instrument.py (100%) rename {src/qcodes/tests => tests}/delegate/test_device.py (100%) create mode 100644 tests/driver_test_case.py create mode 100644 tests/drivers/AlazarTech/__init__.py rename {src/qcodes/tests => tests}/drivers/AlazarTech/test_alazar_api.py (100%) rename {src/qcodes/tests => tests}/drivers/AlazarTech/test_alazar_buffer.py (100%) rename {src/qcodes/tests => tests}/drivers/AlazarTech/test_dll_wrapper.py (100%) create mode 100644 tests/drivers/__init__.py create mode 100644 tests/drivers/auxiliary_files/__init__.py create mode 100644 tests/drivers/auxiliary_files/awgSeqDataSets.xsd create mode 100644 tests/drivers/keysight_b1500/__init__.py create mode 100644 tests/drivers/keysight_b1500/b1500_driver_tests/__init__.py create mode 100644 tests/drivers/keysight_b1500/b1500_driver_tests/conftest.py rename {src/qcodes/tests => tests}/drivers/keysight_b1500/b1500_driver_tests/test_b1500.py (100%) rename {src/qcodes/tests => tests}/drivers/keysight_b1500/b1500_driver_tests/test_b1500_module.py (100%) rename {src/qcodes/tests => tests}/drivers/keysight_b1500/b1500_driver_tests/test_b1511b_smu.py (100%) rename {src/qcodes/tests => tests}/drivers/keysight_b1500/b1500_driver_tests/test_b1517a_smu.py (100%) rename {src/qcodes/tests => tests}/drivers/keysight_b1500/b1500_driver_tests/test_b1520a_cmu.py (100%) rename {src/qcodes/tests => tests}/drivers/keysight_b1500/b1500_driver_tests/test_sampling_measurement.py (100%) rename {src/qcodes/tests => tests}/drivers/keysight_b1500/test_MessageBuilder.py (100%) rename {src/qcodes/tests => tests}/drivers/keysight_b1500/test_commandList.py (100%) rename {src/qcodes/tests => tests}/drivers/test_Agilent_E8257D.py (100%) rename {src/qcodes/tests => tests}/drivers/test_AimTTi_PL601P.py (100%) rename {src/qcodes/tests => tests}/drivers/test_Keithley_2450.py (100%) rename {src/qcodes/tests => tests}/drivers/test_Keysight_33XXX.py (100%) rename {src/qcodes/tests => tests}/drivers/test_Keysight_N6705B.py (100%) rename {src/qcodes/tests => tests}/drivers/test_MercuryiPS.py (100%) rename {src/qcodes/tests => tests}/drivers/test_Rigol_DS1074Z.py (100%) rename {src/qcodes/tests => tests}/drivers/test_ami430.py (100%) rename {src/qcodes/tests => tests}/drivers/test_ami430_visa.py (100%) rename {src/qcodes/tests => tests}/drivers/test_keithley_26xx.py (100%) rename {src/qcodes/tests => tests}/drivers/test_keithley_3706A.py (100%) rename {src/qcodes/tests => tests}/drivers/test_keithley_7510.py (100%) rename {src/qcodes/tests => tests}/drivers/test_keithley_s46.py (100%) rename {src/qcodes/tests => tests}/drivers/test_keysight_34465a.py (100%) rename {src/qcodes/tests => tests}/drivers/test_keysight_34934a.py (100%) rename {src/qcodes/tests => tests}/drivers/test_keysight_34980a.py (100%) rename {src/qcodes/tests => tests}/drivers/test_keysight_b220x.py (100%) rename {src/qcodes/tests => tests}/drivers/test_keysight_e4980a.py (100%) rename {src/qcodes/tests => tests}/drivers/test_keysight_n9030b.py (100%) rename {src/qcodes/tests => tests}/drivers/test_lakeshore.py (100%) rename {src/qcodes/tests => tests}/drivers/test_lakeshore_325.py (100%) rename {src/qcodes/tests => tests}/drivers/test_lakeshore_325_legacy.py (100%) rename 
{src/qcodes/tests => tests}/drivers/test_lakeshore_335.py (100%) rename {src/qcodes/tests => tests}/drivers/test_lakeshore_336.py (100%) rename {src/qcodes/tests => tests}/drivers/test_lakeshore_336_legacy.py (100%) rename {src/qcodes/tests => tests}/drivers/test_lakeshore_file_parser.py (100%) rename {src/qcodes/tests => tests}/drivers/test_rto_1000.py (100%) rename {src/qcodes/tests => tests}/drivers/test_stahl.py (100%) rename {src/qcodes/tests => tests}/drivers/test_tektronix_AWG5014C.py (100%) rename {src/qcodes/tests => tests}/drivers/test_tektronix_AWG5208.py (100%) rename {src/qcodes/tests => tests}/drivers/test_tektronix_AWG70000A.py (100%) rename {src/qcodes/tests => tests}/drivers/test_tektronix_dpo7200xx.py (100%) rename {src/qcodes/tests => tests}/drivers/test_weinchel.py (100%) rename {src/qcodes/tests => tests}/drivers/test_yokogawa_gs200.py (100%) create mode 100644 tests/helpers/__init__.py rename {src/qcodes/tests => tests}/helpers/test_delegate_attribues.py (100%) rename {src/qcodes/tests => tests}/helpers/test_json_encoder.py (100%) rename {src/qcodes/tests => tests}/helpers/test_strip_attrs.py (100%) create mode 100644 tests/instrument_mocks.py create mode 100644 tests/mockers/__init__.py rename {src/qcodes/tests => tests}/mockers/test_simulated_ats_api.py (100%) create mode 100644 tests/parameter/__init__.py create mode 100644 tests/parameter/conftest.py rename {src/qcodes/tests => tests}/parameter/test_array_parameter.py (100%) rename {src/qcodes/tests => tests}/parameter/test_combined_par.py (100%) rename {src/qcodes/tests => tests}/parameter/test_delegate_parameter.py (100%) rename {src/qcodes/tests => tests}/parameter/test_elapsed_time_parameter.py (100%) rename {src/qcodes/tests => tests}/parameter/test_function.py (100%) rename {src/qcodes/tests => tests}/parameter/test_get_latest.py (100%) rename {src/qcodes/tests => tests}/parameter/test_get_set_parser.py (100%) rename {src/qcodes/tests => tests}/parameter/test_get_set_wrapping.py (100%) rename {src/qcodes/tests => tests}/parameter/test_group_parameter.py (100%) rename {src/qcodes/tests => tests}/parameter/test_instrument_ref_parameter.py (100%) rename {src/qcodes/tests => tests}/parameter/test_issequence.py (100%) rename {src/qcodes/tests => tests}/parameter/test_issequenceof.py (100%) rename {src/qcodes/tests => tests}/parameter/test_make_sweep.py (100%) rename {src/qcodes/tests => tests}/parameter/test_multi_parameter.py (100%) rename {src/qcodes/tests => tests}/parameter/test_non_gettable_parameter.py (100%) rename {src/qcodes/tests => tests}/parameter/test_on_off_mapping.py (100%) rename {src/qcodes/tests => tests}/parameter/test_parameter_basics.py (100%) rename {src/qcodes/tests => tests}/parameter/test_parameter_cache.py (100%) rename {src/qcodes/tests => tests}/parameter/test_parameter_context_manager.py (100%) rename {src/qcodes/tests => tests}/parameter/test_parameter_ramp.py (100%) rename {src/qcodes/tests => tests}/parameter/test_parameter_registration.py (100%) rename {src/qcodes/tests => tests}/parameter/test_parameter_scale_offset.py (100%) rename {src/qcodes/tests => tests}/parameter/test_parameter_validation.py (100%) rename {src/qcodes/tests => tests}/parameter/test_parameter_with_setpoints.py (100%) rename {src/qcodes/tests => tests}/parameter/test_permissive_range.py (100%) rename {src/qcodes/tests => tests}/parameter/test_scaled_parameter.py (100%) rename {src/qcodes/tests => tests}/parameter/test_snapshot.py (100%) rename {src/qcodes/tests => tests}/parameter/test_val_mapping.py (100%) 
rename {src/qcodes/tests => tests}/parameter/test_validators.py (100%) create mode 100644 tests/sphinx_extension/__init__.py rename {src/qcodes/tests => tests}/sphinx_extension/test_parse_parameter_attr.py (100%) rename {src/qcodes/tests => tests}/test_abstract_instrument.py (100%) rename {src/qcodes/tests => tests}/test_autoloadable_channels.py (100%) rename {src/qcodes/tests => tests}/test_channels.py (100%) rename {src/qcodes/tests => tests}/test_command.py (100%) rename {src/qcodes/tests => tests}/test_config.py (100%) rename {src/qcodes/tests => tests}/test_deprecate.py (100%) rename {src/qcodes/tests => tests}/test_field_vector.py (100%) rename {src/qcodes/tests => tests}/test_installation_info.py (100%) rename {src/qcodes/tests => tests}/test_instrument.py (100%) rename {src/qcodes/tests => tests}/test_interactive_widget.py (100%) rename {src/qcodes/tests => tests}/test_logger.py (100%) rename {src/qcodes/tests => tests}/test_metadata.py (100%) rename {src/qcodes/tests => tests}/test_monitor.py (100%) rename {src/qcodes/tests => tests}/test_plot_utils.py (100%) rename {src/qcodes/tests => tests}/test_snapshot.py (100%) rename {src/qcodes/tests => tests}/test_station.py (100%) rename {src/qcodes/tests => tests}/test_sweep_values.py (100%) rename {src/qcodes/tests => tests}/test_testutils.py (100%) rename {src/qcodes/tests => tests}/test_threading.py (100%) rename {src/qcodes/tests => tests}/test_visa.py (100%) create mode 100644 tests/utils/__init__.py rename {src/qcodes/tests => tests}/utils/test_attributes_set_to_context_manager.py (100%) rename {src/qcodes/tests => tests}/utils/test_class_strings.py (100%) rename {src/qcodes/tests => tests}/utils/test_isfunction.py (100%) rename {src/qcodes/tests => tests}/utils/test_partial_with_docstring.py (100%) create mode 100644 tests/validators/__init__.py create mode 100644 tests/validators/conftest.py rename {src/qcodes/tests => tests}/validators/test_arrays.py (100%) rename {src/qcodes/tests => tests}/validators/test_basic.py (100%) rename {src/qcodes/tests => tests}/validators/test_bool.py (100%) rename {src/qcodes/tests => tests}/validators/test_callable.py (100%) rename {src/qcodes/tests => tests}/validators/test_complex.py (100%) rename {src/qcodes/tests => tests}/validators/test_dict.py (100%) rename {src/qcodes/tests => tests}/validators/test_enum.py (100%) rename {src/qcodes/tests => tests}/validators/test_ints.py (100%) rename {src/qcodes/tests => tests}/validators/test_lists.py (100%) rename {src/qcodes/tests => tests}/validators/test_multi_type.py (100%) rename {src/qcodes/tests => tests}/validators/test_multi_type_and.py (100%) rename {src/qcodes/tests => tests}/validators/test_multi_type_or.py (100%) rename {src/qcodes/tests => tests}/validators/test_multiples.py (100%) rename {src/qcodes/tests => tests}/validators/test_numbers.py (100%) rename {src/qcodes/tests => tests}/validators/test_permissive_ints.py (100%) rename {src/qcodes/tests => tests}/validators/test_permissive_multiples.py (100%) rename {src/qcodes/tests => tests}/validators/test_sequence.py (100%) rename {src/qcodes/tests => tests}/validators/test_string.py (100%) diff --git a/src/qcodes/tests/helpers/test_compare_dictionaries.py b/src/qcodes/tests/helpers/test_compare_dictionaries.py deleted file mode 100644 index 869f89708be..00000000000 --- a/src/qcodes/tests/helpers/test_compare_dictionaries.py +++ /dev/null @@ -1,85 +0,0 @@ -import numpy as np -import pytest - -from qcodes.tests.common import compare_dictionaries - - -def test_same() -> None: - # 
NOTE(alexcjohnson): the numpy array and list compare equal, - # even though a list and tuple would not. See TODO in - # compare_dictionaries. - a = {'a': 1, 2: [3, 4, {5: 6}], 'b': {'c': 'd'}, 'x': np.array([7, 8])} - b = {'a': 1, 2: [3, 4, {5: 6}], 'b': {'c': 'd'}, 'x': [7, 8]} - - match, err = compare_dictionaries(a, b) - assert match - assert err == '' - - -def test_bad_dict() -> None: - # NOTE(alexcjohnson): - # this is a valid dict, but not good JSON because the tuple key cannot - # be converted into a string. - # It throws an error in compare_dictionaries, which is likely what we - # want, but we should be aware of it. - a = {(5, 6): (7, 8)} - with pytest.raises(TypeError): - compare_dictionaries(a, a) - - -def test_key_diff() -> None: - a = {'a': 1, 'c': 4} - b = {'b': 1, 'c': 4} - - match, err = compare_dictionaries(a, b) - - assert not match - assert 'Key d1[a] not in d2' in err - assert 'Key d2[b] not in d1' in err - - # try again with dict names for completeness - match, err = compare_dictionaries(a, b, 'a', 'b') - - assert not match - assert 'Key a[a] not in b' in err - assert 'Key b[b] not in a' in err - - -def test_val_diff_simple() -> None: - a = {'a': 1} - b = {'a': 2} - - match, err = compare_dictionaries(a, b) - - assert not match - assert 'Value of "d1[a]" ("1", type"") not same as' in err - assert '"d2[a]" ("2", type"")' in err - - -def test_val_diff_seq() -> None: - # NOTE(alexcjohnson): - # we don't dive recursively into lists at the moment. - # Perhaps we want to? Seems like list equality does a deep comparison, - # so it's not necessary to get ``match`` right, but the error message - # could be more helpful if we did. - a = {'a': [1, {2: 3}, 4]} - b = {'a': [1, {5: 6}, 4]} - - match, err = compare_dictionaries(a, b) - - assert not match - assert ( - 'Value of "d1[a]" ("[1, {2: 3}, 4]", type"") not same' - ) in err - assert '"d2[a]" ("[1, {5: 6}, 4]", type"")' in err - - -def test_nested_key_diff() -> None: - a = {'a': {'b': 'c'}} - b = {'a': {'d': 'c'}} - - match, err = compare_dictionaries(a, b) - - assert not match - assert 'Key d1[a][b] not in d2' in err - assert 'Key d2[a][d] not in d1' in err diff --git a/tests/__init__.py b/tests/__init__.py new file mode 100644 index 00000000000..e69de29bb2d diff --git a/tests/common.py b/tests/common.py new file mode 100644 index 00000000000..3a180333e38 --- /dev/null +++ b/tests/common.py @@ -0,0 +1,162 @@ +from __future__ import annotations + +import cProfile +import os +from functools import wraps +from pathlib import Path +from time import sleep +from typing import TYPE_CHECKING, Any, Callable, TypeVar + +import pytest +from typing_extensions import ParamSpec + +from qcodes.metadatable import MetadatableWithName + +if TYPE_CHECKING: + from pytest import ExceptionInfo + + +T = TypeVar("T") +P = ParamSpec("P") + +def retry_until_does_not_throw( + exception_class_to_expect: type[Exception] = AssertionError, + tries: int = 5, + delay: float = 0.1, +) -> Callable[[Callable[P, T]], Callable[P, T]]: + """ + Call the decorated function given number of times with given delay between + the calls until it does not throw an exception of a given class. + + If the function throws an exception of a different class, it gets propagated + outside (i.e. the function is not called anymore). + + Usage: + >> x = False # let's assume that another thread has access to "x", + # and it is going to change "x" to "True" very soon + >> @retry_until_does_not_throw() ... + def assert_x_is_true(): ... + assert x, "x is still False..." ... 
+ >> assert_x_is_true() # depending on the settings of + # "retry_until_does_not_throw", it will keep + # calling the function (with breaks in between) + # until either it does not throw or + # the number of tries is exceeded. + + Args: + exception_class_to_expect + Only in case of this exception the function will be called again + tries + Number of times to retry calling the function before giving up + delay + Delay between retries of the function call, in seconds + + Returns: + A callable that runs the decorated function until it does not throw + a given exception + """ + + def retry_until_passes_decorator(func: Callable[P, T]) -> Callable[P, T]: + + @wraps(func) + def func_retry(*args: P.args, **kwargs: P.kwargs) -> T: + tries_left = tries - 1 + while tries_left > 0: + try: + return func(*args, **kwargs) + except exception_class_to_expect: + tries_left -= 1 + sleep(delay) + # the very last attempt to call the function is outside + # the "try-except" clause, so that the exception can propagate + # up the call stack + return func(*args, **kwargs) + + return func_retry + + return retry_until_passes_decorator + + +def profile(func: Callable[P, T]) -> Callable[P, T]: + """ + Decorator that profiles the wrapped function with cProfile. + + It produces a '.prof' file in the current working directory + that has the name of the executed function. + + Use the 'Stats' class from the 'pstats' module to read the file, + analyze the profile data (for example, 'p.sort_stats('tottime')' + where 'p' is an instance of the 'Stats' class), and print the data + (for example, 'p.print_stats()'). + """ + + def wrapper(*args: P.args, **kwargs: P.kwargs) -> T: + profile_filename = func.__name__ + '.prof' + profiler = cProfile.Profile() + result = profiler.runcall(func, *args, **kwargs) + profiler.dump_stats(profile_filename) + return result + return wrapper + + +def error_caused_by(excinfo: ExceptionInfo[Any], cause: str) -> bool: + """ + Helper function to figure out whether an exception was caused by another + exception with the message provided. + + Args: + excinfo: the output of with pytest.raises() as excinfo + cause: the error message or a substring of it + """ + + exc_repr = excinfo.getrepr() + + chain = getattr(exc_repr, "chain", None) + + if chain is not None: + # first element of the chain is info about the root exception + error_location = chain[0][1] + root_traceback = chain[0][0] + # the error location is the most reliable data since + # it only contains the location and the error raised. + # however there are cases where this is empty + # in such cases fall back to the traceback + if error_location is not None: + return cause in str(error_location) + else: + return cause in str(root_traceback) + else: + return False + + +def skip_if_no_fixtures(dbname: str | Path) -> None: + if not os.path.exists(dbname): + pytest.skip( + "No db-file fixtures found. 
" + "Make sure that your git clone of qcodes has submodules " + "This can be done by executing: `git submodule update --init`" + ) + + +class DummyComponent(MetadatableWithName): + + """Docstring for DummyComponent.""" + + def __init__(self, name: str): + super().__init__() + self.name = name + + def __str__(self) -> str: + return self.full_name + + def set(self, value: float) -> float: + value = value * 2 + return value + + @property + def short_name(self) -> str: + return self.name + + @property + def full_name(self) -> str: + return self.full_name diff --git a/tests/conftest.py b/tests/conftest.py new file mode 100644 index 00000000000..2d6fd537f72 --- /dev/null +++ b/tests/conftest.py @@ -0,0 +1,188 @@ +from __future__ import annotations + +import copy +import gc +import os +import sys +from collections.abc import Generator +from pathlib import Path +from typing import TYPE_CHECKING + +import pytest +from hypothesis import settings + +import qcodes as qc +from qcodes.configuration import Config +from qcodes.dataset import initialise_database, new_data_set +from qcodes.dataset.data_set import DataSet +from qcodes.dataset.descriptions.dependencies import InterDependencies_ +from qcodes.dataset.descriptions.param_spec import ParamSpecBase +from qcodes.dataset.experiment_container import Experiment, new_experiment +from qcodes.instrument import Instrument +from qcodes.station import Station + +settings.register_profile("ci", deadline=1000) + +n_experiments = 0 + +if TYPE_CHECKING: + from qcodes.configuration import DotDict + +def pytest_configure(config: pytest.Config) -> None: + config.addinivalue_line("markers", "win32: tests that only run under windows") + + +def pytest_runtest_setup(item: pytest.Item) -> None: + ALL = set("darwin linux win32".split()) + supported_platforms = ALL.intersection(mark.name for mark in item.iter_markers()) + if supported_platforms and sys.platform not in supported_platforms: + pytest.skip(f"cannot run on platform {sys.platform}") + + +@pytest.fixture(scope="session", autouse=True) +def default_session_config( + tmpdir_factory: pytest.TempdirFactory, +) -> Generator[None, None, None]: + """ + Set the config for the test session to be the default config. + Making sure that that user config does not influence the tests and + that tests cannot write to the user config. 
+ """ + home_file_name = Config.home_file_name + schema_home_file_name = Config.schema_home_file_name + env_file_name = Config.env_file_name + schema_env_file_name = Config.schema_env_file_name + cwd_file_name = Config.cwd_file_name + schema_cwd_file_name = Config.schema_cwd_file_name + + old_config: DotDict | None = copy.deepcopy(qc.config.current_config) + qc.config.current_config = copy.deepcopy(qc.config.defaults) + + tmp_path = tmpdir_factory.mktemp("qcodes_tests") + + file_name = str(tmp_path / "user_config.json") + file_name_schema = str(tmp_path / "user_config_schema.json") + + qc.config.home_file_name = file_name + qc.config.schema_home_file_name = file_name_schema + qc.config.env_file_name = "" + qc.config.schema_env_file_name = "" + qc.config.cwd_file_name = "" + qc.config.schema_cwd_file_name = "" + + # set any config that we want to be different from the default + # for the test session here + # also set the default db path here + qc.config.logger.start_logging_on_import = "never" + qc.config.telemetry.enabled = False + qc.config.subscription.default_subscribers = [] + qc.config.core.db_location = str(tmp_path / "temp.db") + + try: + yield + finally: + qc.config.home_file_name = home_file_name + qc.config.schema_home_file_name = schema_home_file_name + qc.config.env_file_name = env_file_name + qc.config.schema_env_file_name = schema_env_file_name + qc.config.cwd_file_name = cwd_file_name + qc.config.schema_cwd_file_name = schema_cwd_file_name + + qc.config.current_config = old_config + + +@pytest.fixture(scope="function", autouse=True) +def reset_state_on_exit() -> Generator[None, None, None]: + """ + Fixture to clean any shared state on exit + + Currently this resets the config to the default config, + closes the default station and closes all instruments. + """ + default_config_obj: DotDict | None = copy.deepcopy(qc.config.current_config) + + try: + yield + finally: + qc.config.current_config = default_config_obj + Instrument.close_all() + Station.default = None + + +@pytest.fixture(scope="function", name="empty_temp_db") +def _make_empty_temp_db(tmp_path: Path) -> Generator[None, None, None]: + global n_experiments + n_experiments = 0 + # create a temp database for testing + try: + qc.config["core"]["db_location"] = str(tmp_path / "temp.db") + if os.environ.get("QCODES_SQL_DEBUG"): + qc.config["core"]["db_debug"] = True + else: + qc.config["core"]["db_debug"] = False + initialise_database() + yield + finally: + # there is a very real chance that the tests will leave open + # connections to the database. These will have gone out of scope at + # this stage but a gc collection may not have run. 
The gc + # collection ensures that all connections belonging to now out of + # scope objects will be closed + gc.collect() + + +# note that you cannot use mark.usefixtures in a fixture +# so empty_temp_db needs to be passed to this fixture +# even if unused https://github.com/pytest-dev/pytest/issues/3664 +@pytest.fixture(scope="function", name="experiment") +def _make_experiment(empty_temp_db: None) -> Generator[Experiment, None, None]: + e = new_experiment("test-experiment", sample_name="test-sample") + try: + yield e + finally: + e.conn.close() + + +@pytest.fixture(scope="function", name="dataset") +def _make_dataset(experiment: Experiment) -> Generator[DataSet, None, None]: + dataset = new_data_set("test-dataset") + try: + yield dataset + finally: + dataset.unsubscribe_all() + dataset.conn.close() + + +@pytest.fixture(name="standalone_parameters_dataset") +def _make_standalone_parameters_dataset( + dataset: DataSet, +) -> Generator[DataSet, None, None]: + n_params = 3 + n_rows = 10 ** 3 + params_indep = [ + ParamSpecBase(f"param_{i}", "numeric", label=f"param_{i}", unit="V") + for i in range(n_params) + ] + + param_dep = ParamSpecBase( + f"param_{n_params}", "numeric", label=f"param_{n_params}", unit="Ohm" + ) + + params_all = params_indep + [param_dep] + + idps = InterDependencies_( + dependencies={param_dep: tuple(params_indep[0:1])}, + standalones=tuple(params_indep[1:]), + ) + + dataset.set_interdependencies(idps) + + dataset.mark_started() + dataset.add_results( + [ + {p.name: int(n_rows * 10 * pn + i) for pn, p in enumerate(params_all)} + for i in range(n_rows) + ] + ) + dataset.mark_completed() + yield dataset diff --git a/tests/dataset/__init__.py b/tests/dataset/__init__.py new file mode 100644 index 00000000000..7362041bca4 --- /dev/null +++ b/tests/dataset/__init__.py @@ -0,0 +1,3 @@ +import pytest + +pytest.register_assert_rewrite('qcodes.tests.dataset.helper_functions') diff --git a/tests/dataset/conftest.py b/tests/dataset/conftest.py new file mode 100644 index 00000000000..9057aa9bc65 --- /dev/null +++ b/tests/dataset/conftest.py @@ -0,0 +1,708 @@ +from __future__ import annotations + +import gc +import os +import shutil +import tempfile +from collections.abc import Generator, Iterator +from contextlib import contextmanager + +import numpy as np +import pytest +from pytest import FixtureRequest + +import qcodes as qc +from qcodes.dataset.data_set import DataSet +from qcodes.dataset.descriptions.dependencies import InterDependencies_ +from qcodes.dataset.descriptions.param_spec import ParamSpec, ParamSpecBase +from qcodes.dataset.measurements import Measurement +from qcodes.dataset.sqlite.database import connect +from qcodes.instrument_drivers.mock_instruments import ( + ArraySetPointParam, + DummyChannelInstrument, + DummyInstrument, + Multi2DSetPointParam, + Multi2DSetPointParam2Sizes, + setpoint_generator, +) +from qcodes.parameters import ArrayParameter, Parameter, ParameterWithSetpoints +from qcodes.validators import Arrays, ComplexNumbers, Numbers + + +@pytest.fixture(scope="function", name="non_created_db") +def _make_non_created_db(tmp_path) -> Generator[None, None, None]: + # set db location to a non existing file + try: + qc.config["core"]["db_location"] = str(tmp_path / "temp.db") + if os.environ.get("QCODES_SQL_DEBUG"): + qc.config["core"]["db_debug"] = True + else: + qc.config["core"]["db_debug"] = False + yield + finally: + # there is a very real chance that the tests will leave open + # connections to the database. 
These will have gone out of scope at + # this stage but a gc collection may not have run. The gc + # collection ensures that all connections belonging to now out of + # scope objects will be closed + gc.collect() + + +@pytest.fixture(scope='function') +def empty_temp_db_connection(tmp_path): + """ + Yield connection to an empty temporary DB file. + """ + path = str(tmp_path / 'source.db') + conn = connect(path) + try: + yield conn + finally: + conn.close() + # there is a very real chance that the tests will leave open + # connections to the database. These will have gone out of scope at + # this stage but a gc collection may not have run. The gc + # collection ensures that all connections belonging to now out of + # scope objects will be closed + gc.collect() + + +@pytest.fixture(scope='function') +def two_empty_temp_db_connections(tmp_path): + """ + Yield connections to two empty files. Meant for use with the + test_database_extract_runs + """ + + source_path = str(tmp_path / 'source.db') + target_path = str(tmp_path / 'target.db') + source_conn = connect(source_path) + target_conn = connect(target_path) + try: + yield (source_conn, target_conn) + finally: + source_conn.close() + target_conn.close() + # there is a very real chance that the tests will leave open + # connections to the database. These will have gone out of scope at + # this stage but a gc collection may not have run. The gc + # collection ensures that all connections belonging to now out of + # scope objects will be closed + gc.collect() + + +@contextmanager +def temporarily_copied_DB(filepath: str, **kwargs): + """ + Make a temporary copy of a db-file and delete it after use. Meant to be + used together with the old version database fixtures, lest we change the + fixtures on disk. Yields the connection object + + Args: + filepath: path to the db-file + + Kwargs: + kwargs to be passed to connect + """ + with tempfile.TemporaryDirectory() as tmpdir: + dbname_new = os.path.join(tmpdir, 'temp.db') + shutil.copy2(filepath, dbname_new) + + conn = connect(dbname_new, **kwargs) + + try: + yield conn + + finally: + conn.close() + + +@pytest.fixture(name="scalar_dataset") +def _make_scalar_dataset(dataset): + n_params = 3 + n_rows = 10**3 + params_indep = [ + ParamSpecBase(f"param_{i}", "numeric", label=f"param_{i}", unit="V") + for i in range(n_params) + ] + param_dep = ParamSpecBase( + f"param_{n_params}", "numeric", label=f"param_{n_params}", unit="Ohm" + ) + + all_params = params_indep + [param_dep] + + idps = InterDependencies_(dependencies={param_dep: tuple(params_indep)}) + + dataset.set_interdependencies(idps) + dataset.mark_started() + dataset.add_results( + [ + {p.name: int(n_rows * 10 * pn + i) for pn, p in enumerate(all_params)} + for i in range(n_rows) + ] + ) + dataset.mark_completed() + yield dataset + + +@pytest.fixture( + name="scalar_datasets_parameterized", params=((3, 10**3), (5, 10**3), (10, 50)) +) +def _make_scalar_datasets_parameterized(dataset, request: FixtureRequest): + n_params = request.param[0] + n_rows = request.param[1] + params_indep = [ParamSpecBase(f'param_{i}', + 'numeric', + label=f'param_{i}', + unit='V') + for i in range(n_params)] + param_dep = ParamSpecBase(f'param_{n_params}', + 'numeric', + label=f'param_{n_params}', + unit='Ohm') + + all_params = params_indep + [param_dep] + + idps = InterDependencies_(dependencies={param_dep: tuple(params_indep)}) + + dataset.set_interdependencies(idps) + dataset.mark_started() + dataset.add_results([{p.name: int(n_rows*10*pn+i) + for pn, p in 
enumerate(all_params)} + for i in range(n_rows)]) + dataset.mark_completed() + yield dataset + + +@pytest.fixture +def scalar_dataset_with_nulls(dataset): + """ + A very simple dataset. A scalar is varied, and two parameters are measured + one by one + """ + sp = ParamSpecBase('setpoint', 'numeric') + val1 = ParamSpecBase('first_value', 'numeric') + val2 = ParamSpecBase('second_value', 'numeric') + + idps = InterDependencies_(dependencies={val1: (sp,), val2: (sp,)}) + dataset.set_interdependencies(idps) + + dataset.mark_started() + + dataset.add_results([{sp.name: 0, val1.name: 1}, + {sp.name: 0, val2.name: 2}]) + dataset.mark_completed() + yield dataset + + +@pytest.fixture(scope="function", + params=["array", "numeric"]) +def array_dataset(experiment, request: FixtureRequest): + meas = Measurement() + param = ArraySetPointParam() + meas.register_parameter(param, paramtype=request.param) + + with meas.run() as datasaver: + datasaver.add_result((param, param.get(),)) + try: + yield datasaver.dataset + finally: + assert isinstance(datasaver.dataset, DataSet) + datasaver.dataset.conn.close() + + +@pytest.fixture(scope="function", + params=["array", "numeric"]) +def array_dataset_with_nulls(experiment, request: FixtureRequest): + """ + A dataset where two arrays are measured, one as a function + of two other (setpoint) arrays, the other as a function of just one + of them + """ + meas = Measurement() + meas.register_custom_parameter('sp1', paramtype=request.param) + meas.register_custom_parameter('sp2', paramtype=request.param) + meas.register_custom_parameter('val1', paramtype=request.param, + setpoints=('sp1', 'sp2')) + meas.register_custom_parameter('val2', paramtype=request.param, + setpoints=('sp1',)) + + with meas.run() as datasaver: + sp1_vals = np.arange(0, 5) + sp2_vals = np.arange(5, 10) + val1_vals = np.ones(5) + val2_vals = np.zeros(5) + datasaver.add_result(('sp1', sp1_vals), + ('sp2', sp2_vals), + ('val1', val1_vals)) + datasaver.add_result(('sp1', sp1_vals), + ('val2', val2_vals)) + try: + yield datasaver.dataset + finally: + assert isinstance(datasaver.dataset, DataSet) + datasaver.dataset.conn.close() + + +@pytest.fixture(scope="function", + params=["array", "numeric"]) +def multi_dataset(experiment, request: FixtureRequest): + meas = Measurement() + param = Multi2DSetPointParam() + + meas.register_parameter(param, paramtype=request.param) + + with meas.run() as datasaver: + datasaver.add_result((param, param.get(),)) + try: + yield datasaver.dataset + finally: + assert isinstance(datasaver.dataset, DataSet) + datasaver.dataset.conn.close() + + +@pytest.fixture(scope="function", + params=["array"]) +def different_setpoint_dataset(experiment, request: FixtureRequest): + meas = Measurement() + param = Multi2DSetPointParam2Sizes() + + meas.register_parameter(param, paramtype=request.param) + + with meas.run() as datasaver: + datasaver.add_result((param, param.get(),)) + try: + yield datasaver.dataset + finally: + assert isinstance(datasaver.dataset, DataSet) + datasaver.dataset.conn.close() + + +@pytest.fixture(scope="function") +def array_in_scalar_dataset(experiment): + meas = Measurement() + scalar_param = Parameter('scalarparam', set_cmd=None) + param = ArraySetPointParam() + meas.register_parameter(scalar_param) + meas.register_parameter(param, setpoints=(scalar_param,), + paramtype='array') + + with meas.run() as datasaver: + for i in range(1, 10): + scalar_param.set(i) + datasaver.add_result((scalar_param, scalar_param.get()), + (param, param.get())) + try: + yield 
datasaver.dataset + finally: + assert isinstance(datasaver.dataset, DataSet) + datasaver.dataset.conn.close() + + +@pytest.fixture(scope="function") +def varlen_array_in_scalar_dataset(experiment): + meas = Measurement() + scalar_param = Parameter('scalarparam', set_cmd=None) + param = ArraySetPointParam() + meas.register_parameter(scalar_param) + meas.register_parameter(param, setpoints=(scalar_param,), + paramtype='array') + np.random.seed(0) + with meas.run() as datasaver: + for i in range(1, 10): + scalar_param.set(i) + param.setpoints = (np.arange(i),) + datasaver.add_result((scalar_param, scalar_param.get()), + (param, np.random.rand(i))) + try: + yield datasaver.dataset + finally: + assert isinstance(datasaver.dataset, DataSet) + datasaver.dataset.conn.close() + + +@pytest.fixture(scope="function") +def array_in_scalar_dataset_unrolled(experiment): + """ + This fixture yields a dataset where an array-valued parameter is registered + as a 'numeric' type and has an additional single-valued setpoint. We + expect data to be saved as individual scalars, with the scalar setpoint + repeated. + """ + meas = Measurement() + scalar_param = Parameter('scalarparam', set_cmd=None) + param = ArraySetPointParam() + meas.register_parameter(scalar_param) + meas.register_parameter(param, setpoints=(scalar_param,), + paramtype='numeric') + + with meas.run() as datasaver: + for i in range(1, 10): + scalar_param.set(i) + datasaver.add_result((scalar_param, scalar_param.get()), + (param, param.get())) + try: + yield datasaver.dataset + finally: + assert isinstance(datasaver.dataset, DataSet) + datasaver.dataset.conn.close() + + +@pytest.fixture(scope="function", + params=["array", "numeric"]) +def array_in_str_dataset(experiment, request: FixtureRequest): + meas = Measurement() + scalar_param = Parameter('textparam', set_cmd=None) + param = ArraySetPointParam() + meas.register_parameter(scalar_param, paramtype='text') + meas.register_parameter(param, setpoints=(scalar_param,), + paramtype=request.param) + + with meas.run() as datasaver: + for i in ['A', 'B', 'C']: + scalar_param.set(i) + datasaver.add_result((scalar_param, scalar_param.get()), + (param, param.get())) + try: + yield datasaver.dataset + finally: + assert isinstance(datasaver.dataset, DataSet) + datasaver.dataset.conn.close() + + +@pytest.fixture +def some_paramspecbases(): + + psb1 = ParamSpecBase('psb1', paramtype='text', label='blah', unit='') + psb2 = ParamSpecBase('psb2', paramtype='array', label='', unit='V') + psb3 = ParamSpecBase('psb3', paramtype='array', label='', unit='V') + psb4 = ParamSpecBase('psb4', paramtype='numeric', label='number', unit='') + + return (psb1, psb2, psb3, psb4) + + +@pytest.fixture +def some_paramspecs(): + """ + Some different paramspecs for testing. The idea is that we just add a + new group of paramspecs as the need arises + """ + + groups = {} + + # A valid group. 
Corresponding to a heatmap with a text label at each point + first = {} + first['ps1'] = ParamSpec('ps1', paramtype='numeric', label='Raw Data 1', + unit='V') + first['ps2'] = ParamSpec('ps2', paramtype='array', label='Raw Data 2', + unit='V') + first['ps3'] = ParamSpec('ps3', paramtype='text', label='Axis 1', + unit='', inferred_from=[first['ps1']]) + first['ps4'] = ParamSpec('ps4', paramtype='numeric', label='Axis 2', + unit='V', inferred_from=[first['ps2']]) + first['ps5'] = ParamSpec('ps5', paramtype='numeric', label='Signal', + unit='Conductance', + depends_on=[first['ps3'], first['ps4']]) + first['ps6'] = ParamSpec('ps6', paramtype='text', label='Goodness', + unit='', depends_on=[first['ps3'], first['ps4']]) + groups[1] = first + + # a small, valid group + second = {} + second['ps1'] = ParamSpec('ps1', paramtype='numeric', + label='setpoint', unit='Hz') + second['ps2'] = ParamSpec('ps2', paramtype='numeric', label='signal', + unit='V', depends_on=[second['ps1']]) + groups[2] = second + + return groups + + +@pytest.fixture +def some_interdeps(): + """ + Some different InterDependencies_ objects for testing + """ + idps_list = [] + ps1 = ParamSpecBase('ps1', paramtype='numeric', label='Raw Data 1', + unit='V') + ps2 = ParamSpecBase('ps2', paramtype='array', label='Raw Data 2', + unit='V') + ps3 = ParamSpecBase('ps3', paramtype='text', label='Axis 1', + unit='') + ps4 = ParamSpecBase('ps4', paramtype='numeric', label='Axis 2', + unit='V') + ps5 = ParamSpecBase('ps5', paramtype='numeric', label='Signal', + unit='Conductance') + ps6 = ParamSpecBase('ps6', paramtype='text', label='Goodness', + unit='') + + idps = InterDependencies_(dependencies={ps5: (ps3, ps4), ps6: (ps3, ps4)}, + inferences={ps4: (ps2,), ps3: (ps1,)}) + + idps_list.append(idps) + + ps1 = ParamSpecBase('ps1', paramtype='numeric', + label='setpoint', unit='Hz') + ps2 = ParamSpecBase('ps2', paramtype='numeric', label='signal', + unit='V') + idps = InterDependencies_(dependencies={ps2: (ps1,)}) + + idps_list.append(idps) + + return idps_list + + +@pytest.fixture(name="DAC") # scope is "function" per default +def _make_dac(): + dac = DummyInstrument('dummy_dac', gates=['ch1', 'ch2']) + yield dac + dac.close() + + +@pytest.fixture(name="DAC3D") # scope is "function" per default +def _make_dac_3d(): + dac = DummyInstrument("dummy_dac", gates=["ch1", "ch2", "ch3"]) + yield dac + dac.close() + + +@pytest.fixture(name="DAC_with_metadata") # scope is "function" per default +def _make_dac_with_metadata(): + dac = DummyInstrument('dummy_dac', gates=['ch1', 'ch2'], + metadata={"dac": "metadata"}) + yield dac + dac.close() + + +@pytest.fixture(name="DMM") +def _make_dmm(): + dmm = DummyInstrument('dummy_dmm', gates=['v1', 'v2']) + yield dmm + dmm.close() + + +@pytest.fixture +def channel_array_instrument(): + channelarrayinstrument = DummyChannelInstrument('dummy_channel_inst') + yield channelarrayinstrument + channelarrayinstrument.close() + + +@pytest.fixture +def complex_num_instrument(): + + class MyParam(Parameter): + + def get_raw(self): + assert self.instrument is not None + return self.instrument.setpoint() + 1j*self.instrument.setpoint() + + class RealPartParam(Parameter): + + def get_raw(self): + assert self.instrument is not None + return self.instrument.complex_setpoint().real + + dummyinst = DummyInstrument('dummy_channel_inst', gates=()) + + dummyinst.add_parameter('setpoint', + parameter_class=Parameter, + initial_value=0, + label='Some Setpoint', + unit="Some Unit", + vals=Numbers(), + get_cmd=None, set_cmd=None) + + 
dummyinst.add_parameter('complex_num', + parameter_class=MyParam, + initial_value=0+0j, + label='Complex Num', + unit="complex unit", + vals=ComplexNumbers(), + get_cmd=None, set_cmd=None) + + dummyinst.add_parameter('complex_setpoint', + initial_value=0+0j, + label='Complex Setpoint', + unit="complex unit", + vals=ComplexNumbers(), + get_cmd=None, set_cmd=None) + + dummyinst.add_parameter('real_part', + parameter_class=RealPartParam, + label='Real Part', + unit="real unit", + vals=Numbers(), + set_cmd=None) + + dummyinst.add_parameter('some_array_setpoints', + label='Some Array Setpoints', + unit='some other unit', + vals=Arrays(shape=(5,)), + set_cmd=False, + get_cmd=lambda: np.arange(5)) + + dummyinst.add_parameter('some_array', + parameter_class=ParameterWithSetpoints, + setpoints=(dummyinst.some_array_setpoints,), + label='Some Array', + unit='some_array_unit', + vals=Arrays(shape=(5,)), + get_cmd=lambda: np.ones(5), + set_cmd=False) + + dummyinst.add_parameter('some_complex_array_setpoints', + label='Some complex array setpoints', + unit='some_array_unit', + get_cmd=lambda: np.arange(5), + set_cmd=False) + + dummyinst.add_parameter('some_complex_array', + label='Some Array', + unit='some_array_unit', + get_cmd=lambda: np.ones(5) + 1j*np.ones(5), + set_cmd=False) + + yield dummyinst + dummyinst.close() + + +@pytest.fixture +def SpectrumAnalyzer(): + """ + Yields a DummyInstrument that holds ArrayParameters returning + different types + """ + + class Spectrum(ArrayParameter): + + def __init__(self, name, instrument, **kwargs): + super().__init__( + name=name, + shape=(1,), # this attribute should be removed + label="Flower Power Spectrum", + unit="V/sqrt(Hz)", + setpoint_names=("Frequency",), + setpoint_units=("Hz",), + instrument=instrument, + **kwargs, + ) + + self.npts = 100 + self.start = 0 + self.stop = 2e6 + + def get_raw(self): + # This is how it should be: the setpoints are generated at the + # time we get. But that will of course not work with the old Loop + self.setpoints = (tuple(np.linspace(self.start, self.stop, + self.npts)),) + # not the best SA on the market; it just returns noise... 
+ return np.random.randn(self.npts) + + class MultiDimSpectrum(ArrayParameter): + + def __init__(self, name, instrument, **kwargs): + self.start = 0 + self.stop = 2e6 + self.npts = (100, 50, 20) + sp1 = np.linspace(self.start, self.stop, + self.npts[0]) + sp2 = np.linspace(self.start, self.stop, + self.npts[1]) + sp3 = np.linspace(self.start, self.stop, + self.npts[2]) + setpoints = setpoint_generator(sp1, sp2, sp3) + + super().__init__( + name=name, + instrument=instrument, + setpoints=setpoints, + shape=(100, 50, 20), + label="Flower Power Spectrum in 3D", + unit="V/sqrt(Hz)", + setpoint_names=("Frequency0", "Frequency1", "Frequency2"), + setpoint_units=("Hz", "Other Hz", "Third Hz"), + **kwargs, + ) + + def get_raw(self): + return np.random.randn(*self.npts) + + class ListSpectrum(Spectrum): + + def get_raw(self): + output = super().get_raw() + return list(output) + + class TupleSpectrum(Spectrum): + + def get_raw(self): + output = super().get_raw() + return tuple(output) + + SA = DummyInstrument('dummy_SA') + SA.add_parameter('spectrum', parameter_class=Spectrum) + SA.add_parameter('listspectrum', parameter_class=ListSpectrum) + SA.add_parameter('tuplespectrum', parameter_class=TupleSpectrum) + SA.add_parameter('multidimspectrum', parameter_class=MultiDimSpectrum) + yield SA + + SA.close() + + +@pytest.fixture +def meas_with_registered_param(experiment, DAC, DMM): + meas = Measurement() + meas.register_parameter(DAC.ch1) + meas.register_parameter(DMM.v1, setpoints=[DAC.ch1]) + yield meas + + +@pytest.fixture +def meas_with_registered_param_2d(experiment, DAC, DMM): + meas = Measurement() + meas.register_parameter(DAC.ch1) + meas.register_parameter(DAC.ch2) + meas.register_parameter(DMM.v1, setpoints=[DAC.ch1, DAC.ch2]) + yield meas + + +@pytest.fixture +def meas_with_registered_param_3d(experiment, DAC3D, DMM): + meas = Measurement() + meas.register_parameter(DAC3D.ch1) + meas.register_parameter(DAC3D.ch2) + meas.register_parameter(DAC3D.ch3) + meas.register_parameter(DMM.v1, setpoints=[DAC3D.ch1, DAC3D.ch2, DAC3D.ch3]) + yield meas + + +@pytest.fixture(name="meas_with_registered_param_complex") +def _make_meas_with_registered_param_complex(experiment, DAC, complex_num_instrument): + meas = Measurement() + meas.register_parameter(DAC.ch1) + meas.register_parameter(complex_num_instrument.complex_num, setpoints=[DAC.ch1]) + yield meas + + +@pytest.fixture(name="dummyinstrument") +def _make_dummy_instrument() -> Iterator[DummyChannelInstrument]: + inst = DummyChannelInstrument('dummyinstrument') + try: + yield inst + finally: + inst.close() + + +class ArrayshapedParam(Parameter): + + def __init__(self, *args, **kwargs): + super().__init__(*args, **kwargs) + + def get_raw(self): + assert isinstance(self.vals, Arrays) + shape = self.vals.shape + + return np.random.rand(*shape) diff --git a/tests/dataset/dond/__init__.py b/tests/dataset/dond/__init__.py new file mode 100644 index 00000000000..e69de29bb2d diff --git a/tests/dataset/dond/conftest.py b/tests/dataset/dond/conftest.py new file mode 100644 index 00000000000..947b5e3610d --- /dev/null +++ b/tests/dataset/dond/conftest.py @@ -0,0 +1,92 @@ +import matplotlib.pyplot as plt +import pytest + +from qcodes import config, validators +from qcodes.parameters import Parameter + + +@pytest.fixture(autouse=True) +def set_tmp_output_dir(tmpdir): + old_config = config.user.mainfolder + try: + config.user.mainfolder = str(tmpdir) + yield + finally: + config.user.mainfolder = old_config + + +@pytest.fixture() +def plot_close(): + yield + 
plt.close("all") + + +@pytest.fixture() +def _param(): + p = Parameter("simple_parameter", set_cmd=None, get_cmd=lambda: 1) + return p + + +@pytest.fixture() +def _param_2(): + p = Parameter("simple_parameter_2", set_cmd=None, get_cmd=lambda: 2) + return p + + +@pytest.fixture() +def _param_complex(): + p = Parameter( + "simple_complex_parameter", + set_cmd=None, + get_cmd=lambda: 1 + 1j, + vals=validators.ComplexNumbers(), + ) + return p + + +@pytest.fixture() +def _param_complex_2(): + p = Parameter( + "simple_complex_parameter_2", + set_cmd=None, + get_cmd=lambda: 2 + 2j, + vals=validators.ComplexNumbers(), + ) + return p + + +@pytest.fixture() +def _param_set(): + p = Parameter("simple_setter_parameter", set_cmd=None, get_cmd=None) + return p + + +@pytest.fixture() +def _param_set_2(): + p = Parameter("simple_setter_parameter_2", set_cmd=None, get_cmd=None) + return p + + +def _param_func(_p): + """ + A private utility function. + """ + _new_param = Parameter( + "modified_parameter", set_cmd=None, get_cmd=lambda: _p.get() * 2 + ) + return _new_param + + +@pytest.fixture() +def _param_callable(_param): + return _param_func(_param) + + +def test_param_callable(_param_callable) -> None: + _param_modified = _param_callable + assert _param_modified.get() == 2 + + +@pytest.fixture() +def _string_callable(): + return "Call" diff --git a/src/qcodes/tests/dataset/dond/test_do0d.py b/tests/dataset/dond/test_do0d.py similarity index 100% rename from src/qcodes/tests/dataset/dond/test_do0d.py rename to tests/dataset/dond/test_do0d.py diff --git a/src/qcodes/tests/dataset/dond/test_do1d.py b/tests/dataset/dond/test_do1d.py similarity index 100% rename from src/qcodes/tests/dataset/dond/test_do1d.py rename to tests/dataset/dond/test_do1d.py diff --git a/src/qcodes/tests/dataset/dond/test_do2d.py b/tests/dataset/dond/test_do2d.py similarity index 100% rename from src/qcodes/tests/dataset/dond/test_do2d.py rename to tests/dataset/dond/test_do2d.py diff --git a/src/qcodes/tests/dataset/dond/test_doNd.py b/tests/dataset/dond/test_doNd.py similarity index 100% rename from src/qcodes/tests/dataset/dond/test_doNd.py rename to tests/dataset/dond/test_doNd.py diff --git a/tests/dataset/fixtures/.gitignore b/tests/dataset/fixtures/.gitignore new file mode 100644 index 00000000000..8d319264d86 --- /dev/null +++ b/tests/dataset/fixtures/.gitignore @@ -0,0 +1,2 @@ +!*.json +!*.dat diff --git a/tests/dataset/fixtures/__init__.py b/tests/dataset/fixtures/__init__.py new file mode 100644 index 00000000000..e69de29bb2d diff --git a/tests/dataset/fixtures/data_2018_01_17/__init__.py b/tests/dataset/fixtures/data_2018_01_17/__init__.py new file mode 100644 index 00000000000..e69de29bb2d diff --git a/tests/dataset/fixtures/data_2018_01_17/data_001_testsweep_15_42_57/__init__.py b/tests/dataset/fixtures/data_2018_01_17/data_001_testsweep_15_42_57/__init__.py new file mode 100644 index 00000000000..e69de29bb2d diff --git a/tests/dataset/fixtures/data_2018_01_17/data_001_testsweep_15_42_57/dac_ch1_set.dat b/tests/dataset/fixtures/data_2018_01_17/data_001_testsweep_15_42_57/dac_ch1_set.dat new file mode 100644 index 00000000000..d3987392347 --- /dev/null +++ b/tests/dataset/fixtures/data_2018_01_17/data_001_testsweep_15_42_57/dac_ch1_set.dat @@ -0,0 +1,204 @@ +# dac_ch1_set dmm_voltage +# "Gate ch1" "Gate voltage" +# 201 +0 57 +0.1 51 +0.2 69 +0.3 40 +0.4 97 +0.5 90 +0.6 17 +0.7 74 +0.8 80 +0.9 6 +1 74 +1.1 18 +1.2 8 +1.3 18 +1.4 39 +1.5 79 +1.6 33 +1.7 40 +1.8 14 +1.9 3 +2 16 +2.1 99 +2.2 22 +2.3 82 +2.4 68 +2.5 66 +2.6 
70 +2.7 69 +2.8 83 +2.9 82 +3 1 +3.1 33 +3.2 6 +3.3 38 +3.4 52 +3.5 46 +3.6 19 +3.7 59 +3.8 71 +3.9 39 +4 87 +4.1 24 +4.2 46 +4.3 63 +4.4 95 +4.5 38 +4.6 65 +4.7 15 +4.8 93 +4.9 18 +5 83 +5.1 87 +5.2 71 +5.3 43 +5.4 54 +5.5 67 +5.6 49 +5.7 26 +5.8 27 +5.9 72 +6 0 +6.1 32 +6.2 96 +6.3 48 +6.4 77 +6.5 68 +6.6 57 +6.7 27 +6.8 75 +6.9 25 +7 54 +7.1 70 +7.2 42 +7.3 44 +7.4 82 +7.5 37 +7.6 98 +7.7 10 +7.8 9 +7.9 55 +8 46 +8.1 81 +8.2 34 +8.3 46 +8.4 73 +8.5 35 +8.6 68 +8.7 74 +8.8 32 +8.9 65 +9 59 +9.1 32 +9.2 80 +9.3 0 +9.4 38 +9.5 54 +9.6 12 +9.7 5 +9.8 20 +9.9 94 +10 31 +10.1 4 +10.2 60 +10.3 14 +10.4 70 +10.5 1 +10.6 37 +10.7 32 +10.8 10 +10.9 97 +11 16 +11.1 21 +11.2 53 +11.3 19 +11.4 77 +11.5 51 +11.6 8 +11.7 83 +11.8 85 +11.9 22 +12 100 +12.1 45 +12.2 62 +12.3 35 +12.4 92 +12.5 74 +12.6 15 +12.7 29 +12.8 94 +12.9 6 +13 1 +13.1 49 +13.2 91 +13.3 16 +13.4 69 +13.5 33 +13.6 32 +13.7 2 +13.8 36 +13.9 5 +14 96 +14.1 87 +14.2 67 +14.3 79 +14.4 16 +14.5 87 +14.6 7 +14.7 27 +14.8 100 +14.9 24 +15 97 +15.1 52 +15.2 13 +15.3 8 +15.4 40 +15.5 73 +15.6 57 +15.7 92 +15.8 91 +15.9 94 +16 63 +16.1 17 +16.2 50 +16.3 37 +16.4 71 +16.5 23 +16.6 33 +16.7 53 +16.8 68 +16.9 7 +17 17 +17.1 72 +17.2 94 +17.3 62 +17.4 70 +17.5 10 +17.6 56 +17.7 81 +17.8 39 +17.9 14 +18 40 +18.1 60 +18.2 48 +18.3 65 +18.4 98 +18.5 43 +18.6 12 +18.7 5 +18.8 85 +18.9 54 +19 91 +19.1 84 +19.2 98 +19.3 44 +19.4 61 +19.5 52 +19.6 1 +19.7 82 +19.8 59 +19.9 89 +20 17 diff --git a/tests/dataset/fixtures/data_2018_01_17/data_001_testsweep_15_42_57/snapshot.json b/tests/dataset/fixtures/data_2018_01_17/data_001_testsweep_15_42_57/snapshot.json new file mode 100644 index 00000000000..7de44914386 --- /dev/null +++ b/tests/dataset/fixtures/data_2018_01_17/data_001_testsweep_15_42_57/snapshot.json @@ -0,0 +1,230 @@ +{ + "station": { + "instruments": { + "dac": { + "functions": {}, + "submodules": {}, + "__class__": "qcodes.instrument_drivers.mock_instruments.DummyInstrument", + "parameters": { + "IDN": { + "value": { + "vendor": null, + "model": "dac", + "serial": null, + "firmware": null + }, + "ts": "2018-01-17 15:42:38", + "raw_value": { + "vendor": null, + "model": "dac", + "serial": null, + "firmware": null + }, + "__class__": "qcodes.instrument.parameter.Parameter", + "full_name": "dac_IDN", + "vals": "", + "label": "IDN", + "name": "IDN", + "inter_delay": 0, + "instrument": "qcodes.instrument_drivers.mock_instruments.DummyInstrument", + "instrument_name": "dac", + "post_delay": 0, + "unit": "" + }, + "ch1": { + "value": 8, + "ts": "2018-01-17 15:42:44", + "raw_value": 8, + "__class__": "qcodes.instrument.parameter.Parameter", + "full_name": "dac_ch1", + "vals": "", + "label": "Gate ch1", + "name": "ch1", + "inter_delay": 0, + "instrument": "qcodes.instrument_drivers.mock_instruments.DummyInstrument", + "instrument_name": "dac", + "post_delay": 0, + "unit": "V" + }, + "ch2": { + "value": 0, + "ts": "2018-01-17 15:42:38", + "raw_value": 0, + "__class__": "qcodes.instrument.parameter.Parameter", + "full_name": "dac_ch2", + "vals": "", + "label": "Gate ch2", + "name": "ch2", + "inter_delay": 0, + "instrument": "qcodes.instrument_drivers.mock_instruments.DummyInstrument", + "instrument_name": "dac", + "post_delay": 0, + "unit": "V" + }, + "verbose_channel": { + "value": 5, + "ts": "2018-01-17 15:42:50", + "raw_value": 5, + "__class__": "qcodes.instrument.parameter.Parameter", + "full_name": "dac_verbose_channel", + "label": "Verbose Channel", + "name": "verbose_channel", + "inter_delay": 0, + "instrument": 
"qcodes.instrument_drivers.mock_instruments.DummyInstrument", + "instrument_name": "dac", + "post_delay": 0, + "unit": "V" + } + }, + "name": "dac" + }, + "dmm": { + "functions": {}, + "submodules": {}, + "__class__": "qcodes.instrument_drivers.mock_instruments.DummyInstrument", + "parameters": { + "IDN": { + "value": { + "vendor": null, + "model": "dmm", + "serial": null, + "firmware": null + }, + "ts": "2018-01-17 15:42:38", + "raw_value": { + "vendor": null, + "model": "dmm", + "serial": null, + "firmware": null + }, + "__class__": "qcodes.instrument.parameter.Parameter", + "full_name": "dmm_IDN", + "vals": "", + "label": "IDN", + "name": "IDN", + "inter_delay": 0, + "instrument": "qcodes.instrument_drivers.mock_instruments.DummyInstrument", + "instrument_name": "dmm", + "post_delay": 0, + "unit": "" + }, + "voltage": { + "value": 0, + "ts": "2018-01-17 15:42:38", + "raw_value": 0, + "__class__": "qcodes.instrument.parameter.Parameter", + "full_name": "dmm_voltage", + "vals": "", + "label": "Gate voltage", + "name": "voltage", + "inter_delay": 0, + "instrument": "qcodes.instrument_drivers.mock_instruments.DummyInstrument", + "instrument_name": "dmm", + "post_delay": 0, + "unit": "V" + } + }, + "name": "dmm" + } + }, + "parameters": {}, + "components": {}, + "default_measurement": [] + }, + "loop": { + "__class__": "qcodes.loops.ActiveLoop", + "sweep_values": { + "parameter": { + "value": 8, + "ts": "2018-01-17 15:42:44", + "raw_value": 8, + "__class__": "qcodes.instrument.parameter.Parameter", + "full_name": "dac_ch1", + "vals": "", + "label": "Gate ch1", + "name": "ch1", + "inter_delay": 0, + "instrument": "qcodes.instrument_drivers.mock_instruments.DummyInstrument", + "instrument_name": "dac", + "post_delay": 0, + "unit": "V" + }, + "values": [ + { + "first": 0.0, + "last": 20.0, + "num": 201, + "type": "linear" + } + ] + }, + "delay": 0.001, + "actions": [ + { + "value": 0, + "ts": "2018-01-17 15:42:38", + "raw_value": 0, + "__class__": "qcodes.instrument.parameter.Parameter", + "full_name": "dmm_voltage", + "vals": "", + "label": "Gate voltage", + "name": "voltage", + "inter_delay": 0, + "instrument": "qcodes.instrument_drivers.mock_instruments.DummyInstrument", + "instrument_name": "dmm", + "post_delay": 0, + "unit": "V" + } + ], + "then_actions": [], + "ts_start": "2018-01-17 15:43:00", + "use_threads": false, + "ts_end": "2018-01-17 15:43:04" + }, + "__class__": "qcodes.data.data_set.DataSet", + "location": "data/2018-01-17/#001_testsweep_15-42-57", + "arrays": { + "dac_ch1_set": { + "__class__": "qcodes.data.data_array.DataArray", + "raw_value": 8, + "full_name": "dac_ch1", + "vals": "", + "label": "Gate ch1", + "name": "ch1", + "inter_delay": 0, + "instrument": "qcodes.instrument_drivers.mock_instruments.DummyInstrument", + "instrument_name": "dac", + "post_delay": 0, + "unit": "V", + "array_id": "dac_ch1_set", + "shape": [ + 201 + ], + "action_indices": [], + "is_setpoint": true + }, + "dmm_voltage": { + "__class__": "qcodes.data.data_array.DataArray", + "raw_value": 0, + "full_name": "dmm_voltage", + "vals": "", + "label": "Gate voltage", + "name": "voltage", + "inter_delay": 0, + "instrument": "qcodes.instrument_drivers.mock_instruments.DummyInstrument", + "instrument_name": "dmm", + "post_delay": 0, + "unit": "V", + "array_id": "dmm_voltage", + "shape": [ + 201 + ], + "action_indices": [ + 0 + ], + "is_setpoint": false + } + }, + "formatter": "qcodes.data.gnuplot_format.GNUPlotFormat", + "io": "" +} diff --git 
a/tests/dataset/fixtures/data_2018_01_17/data_002_2D_test_15_43_14/__init__.py b/tests/dataset/fixtures/data_2018_01_17/data_002_2D_test_15_43_14/__init__.py new file mode 100644 index 00000000000..e69de29bb2d diff --git a/tests/dataset/fixtures/data_2018_01_17/data_002_2D_test_15_43_14/dac_ch1_set_dac_ch2_set.dat b/tests/dataset/fixtures/data_2018_01_17/data_002_2D_test_15_43_14/dac_ch1_set_dac_ch2_set.dat new file mode 100644 index 00000000000..497d9786d3d --- /dev/null +++ b/tests/dataset/fixtures/data_2018_01_17/data_002_2D_test_15_43_14/dac_ch1_set_dac_ch2_set.dat @@ -0,0 +1,44 @@ +# dac_ch1_set dac_ch2_set dmm_voltage +# "Gate ch1" "Gate ch2" "Gate voltage" +# 6 6 +0 0 9 +0 1 42 +0 2 80 +0 3 15 +0 4 28 +0 5 11 + +1 0 17 +1 1 95 +1 2 46 +1 3 39 +1 4 33 +1 5 40 + +2 0 61 +2 1 51 +2 2 95 +2 3 36 +2 4 73 +2 5 36 + +3 0 96 +3 1 32 +3 2 77 +3 3 81 +3 4 100 +3 5 14 + +4 0 71 +4 1 51 +4 2 41 +4 3 57 +4 4 61 +4 5 94 + +5 0 73 +5 1 81 +5 2 61 +5 3 21 +5 4 94 +5 5 1 diff --git a/tests/dataset/fixtures/data_2018_01_17/data_002_2D_test_15_43_14/snapshot.json b/tests/dataset/fixtures/data_2018_01_17/data_002_2D_test_15_43_14/snapshot.json new file mode 100644 index 00000000000..ed7775ab9a0 --- /dev/null +++ b/tests/dataset/fixtures/data_2018_01_17/data_002_2D_test_15_43_14/snapshot.json @@ -0,0 +1,286 @@ +{ + "station": { + "instruments": { + "dac": { + "functions": {}, + "submodules": {}, + "__class__": "qcodes.instrument_drivers.mock_instruments.DummyInstrument", + "parameters": { + "IDN": { + "value": { + "vendor": null, + "model": "dac", + "serial": null, + "firmware": null + }, + "ts": "2018-01-17 15:42:38", + "raw_value": { + "vendor": null, + "model": "dac", + "serial": null, + "firmware": null + }, + "__class__": "qcodes.instrument.parameter.Parameter", + "full_name": "dac_IDN", + "vals": "", + "label": "IDN", + "name": "IDN", + "inter_delay": 0, + "instrument": "qcodes.instrument_drivers.mock_instruments.DummyInstrument", + "instrument_name": "dac", + "post_delay": 0, + "unit": "" + }, + "ch1": { + "value": 20.0, + "ts": "2018-01-17 15:43:04", + "raw_value": 20.0, + "__class__": "qcodes.instrument.parameter.Parameter", + "full_name": "dac_ch1", + "vals": "", + "label": "Gate ch1", + "name": "ch1", + "inter_delay": 0, + "instrument": "qcodes.instrument_drivers.mock_instruments.DummyInstrument", + "instrument_name": "dac", + "post_delay": 0, + "unit": "V" + }, + "ch2": { + "value": 0, + "ts": "2018-01-17 15:42:38", + "raw_value": 0, + "__class__": "qcodes.instrument.parameter.Parameter", + "full_name": "dac_ch2", + "vals": "", + "label": "Gate ch2", + "name": "ch2", + "inter_delay": 0, + "instrument": "qcodes.instrument_drivers.mock_instruments.DummyInstrument", + "instrument_name": "dac", + "post_delay": 0, + "unit": "V" + }, + "verbose_channel": { + "value": 5, + "ts": "2018-01-17 15:42:50", + "raw_value": 5, + "__class__": "qcodes.instrument.parameter.Parameter", + "full_name": "dac_verbose_channel", + "label": "Verbose Channel", + "name": "verbose_channel", + "inter_delay": 0, + "instrument": "qcodes.instrument_drivers.mock_instruments.DummyInstrument", + "instrument_name": "dac", + "post_delay": 0, + "unit": "V" + } + }, + "name": "dac" + }, + "dmm": { + "functions": {}, + "submodules": {}, + "__class__": "qcodes.instrument_drivers.mock_instruments.DummyInstrument", + "parameters": { + "IDN": { + "value": { + "vendor": null, + "model": "dmm", + "serial": null, + "firmware": null + }, + "ts": "2018-01-17 15:42:38", + "raw_value": { + "vendor": null, + "model": "dmm", + "serial": null, + 
"firmware": null + }, + "__class__": "qcodes.instrument.parameter.Parameter", + "full_name": "dmm_IDN", + "vals": "", + "label": "IDN", + "name": "IDN", + "inter_delay": 0, + "instrument": "qcodes.instrument_drivers.mock_instruments.DummyInstrument", + "instrument_name": "dmm", + "post_delay": 0, + "unit": "" + }, + "voltage": { + "value": 0, + "ts": "2018-01-17 15:42:38", + "raw_value": 0, + "__class__": "qcodes.instrument.parameter.Parameter", + "full_name": "dmm_voltage", + "vals": "", + "label": "Gate voltage", + "name": "voltage", + "inter_delay": 0, + "instrument": "qcodes.instrument_drivers.mock_instruments.DummyInstrument", + "instrument_name": "dmm", + "post_delay": 0, + "unit": "V" + } + }, + "name": "dmm" + } + }, + "parameters": {}, + "components": {}, + "default_measurement": [] + }, + "loop": { + "__class__": "qcodes.loops.ActiveLoop", + "sweep_values": { + "parameter": { + "value": 20.0, + "ts": "2018-01-17 15:43:04", + "raw_value": 20.0, + "__class__": "qcodes.instrument.parameter.Parameter", + "full_name": "dac_ch1", + "vals": "", + "label": "Gate ch1", + "name": "ch1", + "inter_delay": 0, + "instrument": "qcodes.instrument_drivers.mock_instruments.DummyInstrument", + "instrument_name": "dac", + "post_delay": 0, + "unit": "V" + }, + "values": [ + { + "first": 0.0, + "last": 5.0, + "num": 6, + "type": "linear" + } + ] + }, + "delay": 0.1, + "actions": [ + { + "__class__": "qcodes.loops.ActiveLoop", + "sweep_values": { + "parameter": { + "value": 0, + "ts": "2018-01-17 15:42:38", + "raw_value": 0, + "__class__": "qcodes.instrument.parameter.Parameter", + "full_name": "dac_ch2", + "vals": "", + "label": "Gate ch2", + "name": "ch2", + "inter_delay": 0, + "instrument": "qcodes.instrument_drivers.mock_instruments.DummyInstrument", + "instrument_name": "dac", + "post_delay": 0, + "unit": "V" + }, + "values": [ + { + "first": 0.0, + "last": 5.0, + "num": 6, + "type": "linear" + } + ] + }, + "delay": 0.1, + "actions": [ + { + "value": 0, + "ts": "2018-01-17 15:42:38", + "raw_value": 0, + "__class__": "qcodes.instrument.parameter.Parameter", + "full_name": "dmm_voltage", + "vals": "", + "label": "Gate voltage", + "name": "voltage", + "inter_delay": 0, + "instrument": "qcodes.instrument_drivers.mock_instruments.DummyInstrument", + "instrument_name": "dmm", + "post_delay": 0, + "unit": "V" + } + ], + "then_actions": [] + } + ], + "then_actions": [], + "ts_start": "2018-01-17 15:43:15", + "use_threads": false, + "ts_end": "2018-01-17 15:43:20" + }, + "__class__": "qcodes.data.data_set.DataSet", + "location": "data/2018-01-17/#002_2D_test_15-43-14", + "arrays": { + "dac_ch1_set": { + "__class__": "qcodes.data.data_array.DataArray", + "raw_value": 20.0, + "full_name": "dac_ch1", + "vals": "", + "label": "Gate ch1", + "name": "ch1", + "inter_delay": 0, + "instrument": "qcodes.instrument_drivers.mock_instruments.DummyInstrument", + "instrument_name": "dac", + "post_delay": 0, + "unit": "V", + "array_id": "dac_ch1_set", + "shape": [ + 6 + ], + "action_indices": [], + "is_setpoint": true + }, + "dac_ch2_set": { + "__class__": "qcodes.data.data_array.DataArray", + "raw_value": 0, + "full_name": "dac_ch2", + "vals": "", + "label": "Gate ch2", + "name": "ch2", + "inter_delay": 0, + "instrument": "qcodes.instrument_drivers.mock_instruments.DummyInstrument", + "instrument_name": "dac", + "post_delay": 0, + "unit": "V", + "array_id": "dac_ch2_set", + "shape": [ + 6, + 6 + ], + "action_indices": [ + 0 + ], + "is_setpoint": true + }, + "dmm_voltage": { + "__class__": 
"qcodes.data.data_array.DataArray", + "raw_value": 0, + "full_name": "dmm_voltage", + "vals": "", + "label": "Gate voltage", + "name": "voltage", + "inter_delay": 0, + "instrument": "qcodes.instrument_drivers.mock_instruments.DummyInstrument", + "instrument_name": "dmm", + "post_delay": 0, + "unit": "V", + "array_id": "dmm_voltage", + "shape": [ + 6, + 6 + ], + "action_indices": [ + 0, + 0 + ], + "is_setpoint": false + } + }, + "formatter": "qcodes.data.gnuplot_format.GNUPlotFormat", + "io": "" +} diff --git a/tests/dataset/helper_functions.py b/tests/dataset/helper_functions.py new file mode 100644 index 00000000000..68c3a295b14 --- /dev/null +++ b/tests/dataset/helper_functions.py @@ -0,0 +1,131 @@ +from __future__ import annotations + +from collections.abc import Hashable, Mapping, Sequence +from functools import reduce +from operator import mul +from typing import TYPE_CHECKING + +import numpy as np +from numpy.testing import assert_array_equal + +if TYPE_CHECKING: + import pandas as pd + + +def verify_data_dict( + data: dict[str, dict[str, np.ndarray]], + dataframe: dict[str, pd.DataFrame] | None, + parameter_names: Sequence[str], + expected_names: Mapping[str, Sequence[str]], + expected_shapes: Mapping[str, Sequence[tuple[int, ...]]], + expected_values: Mapping[str, Sequence[np.ndarray]], +) -> None: + """ + Simple helper function to verify a dict of data. It can also optionally + + The expected names values + and shapes should be given as a dict with keys given by the dependent + parameters. Each value in the dicts should be the sequence of expected + names/shapes/values for that requested parameter and its dependencies. + The first element in the sequence must be the dependent parameter loaded. + + Args: + data: The dict data to verify the shape and content of. + dataframe: The data represented as a dict of Pandas DataFrames. + parameter_names: names of the parameters requested. These are expected + as top level keys in the dict. + expected_names: names of the parameters expected to be loaded for a + given parameter as a sequence indexed by the parameter. + expected_shapes: expected shapes of the parameters loaded. The shapes + should be stored as a tuple per parameter in a sequence containing + all the loaded parameters for a given requested parameter. 
+ expected_values: expected content of the data arrays stored in a + sequence + + """ + # check that all the expected parameters in the dict are + # included in the list of parameters + assert all(param in parameter_names for param in list(data.keys())) + if dataframe is not None: + assert all(param in parameter_names for + param in list(dataframe.keys())) + for param in parameter_names: + innerdata = data[param] + verify_data_dict_for_single_param(innerdata, + expected_names[param], + expected_shapes[param], + expected_values[param]) + if dataframe is not None: + innerdataframe = dataframe[param] + verify_dataframe_for_single_param(innerdataframe, + expected_names[param], + expected_shapes[param], + expected_values[param]) + + +def verify_data_dict_for_single_param( + datadict: dict[str, np.ndarray], + names: Sequence[str], + shapes: Sequence[tuple[int, ...]], + values, +): + # check that there are no unexpected elements in the dict + key_names = list(datadict.keys()) + assert set(key_names) == set(names) + + for name, shape, value in zip(names, shapes, values): + if datadict[name].dtype == np.dtype('O'): + mydata = np.concatenate(datadict[name]) + else: + mydata = datadict[name] + assert mydata.shape == shape + assert_array_equal(mydata, value) + + +def verify_dataframe_for_single_param( + dataframe: pd.DataFrame, + names: Sequence[str], + shapes: Sequence[tuple[int, ...]], + values, +): + import pandas as pd # pylint: disable=import-outside-toplevel + + # check that the dataframe has the same elements as index and columns + pandas_index_names = list(dataframe.index.names) + pandas_column_names = list(dataframe) + pandas_names: list[Hashable] = [] + for i in pandas_index_names: + if i is not None: + pandas_names.append(i) + for j in pandas_column_names: + if j is not None: + pandas_names.append(j) + assert set(pandas_names) == set(names) + + # let's check that the index is made up + # from all but the first column as expected + if len(values) > 1: + expected_index_values = values[1:] + index_values = dataframe.index.values + + nindexes = len(expected_index_values) + nrows = shapes[0] + + for row in range(len(nrows)): + row_index_values = index_values[row] + # one dimensional arrays will have single values for their indexes + # not tuples as they don't use multiindex.
Put these in tuples + # for easy comparison + if not isinstance(dataframe.index, pd.MultiIndex): + row_index_values = (row_index_values,) + + expected_values = \ + tuple(expected_index_values[indexnum].ravel()[row] + for indexnum in range(nindexes)) + assert row_index_values == expected_values + + simpledf = dataframe.reset_index() + + for name, shape, value in zip(names, shapes, values): + assert len(simpledf[name]) == reduce(mul, shape) + assert_array_equal(dataframe.reset_index()[name].to_numpy(), value.ravel()) diff --git a/tests/dataset/measurement/__init__.py b/tests/dataset/measurement/__init__.py new file mode 100644 index 00000000000..e69de29bb2d diff --git a/src/qcodes/tests/dataset/measurement/test_load_legacy_data.py b/tests/dataset/measurement/test_load_legacy_data.py similarity index 100% rename from src/qcodes/tests/dataset/measurement/test_load_legacy_data.py rename to tests/dataset/measurement/test_load_legacy_data.py diff --git a/src/qcodes/tests/dataset/measurement/test_measurement_context_manager.py b/tests/dataset/measurement/test_measurement_context_manager.py similarity index 100% rename from src/qcodes/tests/dataset/measurement/test_measurement_context_manager.py rename to tests/dataset/measurement/test_measurement_context_manager.py diff --git a/src/qcodes/tests/dataset/measurement/test_register_name.py b/tests/dataset/measurement/test_register_name.py similarity index 100% rename from src/qcodes/tests/dataset/measurement/test_register_name.py rename to tests/dataset/measurement/test_register_name.py diff --git a/src/qcodes/tests/dataset/measurement/test_shapes.py b/tests/dataset/measurement/test_shapes.py similarity index 100% rename from src/qcodes/tests/dataset/measurement/test_shapes.py rename to tests/dataset/measurement/test_shapes.py diff --git a/src/qcodes/tests/dataset/test__get_data_from_ds.py b/tests/dataset/test__get_data_from_ds.py similarity index 100% rename from src/qcodes/tests/dataset/test__get_data_from_ds.py rename to tests/dataset/test__get_data_from_ds.py diff --git a/src/qcodes/tests/dataset/test_concurrent_datasets.py b/tests/dataset/test_concurrent_datasets.py similarity index 100% rename from src/qcodes/tests/dataset/test_concurrent_datasets.py rename to tests/dataset/test_concurrent_datasets.py diff --git a/src/qcodes/tests/dataset/test_converters.py b/tests/dataset/test_converters.py similarity index 100% rename from src/qcodes/tests/dataset/test_converters.py rename to tests/dataset/test_converters.py diff --git a/src/qcodes/tests/dataset/test_data_set_cache.py b/tests/dataset/test_data_set_cache.py similarity index 100% rename from src/qcodes/tests/dataset/test_data_set_cache.py rename to tests/dataset/test_data_set_cache.py diff --git a/src/qcodes/tests/dataset/test_database_creation_and_upgrading.py b/tests/dataset/test_database_creation_and_upgrading.py similarity index 100% rename from src/qcodes/tests/dataset/test_database_creation_and_upgrading.py rename to tests/dataset/test_database_creation_and_upgrading.py diff --git a/src/qcodes/tests/dataset/test_database_extract_runs.py b/tests/dataset/test_database_extract_runs.py similarity index 100% rename from src/qcodes/tests/dataset/test_database_extract_runs.py rename to tests/dataset/test_database_extract_runs.py diff --git a/src/qcodes/tests/dataset/test_datasaver.py b/tests/dataset/test_datasaver.py similarity index 100% rename from src/qcodes/tests/dataset/test_datasaver.py rename to tests/dataset/test_datasaver.py diff --git a/src/qcodes/tests/dataset/test_dataset_basic.py 
b/tests/dataset/test_dataset_basic.py similarity index 100% rename from src/qcodes/tests/dataset/test_dataset_basic.py rename to tests/dataset/test_dataset_basic.py diff --git a/src/qcodes/tests/dataset/test_dataset_export.py b/tests/dataset/test_dataset_export.py similarity index 100% rename from src/qcodes/tests/dataset/test_dataset_export.py rename to tests/dataset/test_dataset_export.py diff --git a/src/qcodes/tests/dataset/test_dataset_in_mem_import.py b/tests/dataset/test_dataset_in_mem_import.py similarity index 100% rename from src/qcodes/tests/dataset/test_dataset_in_mem_import.py rename to tests/dataset/test_dataset_in_mem_import.py diff --git a/src/qcodes/tests/dataset/test_dataset_in_memory.py b/tests/dataset/test_dataset_in_memory.py similarity index 100% rename from src/qcodes/tests/dataset/test_dataset_in_memory.py rename to tests/dataset/test_dataset_in_memory.py diff --git a/src/qcodes/tests/dataset/test_dataset_in_memory_bacis.py b/tests/dataset/test_dataset_in_memory_bacis.py similarity index 100% rename from src/qcodes/tests/dataset/test_dataset_in_memory_bacis.py rename to tests/dataset/test_dataset_in_memory_bacis.py diff --git a/src/qcodes/tests/dataset/test_dataset_loading.py b/tests/dataset/test_dataset_loading.py similarity index 100% rename from src/qcodes/tests/dataset/test_dataset_loading.py rename to tests/dataset/test_dataset_loading.py diff --git a/src/qcodes/tests/dataset/test_dependencies.py b/tests/dataset/test_dependencies.py similarity index 100% rename from src/qcodes/tests/dataset/test_dependencies.py rename to tests/dataset/test_dependencies.py diff --git a/src/qcodes/tests/dataset/test_descriptions.py b/tests/dataset/test_descriptions.py similarity index 100% rename from src/qcodes/tests/dataset/test_descriptions.py rename to tests/dataset/test_descriptions.py diff --git a/src/qcodes/tests/dataset/test_detect_shape.py b/tests/dataset/test_detect_shape.py similarity index 100% rename from src/qcodes/tests/dataset/test_detect_shape.py rename to tests/dataset/test_detect_shape.py diff --git a/src/qcodes/tests/dataset/test_experiment_container.py b/tests/dataset/test_experiment_container.py similarity index 100% rename from src/qcodes/tests/dataset/test_experiment_container.py rename to tests/dataset/test_experiment_container.py diff --git a/src/qcodes/tests/dataset/test_export_info.py b/tests/dataset/test_export_info.py similarity index 100% rename from src/qcodes/tests/dataset/test_export_info.py rename to tests/dataset/test_export_info.py diff --git a/src/qcodes/tests/dataset/test_fix_functions.py b/tests/dataset/test_fix_functions.py similarity index 100% rename from src/qcodes/tests/dataset/test_fix_functions.py rename to tests/dataset/test_fix_functions.py diff --git a/src/qcodes/tests/dataset/test_guid_helpers.py b/tests/dataset/test_guid_helpers.py similarity index 100% rename from src/qcodes/tests/dataset/test_guid_helpers.py rename to tests/dataset/test_guid_helpers.py diff --git a/src/qcodes/tests/dataset/test_guids.py b/tests/dataset/test_guids.py similarity index 100% rename from src/qcodes/tests/dataset/test_guids.py rename to tests/dataset/test_guids.py diff --git a/src/qcodes/tests/dataset/test_links.py b/tests/dataset/test_links.py similarity index 100% rename from src/qcodes/tests/dataset/test_links.py rename to tests/dataset/test_links.py diff --git a/src/qcodes/tests/dataset/test_measurement_extensions.py b/tests/dataset/test_measurement_extensions.py similarity index 100% rename from 
src/qcodes/tests/dataset/test_measurement_extensions.py rename to tests/dataset/test_measurement_extensions.py diff --git a/src/qcodes/tests/dataset/test_metadata.py b/tests/dataset/test_metadata.py similarity index 100% rename from src/qcodes/tests/dataset/test_metadata.py rename to tests/dataset/test_metadata.py diff --git a/src/qcodes/tests/dataset/test_nested_measurements.py b/tests/dataset/test_nested_measurements.py similarity index 100% rename from src/qcodes/tests/dataset/test_nested_measurements.py rename to tests/dataset/test_nested_measurements.py diff --git a/src/qcodes/tests/dataset/test_paramspec.py b/tests/dataset/test_paramspec.py similarity index 100% rename from src/qcodes/tests/dataset/test_paramspec.py rename to tests/dataset/test_paramspec.py diff --git a/src/qcodes/tests/dataset/test_plotting.py b/tests/dataset/test_plotting.py similarity index 100% rename from src/qcodes/tests/dataset/test_plotting.py rename to tests/dataset/test_plotting.py diff --git a/src/qcodes/tests/dataset/test_snapshot.py b/tests/dataset/test_snapshot.py similarity index 100% rename from src/qcodes/tests/dataset/test_snapshot.py rename to tests/dataset/test_snapshot.py diff --git a/src/qcodes/tests/dataset/test_sqlite_base.py b/tests/dataset/test_sqlite_base.py similarity index 100% rename from src/qcodes/tests/dataset/test_sqlite_base.py rename to tests/dataset/test_sqlite_base.py diff --git a/src/qcodes/tests/dataset/test_sqlite_connection.py b/tests/dataset/test_sqlite_connection.py similarity index 100% rename from src/qcodes/tests/dataset/test_sqlite_connection.py rename to tests/dataset/test_sqlite_connection.py diff --git a/src/qcodes/tests/dataset/test_sqlitesettings.py b/tests/dataset/test_sqlitesettings.py similarity index 100% rename from src/qcodes/tests/dataset/test_sqlitesettings.py rename to tests/dataset/test_sqlitesettings.py diff --git a/src/qcodes/tests/dataset/test_string_data.py b/tests/dataset/test_string_data.py similarity index 100% rename from src/qcodes/tests/dataset/test_string_data.py rename to tests/dataset/test_string_data.py diff --git a/src/qcodes/tests/dataset/test_subscribing.py b/tests/dataset/test_subscribing.py similarity index 100% rename from src/qcodes/tests/dataset/test_subscribing.py rename to tests/dataset/test_subscribing.py diff --git a/tests/dataset_generators.py b/tests/dataset_generators.py new file mode 100644 index 00000000000..680afa79b73 --- /dev/null +++ b/tests/dataset_generators.py @@ -0,0 +1,33 @@ +import numpy as np + +from qcodes.dataset.descriptions.dependencies import InterDependencies_ +from qcodes.dataset.descriptions.param_spec import ParamSpecBase + + +def dataset_with_outliers_generator(ds, data_offset=5, low_outlier=-3, + high_outlier=1, background_noise=True): + x = ParamSpecBase('x', 'numeric', label='Flux', unit='e^2/hbar') + t = ParamSpecBase('t', 'numeric', label='Time', unit='s') + z = ParamSpecBase('z', 'numeric', label='Majorana number', unit='Anyon') + + idps = InterDependencies_(dependencies={z: (x, t)}) + ds.set_interdependencies(idps) + ds.mark_started() + + npoints = 50 + xvals = np.linspace(0, 1, npoints) + tvals = np.linspace(0, 1, npoints) + for counter, xv in enumerate(xvals): + if background_noise and ( + counter < round(npoints/2.3) or counter > round(npoints/1.8)): + data = np.random.rand(npoints)-data_offset + else: + data = xv * np.linspace(0,1,npoints) + if counter == round(npoints/1.9): + data[round(npoints/1.9)] = high_outlier + if counter == round(npoints/2.1): + data[round(npoints/2.5)] = low_outlier + 
ds.add_results([{'x': xv, 't': tv, 'z': z} + for z, tv in zip(data, tvals)]) + ds.mark_completed() + return ds diff --git a/tests/delegate/__init__.py b/tests/delegate/__init__.py new file mode 100644 index 00000000000..e69de29bb2d diff --git a/tests/delegate/conftest.py b/tests/delegate/conftest.py new file mode 100644 index 00000000000..f232bf46a18 --- /dev/null +++ b/tests/delegate/conftest.py @@ -0,0 +1,56 @@ +import os +import pathlib + +import pytest + +from qcodes.instrument_drivers.mock_instruments import MockDAC, MockField, MockLockin +from qcodes.station import Station + +PARENT_DIR = pathlib.Path(__file__).parent.absolute() + + +@pytest.fixture(scope="function") +def dac(): + return MockDAC('dac', num_channels=3) + + +@pytest.fixture(scope="function") +def field_x(): + return MockField('field_x') + + +@pytest.fixture(scope="function") +def lockin(): + _lockin = MockLockin( + name='lockin' + ) + return _lockin + + +@pytest.fixture(scope="function") +def station(dac, lockin, field_x): + _station = Station() + _station.add_component(dac) + _station.add_component(lockin) + _station.add_component(field_x) + return _station + + +@pytest.fixture() +def chip_config(): + return os.path.join(PARENT_DIR, "data/chip.yml") + + +@pytest.fixture() +def chip(station, chip_config): + if hasattr(station, "MockChip_123"): + return station.MockChip_123 + + station.load_config_file(chip_config) + _chip = station.load_MockChip_123(station=station) + return _chip + + +@pytest.fixture() +def chip_config_typo(): + return os.path.join(PARENT_DIR, "data/chip_typo.yml") diff --git a/tests/delegate/data/__init__.py b/tests/delegate/data/__init__.py new file mode 100644 index 00000000000..e69de29bb2d diff --git a/tests/delegate/data/chip.yml b/tests/delegate/data/chip.yml new file mode 100644 index 00000000000..20dac4f2e04 --- /dev/null +++ b/tests/delegate/data/chip.yml @@ -0,0 +1,77 @@ +instruments: + MockChip_123: + type: qcodes.instrument.delegate.InstrumentGroup + init: + submodules_type: qcodes.instrument.delegate.DelegateInstrument + submodules: + device1: + parameters: + gate: dac.ch01.voltage + source: + - lockin.frequency + - lockin.amplitude + - lockin.phase + - lockin.time_constant + drain: + - lockin.X + - lockin.Y + + channel_device: + parameters: + readout: lockin.phase + channels: + gate_1: dac.ch01 + + channel_device_custom: + parameters: + readout: lockin.phase + channels: + gate_1: + channel: dac.ch01 + type: qcodes.instrument_drivers.mock_instruments.MockCustomChannel + current_valid_range: [-0.5, 0] + fast_gate: + channel: dac.ch02 + type: qcodes.instrument_drivers.mock_instruments.DummyChannel + + set_initial_values_on_load: true + initial_values: + device1: + gate.step: 5e-4 + gate.inter_delay: 12.5e-4 + channel_device: + readout: 1e-5 + gate_1.voltage.post_delay: 0.01 + channel_device_custom: + readout: 1e-5 + + field: + type: qcodes.instrument.delegate.DelegateInstrument + init: + parameters: + X: field_x.field + ramp_rate: field_x.ramp_rate + ramp_X: + - field_x.ramp_rate + - field_x.field + set_initial_values_on_load: true + initial_values: + ramp_rate: 0.02 + setters: + X: + method: field_x.set_field + block: false + units: + X: T + ramp_rate: T/min + + switch: + type: qcodes.instrument.delegate.DelegateChannelInstrument + init: + channels: dac.channels + parameters: + state: + - dac_output + - smc + - gnd + - bus diff --git a/tests/delegate/data/chip_typo.yml b/tests/delegate/data/chip_typo.yml new file mode 100644 index 00000000000..61cf81c48bc --- /dev/null +++ 
b/tests/delegate/data/chip_typo.yml @@ -0,0 +1,42 @@ +instruments: + MockChip_123: + type: qcodes.instrument.delegate.InstrumentGroup + init: + submodules_type: qcodes.instrument.delegate.DelegateInstrument + submodules: + device1: + gate: dac.ch01.voltage + source: + - lockin.frequency + - lockin.amplitude + - lockin.phase + - lockin.time_constant + drain: + - lockin.X + - lockin.Y + + channel_device: + parameters: + readout: lockin.phase + channels: + gate_1: dac.ch01 + + channel_device_custom: + parameters: + readout: lockin.phase + channels: + type: qcodes.instrument_drivers.mock_instruments.MockCustomChannel + gate_1: + channel: dac.ch01 + current_valid_range: [-0.5, 0] + + set_initial_values_on_load: true + initial_values: + device1: + gate.step: 5e-4 + gate.inter_delay: 12.5e-4 + channel_device: + readout: 1e-5 + gate_1.voltage.post_delay: 0.01 + channel_device_custom: + readout: 1e-5 diff --git a/src/qcodes/tests/delegate/test_delegate_instrument.py b/tests/delegate/test_delegate_instrument.py similarity index 100% rename from src/qcodes/tests/delegate/test_delegate_instrument.py rename to tests/delegate/test_delegate_instrument.py diff --git a/src/qcodes/tests/delegate/test_device.py b/tests/delegate/test_device.py similarity index 100% rename from src/qcodes/tests/delegate/test_device.py rename to tests/delegate/test_device.py diff --git a/tests/driver_test_case.py b/tests/driver_test_case.py new file mode 100644 index 00000000000..f6321cc369f --- /dev/null +++ b/tests/driver_test_case.py @@ -0,0 +1,51 @@ +# ruff: noqa: F401 +""" +Module left for backwards compatibility. Will be deprecated and removed along the rest of qcodes.tests""" + +from __future__ import annotations + +import unittest + +from qcodes.extensions import ( + DriverTestCase, +) +from qcodes.instrument import Instrument + + +def test_instruments(verbosity: int = 1) -> None: + """ + Discover available instruments and test them all + Unlike test_instrument, this does NOT reload tests prior to running them + + optional verbosity (default 1) + """ + import qcodes + import qcodes.instrument_drivers as qcdrivers + + driver_path = list(qcdrivers.__path__)[0] + suite = unittest.defaultTestLoader.discover( + driver_path, top_level_dir=list(qcodes.__path__)[0] + ) + unittest.TextTestRunner(verbosity=verbosity).run(suite) + + +def test_instrument(instrument_testcase, verbosity: int = 2) -> None: + """ + Runs one instrument testcase + Reloads the test case before running it + + optional verbosity (default 2) + """ + import importlib + import sys + + # reload the test case + module_name = instrument_testcase.__module__ + class_name = instrument_testcase.__name__ + del sys.modules[module_name] + + module = importlib.import_module(module_name) + reloaded_testcase = getattr(module, class_name) + + suite = unittest.defaultTestLoader.loadTestsFromTestCase(reloaded_testcase) + unittest.TextTestRunner(verbosity=verbosity).run(suite) diff --git a/tests/drivers/AlazarTech/__init__.py b/tests/drivers/AlazarTech/__init__.py new file mode 100644 index 00000000000..186c0c6e0b0 --- /dev/null +++ b/tests/drivers/AlazarTech/__init__.py @@ -0,0 +1 @@ +"""Test for Alazar card driver and related infrastructure""" diff --git a/src/qcodes/tests/drivers/AlazarTech/test_alazar_api.py b/tests/drivers/AlazarTech/test_alazar_api.py similarity index 100% rename from src/qcodes/tests/drivers/AlazarTech/test_alazar_api.py rename to tests/drivers/AlazarTech/test_alazar_api.py diff --git a/src/qcodes/tests/drivers/AlazarTech/test_alazar_buffer.py 
b/tests/drivers/AlazarTech/test_alazar_buffer.py similarity index 100% rename from src/qcodes/tests/drivers/AlazarTech/test_alazar_buffer.py rename to tests/drivers/AlazarTech/test_alazar_buffer.py diff --git a/src/qcodes/tests/drivers/AlazarTech/test_dll_wrapper.py b/tests/drivers/AlazarTech/test_dll_wrapper.py similarity index 100% rename from src/qcodes/tests/drivers/AlazarTech/test_dll_wrapper.py rename to tests/drivers/AlazarTech/test_dll_wrapper.py diff --git a/tests/drivers/__init__.py b/tests/drivers/__init__.py new file mode 100644 index 00000000000..792d6005489 --- /dev/null +++ b/tests/drivers/__init__.py @@ -0,0 +1 @@ +# diff --git a/tests/drivers/auxiliary_files/__init__.py b/tests/drivers/auxiliary_files/__init__.py new file mode 100644 index 00000000000..792d6005489 --- /dev/null +++ b/tests/drivers/auxiliary_files/__init__.py @@ -0,0 +1 @@ +# diff --git a/tests/drivers/auxiliary_files/awgSeqDataSets.xsd b/tests/drivers/auxiliary_files/awgSeqDataSets.xsd new file mode 100644 index 00000000000..70571092bd7 --- /dev/null +++ b/tests/drivers/auxiliary_files/awgSeqDataSets.xsd @@ -0,0 +1,147 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/tests/drivers/keysight_b1500/__init__.py b/tests/drivers/keysight_b1500/__init__.py new file mode 100644 index 00000000000..e69de29bb2d diff --git a/tests/drivers/keysight_b1500/b1500_driver_tests/__init__.py b/tests/drivers/keysight_b1500/b1500_driver_tests/__init__.py new file mode 100644 index 00000000000..e69de29bb2d diff --git a/tests/drivers/keysight_b1500/b1500_driver_tests/conftest.py b/tests/drivers/keysight_b1500/b1500_driver_tests/conftest.py new file mode 100644 index 00000000000..069254fdd88 --- /dev/null +++ b/tests/drivers/keysight_b1500/b1500_driver_tests/conftest.py @@ -0,0 +1,42 @@ +from unittest.mock import MagicMock, PropertyMock + +import pytest +from pytest import FixtureRequest +from pyvisa import VisaIOError + +from qcodes.instrument_drivers.Keysight.keysightb1500.KeysightB1500_base import ( + KeysightB1500, +) + + +@pytest.fixture(name="b1500") +def _make_b1500(request: FixtureRequest): + request.addfinalizer(KeysightB1500.close_all) + + try: + resource_name = "insert_Keysight_B2200_VISA_resource_name_here" + instance = KeysightB1500("SPA", address=resource_name) + except (ValueError, VisaIOError): + # Either there is no VISA lib installed or there was no real + # instrument found at the specified address => use simulated instrument + instance = KeysightB1500( + "SPA", address="GPIB::1::INSTR", pyvisa_sim_file="keysight_b1500.yaml" + ) + + instance.get_status() + instance.reset() + + yield instance + + +@pytest.fixture(name="mainframe") +def _make_mainframe(): + PropertyMock() + mainframe = MagicMock() + name_parts = PropertyMock(return_value=["mainframe"]) + type(mainframe).name_parts = name_parts + short_name = PropertyMock(return_value="short_name") + type(mainframe).short_name = short_name + full_name = PropertyMock(return_value="mainframe") + type(mainframe).full_name = full_name + yield mainframe diff --git a/src/qcodes/tests/drivers/keysight_b1500/b1500_driver_tests/test_b1500.py b/tests/drivers/keysight_b1500/b1500_driver_tests/test_b1500.py similarity index 100% rename from src/qcodes/tests/drivers/keysight_b1500/b1500_driver_tests/test_b1500.py rename 
to tests/drivers/keysight_b1500/b1500_driver_tests/test_b1500.py diff --git a/src/qcodes/tests/drivers/keysight_b1500/b1500_driver_tests/test_b1500_module.py b/tests/drivers/keysight_b1500/b1500_driver_tests/test_b1500_module.py similarity index 100% rename from src/qcodes/tests/drivers/keysight_b1500/b1500_driver_tests/test_b1500_module.py rename to tests/drivers/keysight_b1500/b1500_driver_tests/test_b1500_module.py diff --git a/src/qcodes/tests/drivers/keysight_b1500/b1500_driver_tests/test_b1511b_smu.py b/tests/drivers/keysight_b1500/b1500_driver_tests/test_b1511b_smu.py similarity index 100% rename from src/qcodes/tests/drivers/keysight_b1500/b1500_driver_tests/test_b1511b_smu.py rename to tests/drivers/keysight_b1500/b1500_driver_tests/test_b1511b_smu.py diff --git a/src/qcodes/tests/drivers/keysight_b1500/b1500_driver_tests/test_b1517a_smu.py b/tests/drivers/keysight_b1500/b1500_driver_tests/test_b1517a_smu.py similarity index 100% rename from src/qcodes/tests/drivers/keysight_b1500/b1500_driver_tests/test_b1517a_smu.py rename to tests/drivers/keysight_b1500/b1500_driver_tests/test_b1517a_smu.py diff --git a/src/qcodes/tests/drivers/keysight_b1500/b1500_driver_tests/test_b1520a_cmu.py b/tests/drivers/keysight_b1500/b1500_driver_tests/test_b1520a_cmu.py similarity index 100% rename from src/qcodes/tests/drivers/keysight_b1500/b1500_driver_tests/test_b1520a_cmu.py rename to tests/drivers/keysight_b1500/b1500_driver_tests/test_b1520a_cmu.py diff --git a/src/qcodes/tests/drivers/keysight_b1500/b1500_driver_tests/test_sampling_measurement.py b/tests/drivers/keysight_b1500/b1500_driver_tests/test_sampling_measurement.py similarity index 100% rename from src/qcodes/tests/drivers/keysight_b1500/b1500_driver_tests/test_sampling_measurement.py rename to tests/drivers/keysight_b1500/b1500_driver_tests/test_sampling_measurement.py diff --git a/src/qcodes/tests/drivers/keysight_b1500/test_MessageBuilder.py b/tests/drivers/keysight_b1500/test_MessageBuilder.py similarity index 100% rename from src/qcodes/tests/drivers/keysight_b1500/test_MessageBuilder.py rename to tests/drivers/keysight_b1500/test_MessageBuilder.py diff --git a/src/qcodes/tests/drivers/keysight_b1500/test_commandList.py b/tests/drivers/keysight_b1500/test_commandList.py similarity index 100% rename from src/qcodes/tests/drivers/keysight_b1500/test_commandList.py rename to tests/drivers/keysight_b1500/test_commandList.py diff --git a/src/qcodes/tests/drivers/test_Agilent_E8257D.py b/tests/drivers/test_Agilent_E8257D.py similarity index 100% rename from src/qcodes/tests/drivers/test_Agilent_E8257D.py rename to tests/drivers/test_Agilent_E8257D.py diff --git a/src/qcodes/tests/drivers/test_AimTTi_PL601P.py b/tests/drivers/test_AimTTi_PL601P.py similarity index 100% rename from src/qcodes/tests/drivers/test_AimTTi_PL601P.py rename to tests/drivers/test_AimTTi_PL601P.py diff --git a/src/qcodes/tests/drivers/test_Keithley_2450.py b/tests/drivers/test_Keithley_2450.py similarity index 100% rename from src/qcodes/tests/drivers/test_Keithley_2450.py rename to tests/drivers/test_Keithley_2450.py diff --git a/src/qcodes/tests/drivers/test_Keysight_33XXX.py b/tests/drivers/test_Keysight_33XXX.py similarity index 100% rename from src/qcodes/tests/drivers/test_Keysight_33XXX.py rename to tests/drivers/test_Keysight_33XXX.py diff --git a/src/qcodes/tests/drivers/test_Keysight_N6705B.py b/tests/drivers/test_Keysight_N6705B.py similarity index 100% rename from src/qcodes/tests/drivers/test_Keysight_N6705B.py rename to 
tests/drivers/test_Keysight_N6705B.py diff --git a/src/qcodes/tests/drivers/test_MercuryiPS.py b/tests/drivers/test_MercuryiPS.py similarity index 100% rename from src/qcodes/tests/drivers/test_MercuryiPS.py rename to tests/drivers/test_MercuryiPS.py diff --git a/src/qcodes/tests/drivers/test_Rigol_DS1074Z.py b/tests/drivers/test_Rigol_DS1074Z.py similarity index 100% rename from src/qcodes/tests/drivers/test_Rigol_DS1074Z.py rename to tests/drivers/test_Rigol_DS1074Z.py diff --git a/src/qcodes/tests/drivers/test_ami430.py b/tests/drivers/test_ami430.py similarity index 100% rename from src/qcodes/tests/drivers/test_ami430.py rename to tests/drivers/test_ami430.py diff --git a/src/qcodes/tests/drivers/test_ami430_visa.py b/tests/drivers/test_ami430_visa.py similarity index 100% rename from src/qcodes/tests/drivers/test_ami430_visa.py rename to tests/drivers/test_ami430_visa.py diff --git a/src/qcodes/tests/drivers/test_keithley_26xx.py b/tests/drivers/test_keithley_26xx.py similarity index 100% rename from src/qcodes/tests/drivers/test_keithley_26xx.py rename to tests/drivers/test_keithley_26xx.py diff --git a/src/qcodes/tests/drivers/test_keithley_3706A.py b/tests/drivers/test_keithley_3706A.py similarity index 100% rename from src/qcodes/tests/drivers/test_keithley_3706A.py rename to tests/drivers/test_keithley_3706A.py diff --git a/src/qcodes/tests/drivers/test_keithley_7510.py b/tests/drivers/test_keithley_7510.py similarity index 100% rename from src/qcodes/tests/drivers/test_keithley_7510.py rename to tests/drivers/test_keithley_7510.py diff --git a/src/qcodes/tests/drivers/test_keithley_s46.py b/tests/drivers/test_keithley_s46.py similarity index 100% rename from src/qcodes/tests/drivers/test_keithley_s46.py rename to tests/drivers/test_keithley_s46.py diff --git a/src/qcodes/tests/drivers/test_keysight_34465a.py b/tests/drivers/test_keysight_34465a.py similarity index 100% rename from src/qcodes/tests/drivers/test_keysight_34465a.py rename to tests/drivers/test_keysight_34465a.py diff --git a/src/qcodes/tests/drivers/test_keysight_34934a.py b/tests/drivers/test_keysight_34934a.py similarity index 100% rename from src/qcodes/tests/drivers/test_keysight_34934a.py rename to tests/drivers/test_keysight_34934a.py diff --git a/src/qcodes/tests/drivers/test_keysight_34980a.py b/tests/drivers/test_keysight_34980a.py similarity index 100% rename from src/qcodes/tests/drivers/test_keysight_34980a.py rename to tests/drivers/test_keysight_34980a.py diff --git a/src/qcodes/tests/drivers/test_keysight_b220x.py b/tests/drivers/test_keysight_b220x.py similarity index 100% rename from src/qcodes/tests/drivers/test_keysight_b220x.py rename to tests/drivers/test_keysight_b220x.py diff --git a/src/qcodes/tests/drivers/test_keysight_e4980a.py b/tests/drivers/test_keysight_e4980a.py similarity index 100% rename from src/qcodes/tests/drivers/test_keysight_e4980a.py rename to tests/drivers/test_keysight_e4980a.py diff --git a/src/qcodes/tests/drivers/test_keysight_n9030b.py b/tests/drivers/test_keysight_n9030b.py similarity index 100% rename from src/qcodes/tests/drivers/test_keysight_n9030b.py rename to tests/drivers/test_keysight_n9030b.py diff --git a/src/qcodes/tests/drivers/test_lakeshore.py b/tests/drivers/test_lakeshore.py similarity index 100% rename from src/qcodes/tests/drivers/test_lakeshore.py rename to tests/drivers/test_lakeshore.py diff --git a/src/qcodes/tests/drivers/test_lakeshore_325.py b/tests/drivers/test_lakeshore_325.py similarity index 100% rename from 
src/qcodes/tests/drivers/test_lakeshore_325.py rename to tests/drivers/test_lakeshore_325.py diff --git a/src/qcodes/tests/drivers/test_lakeshore_325_legacy.py b/tests/drivers/test_lakeshore_325_legacy.py similarity index 100% rename from src/qcodes/tests/drivers/test_lakeshore_325_legacy.py rename to tests/drivers/test_lakeshore_325_legacy.py diff --git a/src/qcodes/tests/drivers/test_lakeshore_335.py b/tests/drivers/test_lakeshore_335.py similarity index 100% rename from src/qcodes/tests/drivers/test_lakeshore_335.py rename to tests/drivers/test_lakeshore_335.py diff --git a/src/qcodes/tests/drivers/test_lakeshore_336.py b/tests/drivers/test_lakeshore_336.py similarity index 100% rename from src/qcodes/tests/drivers/test_lakeshore_336.py rename to tests/drivers/test_lakeshore_336.py diff --git a/src/qcodes/tests/drivers/test_lakeshore_336_legacy.py b/tests/drivers/test_lakeshore_336_legacy.py similarity index 100% rename from src/qcodes/tests/drivers/test_lakeshore_336_legacy.py rename to tests/drivers/test_lakeshore_336_legacy.py diff --git a/src/qcodes/tests/drivers/test_lakeshore_file_parser.py b/tests/drivers/test_lakeshore_file_parser.py similarity index 100% rename from src/qcodes/tests/drivers/test_lakeshore_file_parser.py rename to tests/drivers/test_lakeshore_file_parser.py diff --git a/src/qcodes/tests/drivers/test_rto_1000.py b/tests/drivers/test_rto_1000.py similarity index 100% rename from src/qcodes/tests/drivers/test_rto_1000.py rename to tests/drivers/test_rto_1000.py diff --git a/src/qcodes/tests/drivers/test_stahl.py b/tests/drivers/test_stahl.py similarity index 100% rename from src/qcodes/tests/drivers/test_stahl.py rename to tests/drivers/test_stahl.py diff --git a/src/qcodes/tests/drivers/test_tektronix_AWG5014C.py b/tests/drivers/test_tektronix_AWG5014C.py similarity index 100% rename from src/qcodes/tests/drivers/test_tektronix_AWG5014C.py rename to tests/drivers/test_tektronix_AWG5014C.py diff --git a/src/qcodes/tests/drivers/test_tektronix_AWG5208.py b/tests/drivers/test_tektronix_AWG5208.py similarity index 100% rename from src/qcodes/tests/drivers/test_tektronix_AWG5208.py rename to tests/drivers/test_tektronix_AWG5208.py diff --git a/src/qcodes/tests/drivers/test_tektronix_AWG70000A.py b/tests/drivers/test_tektronix_AWG70000A.py similarity index 100% rename from src/qcodes/tests/drivers/test_tektronix_AWG70000A.py rename to tests/drivers/test_tektronix_AWG70000A.py diff --git a/src/qcodes/tests/drivers/test_tektronix_dpo7200xx.py b/tests/drivers/test_tektronix_dpo7200xx.py similarity index 100% rename from src/qcodes/tests/drivers/test_tektronix_dpo7200xx.py rename to tests/drivers/test_tektronix_dpo7200xx.py diff --git a/src/qcodes/tests/drivers/test_weinchel.py b/tests/drivers/test_weinchel.py similarity index 100% rename from src/qcodes/tests/drivers/test_weinchel.py rename to tests/drivers/test_weinchel.py diff --git a/src/qcodes/tests/drivers/test_yokogawa_gs200.py b/tests/drivers/test_yokogawa_gs200.py similarity index 100% rename from src/qcodes/tests/drivers/test_yokogawa_gs200.py rename to tests/drivers/test_yokogawa_gs200.py diff --git a/tests/helpers/__init__.py b/tests/helpers/__init__.py new file mode 100644 index 00000000000..e69de29bb2d diff --git a/src/qcodes/tests/helpers/test_delegate_attribues.py b/tests/helpers/test_delegate_attribues.py similarity index 100% rename from src/qcodes/tests/helpers/test_delegate_attribues.py rename to tests/helpers/test_delegate_attribues.py diff --git a/src/qcodes/tests/helpers/test_json_encoder.py 
b/tests/helpers/test_json_encoder.py similarity index 100% rename from src/qcodes/tests/helpers/test_json_encoder.py rename to tests/helpers/test_json_encoder.py diff --git a/src/qcodes/tests/helpers/test_strip_attrs.py b/tests/helpers/test_strip_attrs.py similarity index 100% rename from src/qcodes/tests/helpers/test_strip_attrs.py rename to tests/helpers/test_strip_attrs.py diff --git a/tests/instrument_mocks.py b/tests/instrument_mocks.py new file mode 100644 index 00000000000..679da7ec127 --- /dev/null +++ b/tests/instrument_mocks.py @@ -0,0 +1,52 @@ +# ruff: noqa: F401 +# left for backwards compatibility will be deprecated and removed +# along with the rest of qcodes.tests +from __future__ import annotations + +import logging +import time +from collections.abc import Generator, Sequence +from functools import partial +from typing import Any + +import numpy as np + +from qcodes.instrument import ChannelList, Instrument, InstrumentBase, InstrumentChannel +from qcodes.instrument_drivers.mock_instruments import ( + ArraySetPointParam, + ComplexArraySetPointParam, + DmmExponentialParameter, + DmmGaussParameter, + DummyBase, + DummyChannel, + DummyChannelInstrument, + DummyFailingInstrument, + DummyInstrument, + DummyInstrumentWithMeasurement, + DummyParameterWithSetpoints1D, + DummyParameterWithSetpoints2D, + DummyParameterWithSetpointsComplex, + GeneratedSetPoints, + MockCustomChannel, + MockDAC, + MockDACChannel, + MockField, + MockLockin, + MockMetaParabola, + MockParabola, + Multi2DSetPointParam, + Multi2DSetPointParam2Sizes, + MultiScalarParam, + MultiSetPointParam, + SnapShotTestInstrument, + setpoint_generator, +) +from qcodes.parameters import ( + ArrayParameter, + MultiParameter, + Parameter, + ParameterWithSetpoints, + ParamRawDataType, +) +from qcodes.validators import Arrays, ComplexNumbers, Numbers, OnOff, Strings +from qcodes.validators import Sequence as ValidatorSequence diff --git a/tests/mockers/__init__.py b/tests/mockers/__init__.py new file mode 100644 index 00000000000..e69de29bb2d diff --git a/src/qcodes/tests/mockers/test_simulated_ats_api.py b/tests/mockers/test_simulated_ats_api.py similarity index 100% rename from src/qcodes/tests/mockers/test_simulated_ats_api.py rename to tests/mockers/test_simulated_ats_api.py diff --git a/tests/parameter/__init__.py b/tests/parameter/__init__.py new file mode 100644 index 00000000000..e69de29bb2d diff --git a/tests/parameter/conftest.py b/tests/parameter/conftest.py new file mode 100644 index 00000000000..59ceb6d8324 --- /dev/null +++ b/tests/parameter/conftest.py @@ -0,0 +1,208 @@ +from __future__ import annotations + +from collections import namedtuple +from collections.abc import Generator +from typing import Any, Callable, Literal, TypeVar + +import pytest + +import qcodes.validators as vals +from qcodes.instrument import InstrumentBase +from qcodes.instrument_drivers.mock_instruments import DummyChannelInstrument +from qcodes.parameters import ParamDataType, Parameter, ParamRawDataType + +T = TypeVar("T") + +NOT_PASSED: Literal["NOT_PASSED"] = "NOT_PASSED" + + +@pytest.fixture(params=(True, False, NOT_PASSED)) +def snapshot_get(request: pytest.FixtureRequest) -> bool | Literal["NOT_PASSED"]: + return request.param + + +@pytest.fixture(params=(True, False, NOT_PASSED)) +def snapshot_value(request: pytest.FixtureRequest) -> bool | Literal["NOT_PASSED"]: + return request.param + + +@pytest.fixture(params=(None, False, NOT_PASSED)) +def get_cmd( + request: pytest.FixtureRequest, +) -> None | Literal[False] | 
Literal["NOT_PASSED"]: + return request.param + + +@pytest.fixture(params=(True, False, NOT_PASSED)) +def get_if_invalid(request: pytest.FixtureRequest) -> bool | Literal["NOT_PASSED"]: + return request.param + + +@pytest.fixture(params=(True, False, None, NOT_PASSED)) +def update(request: pytest.FixtureRequest) -> bool | None | Literal["NOT_PASSED"]: + return request.param + + +@pytest.fixture(params=(True, False)) +def cache_is_valid(request: pytest.FixtureRequest) -> bool: + return request.param + + +@pytest.fixture(name="dummy_instrument") +def _make_dummy_instrument() -> Generator[DummyChannelInstrument, None, None]: + instr = DummyChannelInstrument("dummy") + yield instr + instr.close() + + +class GettableParam(Parameter): + """ Parameter that keeps track of number of get operations""" + def __init__(self, *args: Any, **kwargs: Any): + super().__init__(*args, **kwargs) + self._get_count = 0 + + def get_raw(self) -> int: + self._get_count += 1 + return 42 + + +class BetterGettableParam(Parameter): + """ Parameter that keeps track of number of get operations, + But can actually store values""" + def __init__(self, *args: Any, **kwargs: Any): + super().__init__(*args, **kwargs) + self._get_count = 0 + + def get_raw(self) -> Any: + self._get_count += 1 + return self.cache.raw_value + + +class SettableParam(Parameter): + """ Parameter that keeps track of number of set operations""" + def __init__(self, *args: Any, **kwargs: Any): + self._set_count = 0 + super().__init__(*args, **kwargs) + + def set_raw(self, value: Any) -> None: + self._set_count += 1 + + +class OverwriteGetParam(Parameter): + """ Parameter that overwrites get.""" + def __init__(self, *args: Any, **kwargs: Any): + super().__init__(*args, **kwargs) + self._value = 42 + self.set_count = 0 + self.get_count = 0 + + def get(self) -> int: + self.get_count += 1 + return self._value + + +class OverwriteSetParam(Parameter): + """ Parameter that overwrites set.""" + def __init__(self, *args: Any, **kwargs: Any): + super().__init__(*args, **kwargs) + self._value = 42 + self.set_count = 0 + self.get_count = 0 + + def set(self, value: Any) -> None: + self.set_count += 1 + self._value = value + + +class GetSetRawParameter(Parameter): + """ Parameter that implements get and set raw""" + def __init__(self, *args: Any, **kwargs: Any): + super().__init__(*args, **kwargs) + + def get_raw(self) -> ParamRawDataType: + return self.cache.raw_value + + def set_raw(self, value: ParamRawDataType) -> None: + pass + + +class BookkeepingValidator(vals.Validator[T]): + """ + Validator that keeps track of what it validates + """ + + def __init__( + self, min_value: float = -float("inf"), max_value: float = float("inf") + ): + self.values_validated: list[T] = [] + + def validate(self, value: T, context: str = "") -> None: + self.values_validated.append(value) + + is_numeric = True + +class MemoryParameter(Parameter): + def __init__(self, get_cmd: None | Callable[[], Any] = None, **kwargs: Any): + self.set_values: list[Any] = [] + self.get_values: list[Any] = [] + super().__init__(set_cmd=self.add_set_value, + get_cmd=self.create_get_func(get_cmd), **kwargs) + + def add_set_value(self, value: ParamDataType) -> None: + self.set_values.append(value) + + def create_get_func( + self, func: None | Callable[[], ParamDataType] + ) -> Callable[[], ParamDataType]: + def get_func() -> ParamDataType: + if func is not None: + val = func() + else: + val = self.cache.raw_value + self.get_values.append(val) + return val + return get_func + + +class 
VirtualParameter(Parameter): + def __init__(self, name: str, param: Parameter, **kwargs: Any): + self._param = param + super().__init__(name=name, **kwargs) + + @property + def underlying_instrument(self) -> InstrumentBase | None: + return self._param.instrument + + def get_raw(self) -> ParamRawDataType: + return self._param.get() + + +blank_instruments = ( + None, # no instrument at all + namedtuple('noname', '')(), # no .name + namedtuple('blank', 'name')('') # blank .name +) +named_instrument = namedtuple('yesname', 'name')('astro') + + +class ParameterMemory: + + def __init__(self) -> None: + self._value: Any | None = None + + def get(self) -> ParamDataType: + return self._value + + def set(self, value: ParamDataType) -> None: + self._value = value + + def set_p_prefixed(self, val: int) -> None: + self._value = f'PVAL: {val:d}' + + @staticmethod + def parse_set_p(val: int) -> str: + return f'{val:d}' + + @staticmethod + def strip_prefix(val: str) -> int: + return int(val[6:]) diff --git a/src/qcodes/tests/parameter/test_array_parameter.py b/tests/parameter/test_array_parameter.py similarity index 100% rename from src/qcodes/tests/parameter/test_array_parameter.py rename to tests/parameter/test_array_parameter.py diff --git a/src/qcodes/tests/parameter/test_combined_par.py b/tests/parameter/test_combined_par.py similarity index 100% rename from src/qcodes/tests/parameter/test_combined_par.py rename to tests/parameter/test_combined_par.py diff --git a/src/qcodes/tests/parameter/test_delegate_parameter.py b/tests/parameter/test_delegate_parameter.py similarity index 100% rename from src/qcodes/tests/parameter/test_delegate_parameter.py rename to tests/parameter/test_delegate_parameter.py diff --git a/src/qcodes/tests/parameter/test_elapsed_time_parameter.py b/tests/parameter/test_elapsed_time_parameter.py similarity index 100% rename from src/qcodes/tests/parameter/test_elapsed_time_parameter.py rename to tests/parameter/test_elapsed_time_parameter.py diff --git a/src/qcodes/tests/parameter/test_function.py b/tests/parameter/test_function.py similarity index 100% rename from src/qcodes/tests/parameter/test_function.py rename to tests/parameter/test_function.py diff --git a/src/qcodes/tests/parameter/test_get_latest.py b/tests/parameter/test_get_latest.py similarity index 100% rename from src/qcodes/tests/parameter/test_get_latest.py rename to tests/parameter/test_get_latest.py diff --git a/src/qcodes/tests/parameter/test_get_set_parser.py b/tests/parameter/test_get_set_parser.py similarity index 100% rename from src/qcodes/tests/parameter/test_get_set_parser.py rename to tests/parameter/test_get_set_parser.py diff --git a/src/qcodes/tests/parameter/test_get_set_wrapping.py b/tests/parameter/test_get_set_wrapping.py similarity index 100% rename from src/qcodes/tests/parameter/test_get_set_wrapping.py rename to tests/parameter/test_get_set_wrapping.py diff --git a/src/qcodes/tests/parameter/test_group_parameter.py b/tests/parameter/test_group_parameter.py similarity index 100% rename from src/qcodes/tests/parameter/test_group_parameter.py rename to tests/parameter/test_group_parameter.py diff --git a/src/qcodes/tests/parameter/test_instrument_ref_parameter.py b/tests/parameter/test_instrument_ref_parameter.py similarity index 100% rename from src/qcodes/tests/parameter/test_instrument_ref_parameter.py rename to tests/parameter/test_instrument_ref_parameter.py diff --git a/src/qcodes/tests/parameter/test_issequence.py b/tests/parameter/test_issequence.py similarity index 100% rename from 
src/qcodes/tests/parameter/test_issequence.py rename to tests/parameter/test_issequence.py diff --git a/src/qcodes/tests/parameter/test_issequenceof.py b/tests/parameter/test_issequenceof.py similarity index 100% rename from src/qcodes/tests/parameter/test_issequenceof.py rename to tests/parameter/test_issequenceof.py diff --git a/src/qcodes/tests/parameter/test_make_sweep.py b/tests/parameter/test_make_sweep.py similarity index 100% rename from src/qcodes/tests/parameter/test_make_sweep.py rename to tests/parameter/test_make_sweep.py diff --git a/src/qcodes/tests/parameter/test_multi_parameter.py b/tests/parameter/test_multi_parameter.py similarity index 100% rename from src/qcodes/tests/parameter/test_multi_parameter.py rename to tests/parameter/test_multi_parameter.py diff --git a/src/qcodes/tests/parameter/test_non_gettable_parameter.py b/tests/parameter/test_non_gettable_parameter.py similarity index 100% rename from src/qcodes/tests/parameter/test_non_gettable_parameter.py rename to tests/parameter/test_non_gettable_parameter.py diff --git a/src/qcodes/tests/parameter/test_on_off_mapping.py b/tests/parameter/test_on_off_mapping.py similarity index 100% rename from src/qcodes/tests/parameter/test_on_off_mapping.py rename to tests/parameter/test_on_off_mapping.py diff --git a/src/qcodes/tests/parameter/test_parameter_basics.py b/tests/parameter/test_parameter_basics.py similarity index 100% rename from src/qcodes/tests/parameter/test_parameter_basics.py rename to tests/parameter/test_parameter_basics.py diff --git a/src/qcodes/tests/parameter/test_parameter_cache.py b/tests/parameter/test_parameter_cache.py similarity index 100% rename from src/qcodes/tests/parameter/test_parameter_cache.py rename to tests/parameter/test_parameter_cache.py diff --git a/src/qcodes/tests/parameter/test_parameter_context_manager.py b/tests/parameter/test_parameter_context_manager.py similarity index 100% rename from src/qcodes/tests/parameter/test_parameter_context_manager.py rename to tests/parameter/test_parameter_context_manager.py diff --git a/src/qcodes/tests/parameter/test_parameter_ramp.py b/tests/parameter/test_parameter_ramp.py similarity index 100% rename from src/qcodes/tests/parameter/test_parameter_ramp.py rename to tests/parameter/test_parameter_ramp.py diff --git a/src/qcodes/tests/parameter/test_parameter_registration.py b/tests/parameter/test_parameter_registration.py similarity index 100% rename from src/qcodes/tests/parameter/test_parameter_registration.py rename to tests/parameter/test_parameter_registration.py diff --git a/src/qcodes/tests/parameter/test_parameter_scale_offset.py b/tests/parameter/test_parameter_scale_offset.py similarity index 100% rename from src/qcodes/tests/parameter/test_parameter_scale_offset.py rename to tests/parameter/test_parameter_scale_offset.py diff --git a/src/qcodes/tests/parameter/test_parameter_validation.py b/tests/parameter/test_parameter_validation.py similarity index 100% rename from src/qcodes/tests/parameter/test_parameter_validation.py rename to tests/parameter/test_parameter_validation.py diff --git a/src/qcodes/tests/parameter/test_parameter_with_setpoints.py b/tests/parameter/test_parameter_with_setpoints.py similarity index 100% rename from src/qcodes/tests/parameter/test_parameter_with_setpoints.py rename to tests/parameter/test_parameter_with_setpoints.py diff --git a/src/qcodes/tests/parameter/test_permissive_range.py b/tests/parameter/test_permissive_range.py similarity index 100% rename from 
src/qcodes/tests/parameter/test_permissive_range.py rename to tests/parameter/test_permissive_range.py diff --git a/src/qcodes/tests/parameter/test_scaled_parameter.py b/tests/parameter/test_scaled_parameter.py similarity index 100% rename from src/qcodes/tests/parameter/test_scaled_parameter.py rename to tests/parameter/test_scaled_parameter.py diff --git a/src/qcodes/tests/parameter/test_snapshot.py b/tests/parameter/test_snapshot.py similarity index 100% rename from src/qcodes/tests/parameter/test_snapshot.py rename to tests/parameter/test_snapshot.py diff --git a/src/qcodes/tests/parameter/test_val_mapping.py b/tests/parameter/test_val_mapping.py similarity index 100% rename from src/qcodes/tests/parameter/test_val_mapping.py rename to tests/parameter/test_val_mapping.py diff --git a/src/qcodes/tests/parameter/test_validators.py b/tests/parameter/test_validators.py similarity index 100% rename from src/qcodes/tests/parameter/test_validators.py rename to tests/parameter/test_validators.py diff --git a/tests/sphinx_extension/__init__.py b/tests/sphinx_extension/__init__.py new file mode 100644 index 00000000000..e69de29bb2d diff --git a/src/qcodes/tests/sphinx_extension/test_parse_parameter_attr.py b/tests/sphinx_extension/test_parse_parameter_attr.py similarity index 100% rename from src/qcodes/tests/sphinx_extension/test_parse_parameter_attr.py rename to tests/sphinx_extension/test_parse_parameter_attr.py diff --git a/src/qcodes/tests/test_abstract_instrument.py b/tests/test_abstract_instrument.py similarity index 100% rename from src/qcodes/tests/test_abstract_instrument.py rename to tests/test_abstract_instrument.py diff --git a/src/qcodes/tests/test_autoloadable_channels.py b/tests/test_autoloadable_channels.py similarity index 100% rename from src/qcodes/tests/test_autoloadable_channels.py rename to tests/test_autoloadable_channels.py diff --git a/src/qcodes/tests/test_channels.py b/tests/test_channels.py similarity index 100% rename from src/qcodes/tests/test_channels.py rename to tests/test_channels.py diff --git a/src/qcodes/tests/test_command.py b/tests/test_command.py similarity index 100% rename from src/qcodes/tests/test_command.py rename to tests/test_command.py diff --git a/src/qcodes/tests/test_config.py b/tests/test_config.py similarity index 100% rename from src/qcodes/tests/test_config.py rename to tests/test_config.py diff --git a/src/qcodes/tests/test_deprecate.py b/tests/test_deprecate.py similarity index 100% rename from src/qcodes/tests/test_deprecate.py rename to tests/test_deprecate.py diff --git a/src/qcodes/tests/test_field_vector.py b/tests/test_field_vector.py similarity index 100% rename from src/qcodes/tests/test_field_vector.py rename to tests/test_field_vector.py diff --git a/src/qcodes/tests/test_installation_info.py b/tests/test_installation_info.py similarity index 100% rename from src/qcodes/tests/test_installation_info.py rename to tests/test_installation_info.py diff --git a/src/qcodes/tests/test_instrument.py b/tests/test_instrument.py similarity index 100% rename from src/qcodes/tests/test_instrument.py rename to tests/test_instrument.py diff --git a/src/qcodes/tests/test_interactive_widget.py b/tests/test_interactive_widget.py similarity index 100% rename from src/qcodes/tests/test_interactive_widget.py rename to tests/test_interactive_widget.py diff --git a/src/qcodes/tests/test_logger.py b/tests/test_logger.py similarity index 100% rename from src/qcodes/tests/test_logger.py rename to tests/test_logger.py diff --git 
a/src/qcodes/tests/test_metadata.py b/tests/test_metadata.py similarity index 100% rename from src/qcodes/tests/test_metadata.py rename to tests/test_metadata.py diff --git a/src/qcodes/tests/test_monitor.py b/tests/test_monitor.py similarity index 100% rename from src/qcodes/tests/test_monitor.py rename to tests/test_monitor.py diff --git a/src/qcodes/tests/test_plot_utils.py b/tests/test_plot_utils.py similarity index 100% rename from src/qcodes/tests/test_plot_utils.py rename to tests/test_plot_utils.py diff --git a/src/qcodes/tests/test_snapshot.py b/tests/test_snapshot.py similarity index 100% rename from src/qcodes/tests/test_snapshot.py rename to tests/test_snapshot.py diff --git a/src/qcodes/tests/test_station.py b/tests/test_station.py similarity index 100% rename from src/qcodes/tests/test_station.py rename to tests/test_station.py diff --git a/src/qcodes/tests/test_sweep_values.py b/tests/test_sweep_values.py similarity index 100% rename from src/qcodes/tests/test_sweep_values.py rename to tests/test_sweep_values.py diff --git a/src/qcodes/tests/test_testutils.py b/tests/test_testutils.py similarity index 100% rename from src/qcodes/tests/test_testutils.py rename to tests/test_testutils.py diff --git a/src/qcodes/tests/test_threading.py b/tests/test_threading.py similarity index 100% rename from src/qcodes/tests/test_threading.py rename to tests/test_threading.py diff --git a/src/qcodes/tests/test_visa.py b/tests/test_visa.py similarity index 100% rename from src/qcodes/tests/test_visa.py rename to tests/test_visa.py diff --git a/tests/utils/__init__.py b/tests/utils/__init__.py new file mode 100644 index 00000000000..e69de29bb2d diff --git a/src/qcodes/tests/utils/test_attributes_set_to_context_manager.py b/tests/utils/test_attributes_set_to_context_manager.py similarity index 100% rename from src/qcodes/tests/utils/test_attributes_set_to_context_manager.py rename to tests/utils/test_attributes_set_to_context_manager.py diff --git a/src/qcodes/tests/utils/test_class_strings.py b/tests/utils/test_class_strings.py similarity index 100% rename from src/qcodes/tests/utils/test_class_strings.py rename to tests/utils/test_class_strings.py diff --git a/src/qcodes/tests/utils/test_isfunction.py b/tests/utils/test_isfunction.py similarity index 100% rename from src/qcodes/tests/utils/test_isfunction.py rename to tests/utils/test_isfunction.py diff --git a/src/qcodes/tests/utils/test_partial_with_docstring.py b/tests/utils/test_partial_with_docstring.py similarity index 100% rename from src/qcodes/tests/utils/test_partial_with_docstring.py rename to tests/utils/test_partial_with_docstring.py diff --git a/tests/validators/__init__.py b/tests/validators/__init__.py new file mode 100644 index 00000000000..e69de29bb2d diff --git a/tests/validators/conftest.py b/tests/validators/conftest.py new file mode 100644 index 00000000000..e890ad4d390 --- /dev/null +++ b/tests/validators/conftest.py @@ -0,0 +1,8 @@ +class AClass: + + def method_a(self) -> None: + raise RuntimeError('function should not get called') + + +def a_func() -> None: + pass diff --git a/src/qcodes/tests/validators/test_arrays.py b/tests/validators/test_arrays.py similarity index 100% rename from src/qcodes/tests/validators/test_arrays.py rename to tests/validators/test_arrays.py diff --git a/src/qcodes/tests/validators/test_basic.py b/tests/validators/test_basic.py similarity index 100% rename from src/qcodes/tests/validators/test_basic.py rename to tests/validators/test_basic.py diff --git 
a/src/qcodes/tests/validators/test_bool.py b/tests/validators/test_bool.py similarity index 100% rename from src/qcodes/tests/validators/test_bool.py rename to tests/validators/test_bool.py diff --git a/src/qcodes/tests/validators/test_callable.py b/tests/validators/test_callable.py similarity index 100% rename from src/qcodes/tests/validators/test_callable.py rename to tests/validators/test_callable.py diff --git a/src/qcodes/tests/validators/test_complex.py b/tests/validators/test_complex.py similarity index 100% rename from src/qcodes/tests/validators/test_complex.py rename to tests/validators/test_complex.py diff --git a/src/qcodes/tests/validators/test_dict.py b/tests/validators/test_dict.py similarity index 100% rename from src/qcodes/tests/validators/test_dict.py rename to tests/validators/test_dict.py diff --git a/src/qcodes/tests/validators/test_enum.py b/tests/validators/test_enum.py similarity index 100% rename from src/qcodes/tests/validators/test_enum.py rename to tests/validators/test_enum.py diff --git a/src/qcodes/tests/validators/test_ints.py b/tests/validators/test_ints.py similarity index 100% rename from src/qcodes/tests/validators/test_ints.py rename to tests/validators/test_ints.py diff --git a/src/qcodes/tests/validators/test_lists.py b/tests/validators/test_lists.py similarity index 100% rename from src/qcodes/tests/validators/test_lists.py rename to tests/validators/test_lists.py diff --git a/src/qcodes/tests/validators/test_multi_type.py b/tests/validators/test_multi_type.py similarity index 100% rename from src/qcodes/tests/validators/test_multi_type.py rename to tests/validators/test_multi_type.py diff --git a/src/qcodes/tests/validators/test_multi_type_and.py b/tests/validators/test_multi_type_and.py similarity index 100% rename from src/qcodes/tests/validators/test_multi_type_and.py rename to tests/validators/test_multi_type_and.py diff --git a/src/qcodes/tests/validators/test_multi_type_or.py b/tests/validators/test_multi_type_or.py similarity index 100% rename from src/qcodes/tests/validators/test_multi_type_or.py rename to tests/validators/test_multi_type_or.py diff --git a/src/qcodes/tests/validators/test_multiples.py b/tests/validators/test_multiples.py similarity index 100% rename from src/qcodes/tests/validators/test_multiples.py rename to tests/validators/test_multiples.py diff --git a/src/qcodes/tests/validators/test_numbers.py b/tests/validators/test_numbers.py similarity index 100% rename from src/qcodes/tests/validators/test_numbers.py rename to tests/validators/test_numbers.py diff --git a/src/qcodes/tests/validators/test_permissive_ints.py b/tests/validators/test_permissive_ints.py similarity index 100% rename from src/qcodes/tests/validators/test_permissive_ints.py rename to tests/validators/test_permissive_ints.py diff --git a/src/qcodes/tests/validators/test_permissive_multiples.py b/tests/validators/test_permissive_multiples.py similarity index 100% rename from src/qcodes/tests/validators/test_permissive_multiples.py rename to tests/validators/test_permissive_multiples.py diff --git a/src/qcodes/tests/validators/test_sequence.py b/tests/validators/test_sequence.py similarity index 100% rename from src/qcodes/tests/validators/test_sequence.py rename to tests/validators/test_sequence.py diff --git a/src/qcodes/tests/validators/test_string.py b/tests/validators/test_string.py similarity index 100% rename from src/qcodes/tests/validators/test_string.py rename to tests/validators/test_string.py From 3abb1963601b19e12948f0ab3c1090d43977348a Mon Sep 
17 00:00:00 2001 From: "Jens H. Nielsen" Date: Mon, 6 Nov 2023 11:17:54 +0100 Subject: [PATCH 3/9] Allow toplevel imports in new test locations --- pyproject.toml | 1 + 1 file changed, 1 insertion(+) diff --git a/pyproject.toml b/pyproject.toml index 2a2f3f4ec14..0cb6d261c23 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -284,6 +284,7 @@ known-first-party = ["qcodes"] # in tests and examples "docs/*" = ["TID253"] "src/qcodes/tests/*" = ["TID253"] +"tests/*" = ["TID253"] [tool.ruff.flake8-tidy-imports] # There modules are relatively slow to import From 4cbcf366725ff6df28e520bb446d1548287fb3f3 Mon Sep 17 00:00:00 2001 From: "Jens H. Nielsen" Date: Mon, 6 Nov 2023 11:18:33 +0100 Subject: [PATCH 4/9] move fixtures into tests --- .gitmodules | 2 +- db_files => tests/dataset/fixtures/db_files | 0 2 files changed, 1 insertion(+), 1 deletion(-) rename db_files => tests/dataset/fixtures/db_files (100%) diff --git a/.gitmodules b/.gitmodules index 8f672b56124..8752be00b78 100644 --- a/.gitmodules +++ b/.gitmodules @@ -1,5 +1,5 @@ [submodule "qcodes/tests/dataset/fixtures/db_files"] - path = db_files + path = tests/dataset/fixtures/db_files url = https://github.com/QCoDeS/qcodes_db_fixtures.git branch = main [submodule "typings"] diff --git a/db_files b/tests/dataset/fixtures/db_files similarity index 100% rename from db_files rename to tests/dataset/fixtures/db_files From 67e1b27472ce7048da9793d0c38b26272daeaf96 Mon Sep 17 00:00:00 2001 From: "Jens H. Nielsen" Date: Mon, 6 Nov 2023 11:35:35 +0100 Subject: [PATCH 5/9] Cleanup imports in tests --- tests/conftest.py | 3 +- tests/dataset/__init__.py | 2 +- tests/dataset/dond/test_do0d.py | 2 +- tests/dataset/dond/test_do1d.py | 9 +- tests/dataset/dond/test_doNd.py | 22 +- .../test_measurement_context_manager.py | 1055 +++++++++-------- .../test_database_creation_and_upgrading.py | 437 ++++--- tests/dataset/test_database_extract_runs.py | 279 +++-- tests/dataset/test_dataset_basic.py | 592 +++++---- tests/dataset/test_dependencies.py | 268 +++-- tests/dataset/test_fix_functions.py | 60 +- tests/dataset/test_nested_measurements.py | 140 ++- tests/dataset/test_sqlite_base.py | 87 +- tests/dataset/test_sqlite_connection.py | 104 +- tests/dataset/test_subscribing.py | 37 +- tests/driver_test_case.py | 51 - tests/drivers/test_tektronix_AWG70000A.py | 127 +- tests/instrument_mocks.py | 52 - tests/test_instrument.py | 2 +- tests/test_logger.py | 20 +- tests/test_station.py | 327 +++-- 21 files changed, 1839 insertions(+), 1837 deletions(-) delete mode 100644 tests/driver_test_case.py delete mode 100644 tests/instrument_mocks.py diff --git a/tests/conftest.py b/tests/conftest.py index 2d6fd537f72..ddb187f3137 100644 --- a/tests/conftest.py +++ b/tests/conftest.py @@ -28,6 +28,7 @@ if TYPE_CHECKING: from qcodes.configuration import DotDict + def pytest_configure(config: pytest.Config) -> None: config.addinivalue_line("markers", "win32: tests that only run under windows") @@ -158,7 +159,7 @@ def _make_standalone_parameters_dataset( dataset: DataSet, ) -> Generator[DataSet, None, None]: n_params = 3 - n_rows = 10 ** 3 + n_rows = 10**3 params_indep = [ ParamSpecBase(f"param_{i}", "numeric", label=f"param_{i}", unit="V") for i in range(n_params) diff --git a/tests/dataset/__init__.py b/tests/dataset/__init__.py index 7362041bca4..c42508231e7 100644 --- a/tests/dataset/__init__.py +++ b/tests/dataset/__init__.py @@ -1,3 +1,3 @@ import pytest -pytest.register_assert_rewrite('qcodes.tests.dataset.helper_functions') 
+pytest.register_assert_rewrite("tests.dataset.helper_functions") diff --git a/tests/dataset/dond/test_do0d.py b/tests/dataset/dond/test_do0d.py index 1096050f5ac..39309dd69d1 100644 --- a/tests/dataset/dond/test_do0d.py +++ b/tests/dataset/dond/test_do0d.py @@ -14,7 +14,7 @@ Multi2DSetPointParam2Sizes, MultiSetPointParam, ) -from qcodes.tests.dataset.conftest import ArrayshapedParam +from tests.dataset.conftest import ArrayshapedParam @pytest.mark.usefixtures("plot_close", "experiment") diff --git a/tests/dataset/dond/test_do1d.py b/tests/dataset/dond/test_do1d.py index 88efe8557da..28d825b9164 100644 --- a/tests/dataset/dond/test_do1d.py +++ b/tests/dataset/dond/test_do1d.py @@ -19,13 +19,12 @@ MultiSetPointParam, ) from qcodes.parameters import Parameter -from qcodes.tests.dataset.conftest import ArrayshapedParam +from tests.dataset.conftest import ArrayshapedParam @pytest.mark.usefixtures("plot_close", "experiment") @pytest.mark.parametrize("delay", [0, 0.1, 1]) def test_do1d_with_real_parameter(_param_set, _param, delay) -> None: - start = 0 stop = 1 num_points = 1 @@ -37,7 +36,6 @@ def test_do1d_with_real_parameter(_param_set, _param, delay) -> None: @pytest.mark.parametrize("plot", [None, True, False]) @pytest.mark.parametrize("plot_config", [None, True, False]) def test_do1d_plot(_param_set, _param, plot, plot_config) -> None: - if plot_config is not None: config.dataset.dond_plot = plot_config @@ -56,7 +54,6 @@ def test_do1d_plot(_param_set, _param, plot, plot_config) -> None: @pytest.mark.usefixtures("plot_close", "experiment") @pytest.mark.parametrize("delay", [0, 0.1, 1]) def test_do1d_with_complex_parameter(_param_set, _param_complex, delay) -> None: - start = 0 stop = 1 num_points = 1 @@ -67,7 +64,6 @@ def test_do1d_with_complex_parameter(_param_set, _param_complex, delay) -> None: @pytest.mark.usefixtures("plot_close", "experiment") @pytest.mark.parametrize("delay", [0, 0.1, 1]) def test_do1d_with_2_parameter(_param_set, _param, _param_complex, delay) -> None: - start = 0 stop = 1 num_points = 1 @@ -78,7 +74,6 @@ def test_do1d_with_2_parameter(_param_set, _param, _param_complex, delay) -> Non @pytest.mark.usefixtures("plot_close", "experiment") @pytest.mark.parametrize("delay", [0, 0.1, 1]) def test_do1d_output_type_real_parameter(_param_set, _param, delay) -> None: - start = 0 stop = 1 num_points = 1 @@ -89,7 +84,6 @@ def test_do1d_output_type_real_parameter(_param_set, _param, delay) -> None: @pytest.mark.usefixtures("plot_close", "experiment") def test_do1d_output_data(_param, _param_set) -> None: - start = 0 stop = 1 num_points = 5 @@ -311,7 +305,6 @@ def test_do1d_additional_setpoints_shape( @pytest.mark.usefixtures("plot_close", "experiment") def test_do1d_break_condition(caplog: LogCaptureFixture, _param_set, _param) -> None: - start = 0 stop = 1 num_points = 5 diff --git a/tests/dataset/dond/test_doNd.py b/tests/dataset/dond/test_doNd.py index b7aae98623f..8ee46525cc6 100644 --- a/tests/dataset/dond/test_doNd.py +++ b/tests/dataset/dond/test_doNd.py @@ -33,8 +33,8 @@ MultiSetPointParam, ) from qcodes.parameters import ManualParameter, Parameter, ParameterBase -from qcodes.tests.dataset.conftest import ArrayshapedParam from qcodes.validators import Ints +from tests.dataset.conftest import ArrayshapedParam class TrackingParameter(Parameter): @@ -112,6 +112,7 @@ def test_cache_config(_param, _param_2, cache_config, cache_setting) -> None: else: assert ds.cache.live is None + def test_linear_sweep_properties(_param, _param_complex) -> None: start = 0 stop = 1 @@ -302,7 
+303,6 @@ def test_dond_multi_datasets_explicit_meas_names( def test_dond_multi_datasets_meas_names_len_mismatch(_param, experiment) -> None: - with pytest.raises( ValueError, match=re.escape("Got 2 measurement names but should create 1 dataset(s)."), @@ -552,7 +552,6 @@ def test_dond_1d_additional_setpoints_shape(_param, _param_set, num_points_p1) - @pytest.mark.parametrize("plot", [None, True, False]) @pytest.mark.parametrize("plot_config", [None, True, False]) def test_dond_1d_plot(_param_set, _param, plot, plot_config) -> None: - if plot_config is not None: config.dataset.dond_plot = plot_config @@ -568,7 +567,6 @@ def test_dond_1d_plot(_param_set, _param, plot, plot_config) -> None: @pytest.mark.usefixtures("plot_close", "experiment") def test_dond_1d_output_data(_param, _param_complex, _param_set) -> None: - sweep_1 = LinSweep(_param_set, 0, 0.5, 5, 0) exp_1 = dond(sweep_1, _param, _param_complex) @@ -600,7 +598,6 @@ def test_dond_1d_output_data(_param, _param_complex, _param_set) -> None: @pytest.mark.usefixtures("plot_close", "experiment") def test_dond_1d_output_type(_param, _param_complex, _param_set) -> None: - sweep_1 = LinSweep(_param_set, 0, 0.5, 2, 0) data_1 = dond(sweep_1, _param, _param_complex) @@ -773,7 +770,6 @@ def test_dond_2d_additional_setpoints_shape( @pytest.mark.parametrize("plot", [None, True, False]) @pytest.mark.parametrize("plot_config", [None, True, False]) def test_dond_2d_plot(_param_set, _param_set_2, _param, plot, plot_config) -> None: - if plot_config is not None: config.dataset.dond_plot = plot_config @@ -791,7 +787,6 @@ def test_dond_2d_plot(_param_set, _param_set_2, _param, plot, plot_config) -> No @pytest.mark.usefixtures("plot_close", "experiment") def test_dond_2d_output_type(_param, _param_complex, _param_set, _param_set_2) -> None: - sweep_1 = LinSweep(_param_set, 0, 0.5, 2, 0) sweep_2 = LinSweep(_param_set_2, 0.5, 1, 2, 0) @@ -846,7 +841,6 @@ def test_dond_2d_output_data(_param, _param_complex, _param_set, _param_set_2) - def test_dond_2d_multi_datasets_output_type( _param, _param_complex, _param_set, _param_set_2 ) -> None: - sweep_1 = LinSweep(_param_set, 0, 0.5, 2, 0) sweep_2 = LinSweep(_param_set_2, 0.5, 1, 2, 0) @@ -907,7 +901,6 @@ def test_dond_2d_multi_datasets_multi_exp_inconsistent_raises( def test_dond_2d_multiple_datasets_plot( _param_set, _param_set_2, _param, _param_2, plot, plot_config ) -> None: - if plot_config is not None: config.dataset.dond_plot = plot_config @@ -1033,7 +1026,6 @@ def test_dond_2d_multi_datasets_with_callable_output_data( @pytest.mark.usefixtures("plot_close", "experiment") def test_dond_together_sweep(_param_set, _param_set_2, _param, _param_2) -> None: - sweep_1 = LinSweep(_param_set, 0, 1, 10, 0) sweep_2 = LinSweep(_param_set_2, 1, 2, 10, 0) @@ -1062,7 +1054,6 @@ def test_dond_together_sweep(_param_set, _param_set_2, _param, _param_2) -> None @pytest.mark.usefixtures("plot_close", "experiment") def test_dond_together_sweep_sweeper(_param_set, _param_set_2, _param) -> None: - sweep_len = 10 delay_1 = 0.1 @@ -1371,7 +1362,6 @@ def test_together_sweep_validation(n_points_1, n_points_2) -> None: def test_empty_together_sweep_raises() -> None: - with pytest.raises( ValueError, match="A TogetherSweep must contain at least one sweep." 
): @@ -1490,7 +1480,6 @@ def test_dond_together_sweep_sweeper_wrong_mp_in_dataset_dependencies() -> None: @pytest.mark.usefixtures("plot_close", "experiment") def test_dond_together_sweep_parameter_with_setpoints(dummyinstrument) -> None: - outer_shape = 10 inner_shape = 15 @@ -1601,7 +1590,6 @@ def test_dond_together_sweep_parameter_with_setpoints_explicit_mapping( def test_dond_together_sweep_parameter_with_setpoints_explicit_mapping_and_callable( dummyinstrument, ) -> None: - outer_shape = 10 inner_shape = 15 @@ -1692,7 +1680,6 @@ def test_dond_sweeper_combinations(_param_set, _param_set_2, _param) -> None: @pytest.mark.usefixtures("plot_close", "experiment") def test_sweep_int_vs_float() -> None: - float_param = ManualParameter("float_param", initial_value=0.0) int_param = ManualParameter("int_param", vals=Ints(0, 100)) @@ -1736,7 +1723,6 @@ def test_post_action(mocker) -> None: @pytest.mark.usefixtures("plot_close", "experiment") def test_extra_log_info(caplog: LogCaptureFixture) -> None: - param_1 = ManualParameter("param_1", initial_value=0.0) param_2 = ManualParameter("param_2", initial_value=0.0) @@ -1749,7 +1735,6 @@ def test_extra_log_info(caplog: LogCaptureFixture) -> None: @pytest.mark.usefixtures("plot_close", "experiment") def test_default_log_info(caplog: LogCaptureFixture) -> None: - param_1 = ManualParameter("param_1", initial_value=0.0) param_2 = ManualParameter("param_2", initial_value=0.0) @@ -1761,7 +1746,6 @@ def test_default_log_info(caplog: LogCaptureFixture) -> None: @pytest.mark.usefixtures("plot_close", "experiment") def test_dond_get_after_set(_param_set, _param_set_2, _param) -> None: - n_points = 10 a = TrackingParameter("a", initial_value=0) @@ -1785,7 +1769,6 @@ def test_dond_get_after_set(_param_set, _param_set_2, _param) -> None: @pytest.mark.usefixtures("plot_close", "experiment") def test_dond_no_get_after_set(_param_set, _param_set_2, _param) -> None: - n_points = 10 a = TrackingParameter("a", initial_value=0) @@ -1809,7 +1792,6 @@ def test_dond_no_get_after_set(_param_set, _param_set_2, _param) -> None: @pytest.mark.usefixtures("plot_close", "experiment") def test_dond_get_after_set_stores_get_value(_param_set, _param_set_2, _param) -> None: - n_points = 11 a = GetReturnsCountParameter("a", initial_value=0) diff --git a/tests/dataset/measurement/test_measurement_context_manager.py b/tests/dataset/measurement/test_measurement_context_manager.py index acc3a7e527f..0b7186d9b99 100644 --- a/tests/dataset/measurement/test_measurement_context_manager.py +++ b/tests/dataset/measurement/test_measurement_context_manager.py @@ -25,7 +25,7 @@ from qcodes.dataset.sqlite.connection import atomic_transaction from qcodes.parameters import ManualParameter, Parameter, expand_setpoints_helper from qcodes.station import Station -from qcodes.tests.common import retry_until_does_not_throw +from tests.common import retry_until_does_not_throw def test_log_messages(caplog: LogCaptureFixture, meas_with_registered_param) -> None: @@ -72,13 +72,13 @@ def test_register_parameter_numbers(DAC, DMM) -> None: assert paramspec.name == str(my_param) assert paramspec.label == my_param.label assert paramspec.unit == my_param.unit - assert paramspec.type == 'numeric' + assert paramspec.type == "numeric" # we allow the registration of the EXACT same parameter twice... meas.register_parameter(my_param) # ... 
but not a different parameter with a new name - attrs = ['label', 'unit'] - vals = ['new label', 'new unit'] + attrs = ["label", "unit"] + vals = ["new label", "new unit"] for attr, val in zip(attrs, vals): old_val = getattr(my_param, attr) setattr(my_param, attr, val) @@ -92,7 +92,7 @@ def test_register_parameter_numbers(DAC, DMM) -> None: assert paramspec.name == str(my_param) assert paramspec.label == my_param.label assert paramspec.unit == my_param.unit - assert paramspec.type == 'numeric' + assert paramspec.type == "numeric" for parameter in parameters: with pytest.raises(ValueError): @@ -104,12 +104,14 @@ def test_register_parameter_numbers(DAC, DMM) -> None: meas.register_parameter(DMM.v1) meas.register_parameter(DMM.v2) meas.unregister_parameter(my_param) - meas.register_parameter(my_param, basis=(DAC.ch2,), - setpoints=(DMM.v1, DMM.v2)) - - assert set(meas.parameters.keys()) == {str(DAC.ch2), - str(DMM.v1), str(DMM.v2), - str(my_param)} + meas.register_parameter(my_param, basis=(DAC.ch2,), setpoints=(DMM.v1, DMM.v2)) + + assert set(meas.parameters.keys()) == { + str(DAC.ch2), + str(DMM.v1), + str(DMM.v2), + str(my_param), + } paramspec = meas.parameters[str(my_param)] assert paramspec.name == str(my_param) @@ -127,9 +129,9 @@ def test_register_custom_parameter(DAC) -> None: """ meas = Measurement() - name = 'V_modified' - unit = 'V^2' - label = 'square of the voltage' + name = "V_modified" + unit = "V^2" + label = "square of the voltage" meas.register_custom_parameter(name, label, unit) @@ -137,10 +139,10 @@ def test_register_custom_parameter(DAC) -> None: assert isinstance(meas.parameters[name], ParamSpecBase) assert meas.parameters[name].unit == unit assert meas.parameters[name].label == label - assert meas.parameters[name].type == 'numeric' + assert meas.parameters[name].type == "numeric" - newunit = 'V^3' - newlabel = 'cube of the voltage' + newunit = "V^3" + newlabel = "cube of the voltage" meas.unregister_parameter(name) meas.register_custom_parameter(name, newlabel, newunit) @@ -151,27 +153,26 @@ def test_register_custom_parameter(DAC) -> None: assert meas.parameters[name].label == newlabel with pytest.raises(ValueError): - meas.register_custom_parameter(name, label, unit, - setpoints=(DAC.ch1,)) + meas.register_custom_parameter(name, label, unit, setpoints=(DAC.ch1,)) with pytest.raises(ValueError): - meas.register_custom_parameter(name, label, unit, - basis=(DAC.ch2,)) + meas.register_custom_parameter(name, label, unit, basis=(DAC.ch2,)) meas.register_parameter(DAC.ch1) meas.register_parameter(DAC.ch2) - meas.register_custom_parameter('strange_dac') + meas.register_custom_parameter("strange_dac") meas.unregister_parameter(name) - meas.register_custom_parameter(name, label, unit, - setpoints=(DAC.ch1, str(DAC.ch2)), - basis=('strange_dac',)) + meas.register_custom_parameter( + name, label, unit, setpoints=(DAC.ch1, str(DAC.ch2)), basis=("strange_dac",) + ) assert len(meas.parameters) == 4 meas.parameters[name] with pytest.raises(ValueError): - meas.register_custom_parameter('double dependence', - 'label', 'unit', setpoints=(name,)) + meas.register_custom_parameter( + "double dependence", "label", "unit", setpoints=(name,) + ) def test_unregister_parameter(DAC, DMM) -> None: @@ -179,16 +180,14 @@ def test_unregister_parameter(DAC, DMM) -> None: Test the unregistering of parameters. 
""" - DAC.add_parameter('impedance', - get_cmd=lambda: 5) + DAC.add_parameter("impedance", get_cmd=lambda: 5) meas = Measurement() meas.register_parameter(DAC.ch2) meas.register_parameter(DMM.v1) meas.register_parameter(DMM.v2) - meas.register_parameter(DAC.ch1, basis=(DMM.v1, DMM.v2), - setpoints=(DAC.ch2,)) + meas.register_parameter(DAC.ch1, basis=(DMM.v1, DMM.v2), setpoints=(DAC.ch2,)) with pytest.raises(ValueError): meas.unregister_parameter(DAC.ch2) @@ -200,8 +199,7 @@ def test_unregister_parameter(DAC, DMM) -> None: meas.unregister_parameter(DMM.v2) meas.unregister_parameter(DAC.ch1) - assert set(meas.parameters.keys()) == {str(DAC.ch2), str(DMM.v1), - str(DMM.v2)} + assert set(meas.parameters.keys()) == {str(DAC.ch2), str(DMM.v1), str(DMM.v2)} meas.unregister_parameter(DAC.ch2) assert set(meas.parameters.keys()) == {str(DMM.v1), str(DMM.v2)} @@ -212,7 +210,7 @@ def test_unregister_parameter(DAC, DMM) -> None: meas.unregister_parameter(notparam) # unregistering something not registered should silently "succeed" - meas.unregister_parameter('totes_not_registered') + meas.unregister_parameter("totes_not_registered") meas.unregister_parameter(DAC.ch2) meas.unregister_parameter(DAC.ch2) @@ -224,22 +222,24 @@ def test_mixing_array_and_numeric(DAC, bg_writing) -> None: Test that mixing array and numeric types is okay """ meas = Measurement() - meas.register_parameter(DAC.ch1, paramtype='numeric') - meas.register_parameter(DAC.ch2, paramtype='array') + meas.register_parameter(DAC.ch1, paramtype="numeric") + meas.register_parameter(DAC.ch2, paramtype="array") with meas.run(write_in_background=bg_writing) as datasaver: - datasaver.add_result((DAC.ch1, np.array([DAC.ch1(), DAC.ch1()])), - (DAC.ch2, np.array([DAC.ch2(), DAC.ch1()]))) + datasaver.add_result( + (DAC.ch1, np.array([DAC.ch1(), DAC.ch1()])), + (DAC.ch2, np.array([DAC.ch2(), DAC.ch1()])), + ) def test_measurement_name_default(experiment, DAC, DMM) -> None: fmt = experiment.format_string exp_id = experiment.exp_id - default_name = 'results' + default_name = "results" meas = Measurement() - assert meas.name == '' + assert meas.name == "" meas.register_parameter(DAC.ch1) meas.register_parameter(DMM.v1, setpoints=[DAC.ch1]) @@ -257,7 +257,7 @@ def test_measurement_name_changed_via_attribute(experiment, DAC, DMM) -> None: fmt = experiment.format_string exp_id = experiment.exp_id - name = 'yolo' + name = "yolo" meas = Measurement() meas.name = name @@ -267,7 +267,7 @@ def test_measurement_name_changed_via_attribute(experiment, DAC, DMM) -> None: with meas.run() as datasaver: run_id = datasaver.run_id - expected_name = fmt.format('results', exp_id, run_id) + expected_name = fmt.format("results", exp_id, run_id) ds = datasaver.dataset assert isinstance(ds, DataSet) assert ds.table_name == expected_name @@ -278,7 +278,7 @@ def test_measurement_name_set_as_argument(experiment, DAC, DMM) -> None: fmt = experiment.format_string exp_id = experiment.exp_id - name = 'yolo' + name = "yolo" meas = Measurement(name=name, exp=experiment) @@ -287,7 +287,7 @@ def test_measurement_name_set_as_argument(experiment, DAC, DMM) -> None: with meas.run() as datasaver: run_id = datasaver.run_id - expected_name = fmt.format('results', exp_id, run_id) + expected_name = fmt.format("results", exp_id, run_id) ds = datasaver.dataset assert isinstance(ds, DataSet) assert ds.table_name == expected_name @@ -295,13 +295,12 @@ def test_measurement_name_set_as_argument(experiment, DAC, DMM) -> None: @settings(deadline=None) -@given(wp=hst.one_of(hst.integers(), 
hst.floats(allow_nan=False), - hst.text())) +@given(wp=hst.one_of(hst.integers(), hst.floats(allow_nan=False), hst.text())) @pytest.mark.usefixtures("empty_temp_db") def test_setting_write_period(wp) -> None: - new_experiment('firstexp', sample_name='no sample') + new_experiment("firstexp", sample_name="no sample") meas = Measurement() - meas.register_custom_parameter(name='dummy') + meas.register_custom_parameter(name="dummy") if isinstance(wp, str): with pytest.raises(ValueError): @@ -318,8 +317,7 @@ def test_setting_write_period(wp) -> None: @settings(deadline=None) -@given(wp=hst.one_of(hst.integers(), hst.floats(allow_nan=False), - hst.text())) +@given(wp=hst.one_of(hst.integers(), hst.floats(allow_nan=False), hst.text())) @pytest.mark.usefixtures("experiment") def test_setting_write_period_from_config(wp) -> None: qc.config.dataset.write_period = wp @@ -355,13 +353,11 @@ def test_setting_write_in_background_from_config(write_in_background) -> None: def test_method_chaining(DAC) -> None: ( Measurement() - .register_parameter(DAC.ch1) - .register_custom_parameter(name='freqax', - label='Frequency axis', - unit='Hz') - .add_before_run((lambda: None), ()) - .add_after_run((lambda: None), ()) - .add_subscriber((lambda values, idx, state: None), state=[]) + .register_parameter(DAC.ch1) + .register_custom_parameter(name="freqax", label="Frequency axis", unit="Hz") + .add_before_run((lambda: None), ()) + .add_after_run((lambda: None), ()) + .add_subscriber((lambda values, idx, state: None), state=[]) ) @@ -397,7 +393,7 @@ def action(lst, word): meas = Measurement() with pytest.raises(ValueError): - meas.add_before_run(action, 'no list!') + meas.add_before_run(action, "no list!") with pytest.raises(ValueError): meas.add_after_run(action, testlist) @@ -443,14 +439,12 @@ def collect_values_larger_than_7(results, length, state): meas.add_subscriber(collect_all_results, state=all_results_dict) assert len(meas.subscribers) == 1 - meas.add_subscriber(collect_values_larger_than_7, - state=values_larger_than_7) + meas.add_subscriber(collect_values_larger_than_7, state=values_larger_than_7) assert len(meas.subscribers) == 2 meas.write_period = 0.2 with meas.run() as datasaver: - # Assert that the measurement, runner, and datasaver # have added subscribers to the dataset ds = datasaver.dataset @@ -492,11 +486,13 @@ def collect_values_larger_than_7(results, length, state): # wrap the assertions. This is going to ensure that some time is # given to the Subscriber threads to finish exhausting the queue. 
@retry_until_does_not_throw( - exception_class_to_expect=AssertionError, delay=0.5, tries=20) + exception_class_to_expect=AssertionError, delay=0.5, tries=20 + ) def assert_states_updated_from_callbacks(): assert values_larger_than_7 == values_larger_than_7__expected - assert list(all_results_dict.keys()) == \ - [result_index for result_index in range(1, num + 1 + 1)] + assert list(all_results_dict.keys()) == [ + result_index for result_index in range(1, num + 1 + 1) + ] assert_states_updated_from_callbacks() @@ -558,8 +554,11 @@ def collect_x_vals(results, length, state): @pytest.mark.serial @pytest.mark.flaky(reruns=5) -@settings(deadline=None, max_examples=25, - suppress_health_check=(HealthCheck.function_scoped_fixture,)) +@settings( + deadline=None, + max_examples=25, + suppress_health_check=(HealthCheck.function_scoped_fixture,), +) @given(N=hst.integers(min_value=2000, max_value=3000)) def test_subscribers_called_for_all_data_points(experiment, DAC, DMM, N) -> None: def sub_get_x_vals(results, length, state): @@ -623,14 +622,14 @@ def test_datasaver_scalars( meas.register_parameter(DMM.v1, setpoints=(DAC.ch1,)) with meas.run() as datasaver: - for set_v, get_v in zip(set_values[:breakpoint], - get_values[:breakpoint]): + for set_v, get_v in zip(set_values[:breakpoint], get_values[:breakpoint]): datasaver.add_result((DAC.ch1, set_v), (DMM.v1, get_v)) assert datasaver._dataset.number_of_results == 0 sleep(write_period * 1.1) - datasaver.add_result((DAC.ch1, set_values[breakpoint]), - (DMM.v1, get_values[breakpoint])) + datasaver.add_result( + (DAC.ch1, set_values[breakpoint]), (DMM.v1, get_values[breakpoint]) + ) assert datasaver.points_written == breakpoint + 1 assert datasaver.run_id == no_of_runs + 1 @@ -660,33 +659,35 @@ def test_datasaver_inst_metadata(experiment, DAC_with_metadata, DMM) -> None: DAC_with_metadata.ch1.set(set_v) datasaver.add_result((DAC_with_metadata.ch1, set_v), (DMM.v1, DMM.v1.get())) assert datasaver.dataset.snapshot is not None - station_snapshot = datasaver.dataset.snapshot['station'] - assert station_snapshot['instruments']['dummy_dac']['metadata'] == {"dac": "metadata"} + station_snapshot = datasaver.dataset.snapshot["station"] + assert station_snapshot["instruments"]["dummy_dac"]["metadata"] == { + "dac": "metadata" + } def test_exception_happened_during_measurement_is_stored_in_dataset_metadata( experiment, ) -> None: meas = Measurement() - meas.register_custom_parameter(name='nodata') + meas.register_custom_parameter(name="nodata") class SomeMeasurementException(Exception): pass dataset = None # `pytest.raises`` is used here instead of custom try-except for convenience - with pytest.raises(SomeMeasurementException, match='foo') as e: - + with pytest.raises(SomeMeasurementException, match="foo") as e: with meas.run() as datasaver: dataset = datasaver.dataset - raise SomeMeasurementException('foo') + raise SomeMeasurementException("foo") assert dataset is not None metadata = dataset.metadata assert "measurement_exception" in metadata expected_exception_string = "".join( - traceback.format_exception(e.type, e.value, e.tb)) + traceback.format_exception(e.type, e.value, e.tb) + ) exception_string = metadata["measurement_exception"] assert exception_string == expected_exception_string @@ -696,23 +697,23 @@ class SomeMeasurementException(Exception): @given(N=hst.integers(min_value=2, max_value=500)) @pytest.mark.usefixtures("empty_temp_db") def test_datasaver_arrays_lists_tuples(bg_writing, N) -> None: - new_experiment('firstexp', sample_name='no sample') + 
new_experiment("firstexp", sample_name="no sample") meas = Measurement() - meas.register_custom_parameter(name='freqax', - label='Frequency axis', - unit='Hz') - meas.register_custom_parameter(name='signal', - label='qubit signal', - unit='Majorana number', - setpoints=('freqax',)) + meas.register_custom_parameter(name="freqax", label="Frequency axis", unit="Hz") + meas.register_custom_parameter( + name="signal", + label="qubit signal", + unit="Majorana number", + setpoints=("freqax",), + ) with meas.run(write_in_background=bg_writing) as datasaver: freqax = np.linspace(1e6, 2e6, N) signal = np.random.randn(N) - datasaver.add_result(('freqax', freqax), ('signal', signal)) + datasaver.add_result(("freqax", freqax), ("signal", signal)) assert datasaver.points_written == N ds = datasaver.dataset @@ -724,16 +725,18 @@ def test_datasaver_arrays_lists_tuples(bg_writing, N) -> None: signal = np.random.randn(N - 1) with pytest.raises(ValueError): - datasaver.add_result(('freqax', freqax), ('signal', signal)) + datasaver.add_result(("freqax", freqax), ("signal", signal)) - meas.register_custom_parameter(name='gate_voltage', - label='Gate tuning potential', - unit='V') - meas.unregister_parameter('signal') - meas.register_custom_parameter(name='signal', - label='qubit signal', - unit='Majorana flux', - setpoints=('freqax', 'gate_voltage')) + meas.register_custom_parameter( + name="gate_voltage", label="Gate tuning potential", unit="V" + ) + meas.unregister_parameter("signal") + meas.register_custom_parameter( + name="signal", + label="qubit signal", + unit="Majorana flux", + setpoints=("freqax", "gate_voltage"), + ) # save arrays with meas.run(write_in_background=bg_writing) as datasaver: @@ -781,24 +784,25 @@ def test_datasaver_numeric_and_array_paramtype(bg_writing, N) -> None: Test saving one parameter with 'numeric' paramtype and one parameter with 'array' paramtype """ - new_experiment('firstexp', sample_name='no sample') + new_experiment("firstexp", sample_name="no sample") meas = Measurement() - meas.register_custom_parameter(name='numeric_1', - label='Magnetic field', - unit='T', - paramtype='numeric') - meas.register_custom_parameter(name='array_1', - label='Alazar signal', - unit='V', - paramtype='array', - setpoints=('numeric_1',)) + meas.register_custom_parameter( + name="numeric_1", label="Magnetic field", unit="T", paramtype="numeric" + ) + meas.register_custom_parameter( + name="array_1", + label="Alazar signal", + unit="V", + paramtype="array", + setpoints=("numeric_1",), + ) signal = np.random.randn(113) with meas.run(bg_writing) as datasaver: - datasaver.add_result(('numeric_1', 3.75), ('array_1', signal)) + datasaver.add_result(("numeric_1", 3.75), ("array_1", signal)) assert datasaver.points_written == 1 ds = datasaver.dataset @@ -816,25 +820,26 @@ def test_datasaver_numeric_after_array_paramtype(bg_writing) -> None: Test that passing values for 'array' parameter in `add_result` before passing values for 'numeric' parameter works. 
""" - new_experiment('firstexp', sample_name='no sample') + new_experiment("firstexp", sample_name="no sample") meas = Measurement() - meas.register_custom_parameter(name='numeric_1', - label='Magnetic field', - unit='T', - paramtype='numeric') - meas.register_custom_parameter(name='array_1', - label='Alazar signal', - unit='V', - paramtype='array', - setpoints=('numeric_1',)) + meas.register_custom_parameter( + name="numeric_1", label="Magnetic field", unit="T", paramtype="numeric" + ) + meas.register_custom_parameter( + name="array_1", + label="Alazar signal", + unit="V", + paramtype="array", + setpoints=("numeric_1",), + ) signal = np.random.randn(113) with meas.run(write_in_background=bg_writing) as datasaver: # it is important that first comes the 'array' data and then 'numeric' - datasaver.add_result(('array_1', signal), ('numeric_1', 3.75)) + datasaver.add_result(("array_1", signal), ("numeric_1", 3.75)) assert datasaver.points_written == 1 ds = datasaver.dataset @@ -850,9 +855,9 @@ def test_datasaver_numeric_after_array_paramtype(bg_writing) -> None: def test_datasaver_foul_input(bg_writing) -> None: meas = Measurement() - meas.register_custom_parameter('foul', - label='something unnatural', - unit='Fahrenheit') + meas.register_custom_parameter( + "foul", label="something unnatural", unit="Fahrenheit" + ) foul_stuff = [Parameter("foul"), {1, 2, 3}] @@ -866,21 +871,22 @@ def test_datasaver_foul_input(bg_writing) -> None: @given(N=hst.integers(min_value=2, max_value=500)) @pytest.mark.usefixtures("empty_temp_db") @pytest.mark.parametrize("bg_writing", [True, False]) -@pytest.mark.parametrize("storage_type", ['numeric', 'array']) +@pytest.mark.parametrize("storage_type", ["numeric", "array"]) def test_datasaver_unsized_arrays(N, storage_type, bg_writing) -> None: - new_experiment('firstexp', sample_name='no sample') + new_experiment("firstexp", sample_name="no sample") meas = Measurement() - meas.register_custom_parameter(name='freqax', - label='Frequency axis', - unit='Hz', - paramtype=storage_type) - meas.register_custom_parameter(name='signal', - label='qubit signal', - unit='Majorana number', - setpoints=('freqax',), - paramtype=storage_type) + meas.register_custom_parameter( + name="freqax", label="Frequency axis", unit="Hz", paramtype=storage_type + ) + meas.register_custom_parameter( + name="signal", + label="qubit signal", + unit="Majorana number", + setpoints=("freqax",), + paramtype=storage_type, + ) # note that np.array(some_number) is not the same as the number # its also not an array with a shape. 
Check here that we handle it # correctly @@ -893,7 +899,7 @@ def test_datasaver_unsized_arrays(N, storage_type, bg_writing) -> None: assert myfreq.shape == () mysignal = np.array(signal[i]) assert mysignal.shape == () - datasaver.add_result(('freqax', myfreq), ('signal', mysignal)) + datasaver.add_result(("freqax", myfreq), ("signal", mysignal)) assert datasaver.points_written == N ds = datasaver.dataset @@ -904,19 +910,24 @@ def test_datasaver_unsized_arrays(N, storage_type, bg_writing) -> None: expected_signal = np.random.randn(N) expected_freqax = np.linspace(1e6, 2e6, N) - if storage_type == 'array': + if storage_type == "array": expected_freqax = expected_freqax.reshape((N, 1)) expected_signal = expected_signal.reshape((N, 1)) - assert_allclose(loaded_data['freqax'], expected_freqax) - assert_allclose(loaded_data['signal'], expected_signal) + assert_allclose(loaded_data["freqax"], expected_freqax) + assert_allclose(loaded_data["signal"], expected_signal) -@settings(max_examples=5, deadline=None, - suppress_health_check=(HealthCheck.function_scoped_fixture,)) -@given(N=hst.integers(min_value=5, max_value=6), - M=hst.integers(min_value=4, max_value=5), - seed=hst.integers(min_value=0, max_value=np.iinfo(np.uint32).max)) +@settings( + max_examples=5, + deadline=None, + suppress_health_check=(HealthCheck.function_scoped_fixture,), +) +@given( + N=hst.integers(min_value=5, max_value=6), + M=hst.integers(min_value=4, max_value=5), + seed=hst.integers(min_value=0, max_value=np.iinfo(np.uint32).max), +) @pytest.mark.usefixtures("experiment") @pytest.mark.parametrize("bg_writing", [True, False]) @pytest.mark.parametrize("param_type", ["np_array", "tuple", "list"]) @@ -930,15 +941,15 @@ def test_datasaver_arrayparams( numeric """ - if param_type == 'list': + if param_type == "list": spectrum = SpectrumAnalyzer.listspectrum - spectrum_name = 'dummy_SA_listspectrum' - elif param_type == 'tuple': + spectrum_name = "dummy_SA_listspectrum" + elif param_type == "tuple": spectrum = SpectrumAnalyzer.tuplespectrum - spectrum_name = 'dummy_SA_tuplespectrum' - elif param_type == 'np_array': + spectrum_name = "dummy_SA_tuplespectrum" + elif param_type == "np_array": spectrum = SpectrumAnalyzer.spectrum - spectrum_name = 'dummy_SA_spectrum' + spectrum_name = "dummy_SA_spectrum" else: raise RuntimeError("Invalid storage_type") @@ -947,11 +958,11 @@ def test_datasaver_arrayparams( meas.register_parameter(spectrum, paramtype=storage_type) assert len(meas.parameters) == 2 - setpoint_paramspec = meas.parameters['dummy_SA_Frequency'] + setpoint_paramspec = meas.parameters["dummy_SA_Frequency"] spectrum_paramspec = meas.parameters[str(spectrum)] assert setpoint_paramspec in meas._interdeps.dependencies[spectrum_paramspec] assert meas.parameters[str(spectrum)].type == storage_type - assert meas.parameters['dummy_SA_Frequency'].type == storage_type + assert meas.parameters["dummy_SA_Frequency"].type == storage_type # Now for a real measurement @@ -967,21 +978,19 @@ def test_datasaver_arrayparams( np.random.seed(seed) with meas.run(write_in_background=bg_writing) as datasaver: for set_v in np.linspace(0, 0.01, N): - datasaver.add_result((DAC.ch1, set_v), - (spectrum, spectrum.get())) + datasaver.add_result((DAC.ch1, set_v), (spectrum, spectrum.get())) - if storage_type == 'numeric': + if storage_type == "numeric": assert datasaver.points_written == N * M - elif storage_type == 'array': + elif storage_type == "array": assert datasaver.points_written == N np.random.seed(seed) expected_dac_data = 
np.repeat(np.linspace(0, 0.01, N), M) expected_freq_axis = np.tile(spectrum.setpoints[0], N) - expected_output = np.array([spectrum.get() for _ in range(N)]).reshape( - N * M) + expected_output = np.array([spectrum.get() for _ in range(N)]).reshape(N * M) - if storage_type == 'array': + if storage_type == "array": expected_dac_data = expected_dac_data.reshape(N, M) expected_freq_axis = expected_freq_axis.reshape(N, M) expected_output = expected_output.reshape(N, M) @@ -990,16 +999,19 @@ def test_datasaver_arrayparams( assert isinstance(ds, DataSet) data = ds.get_parameter_data()[spectrum_name] - assert_allclose(data['dummy_dac_ch1'], expected_dac_data) - assert_allclose(data['dummy_SA_Frequency'], expected_freq_axis) + assert_allclose(data["dummy_dac_ch1"], expected_dac_data) + assert_allclose(data["dummy_SA_Frequency"], expected_freq_axis) assert_allclose(data[spectrum_name], expected_output) -@settings(max_examples=5, deadline=None, - suppress_health_check=(HealthCheck.function_scoped_fixture,)) +@settings( + max_examples=5, + deadline=None, + suppress_health_check=(HealthCheck.function_scoped_fixture,), +) @given(N=hst.integers(min_value=5, max_value=500)) @pytest.mark.parametrize("bg_writing", [True, False]) -@pytest.mark.parametrize("storage_type", ['numeric', 'array']) +@pytest.mark.parametrize("storage_type", ["numeric", "array"]) @pytest.mark.usefixtures("experiment") def test_datasaver_array_parameters_channel( channel_array_instrument, DAC, N, storage_type, bg_writing @@ -1011,7 +1023,7 @@ def test_datasaver_array_parameters_channel( meas.register_parameter(array_param, paramtype=storage_type) assert len(meas.parameters) == 2 - dependency_name = 'dummy_channel_inst_ChanA_array_setpoint_param_this_setpoint' + dependency_name = "dummy_channel_inst_ChanA_array_setpoint_param_this_setpoint" dep_paramspec = meas.parameters[dependency_name] array_paramspec = meas.parameters[str(array_param)] assert dep_paramspec in meas._interdeps.dependencies[array_paramspec] @@ -1031,36 +1043,42 @@ def test_datasaver_array_parameters_channel( with meas.run(write_in_background=bg_writing) as datasaver: for set_v in np.linspace(0, 0.01, N): - datasaver.add_result((DAC.ch1, set_v), - (array_param, array_param.get())) - if storage_type == 'numeric': + datasaver.add_result((DAC.ch1, set_v), (array_param, array_param.get())) + if storage_type == "numeric": n_points_written_expected = N * M - elif storage_type == 'array': + elif storage_type == "array": n_points_written_expected = N else: raise RuntimeError("Unknown storage_type") assert datasaver.points_written == n_points_written_expected - expected_params = ('dummy_dac_ch1', - dependency_name, - 'dummy_channel_inst_ChanA_dummy_array_parameter') + expected_params = ( + "dummy_dac_ch1", + dependency_name, + "dummy_channel_inst_ChanA_dummy_array_parameter", + ) ds = load_by_id(datasaver.run_id) assert isinstance(ds, DataSet) - loaded_data = ds.get_parameter_data()['dummy_channel_inst_ChanA_dummy_array_parameter'] + loaded_data = ds.get_parameter_data()[ + "dummy_channel_inst_ChanA_dummy_array_parameter" + ] for param in expected_params: - if storage_type == 'array': + if storage_type == "array": expected_shape: tuple[int, ...] 
= (N, M) else: - expected_shape = (N*M, ) + expected_shape = (N * M,) assert loaded_data[param].shape == expected_shape -@settings(max_examples=5, deadline=None, - suppress_health_check=(HealthCheck.function_scoped_fixture,)) +@settings( + max_examples=5, + deadline=None, + suppress_health_check=(HealthCheck.function_scoped_fixture,), +) @given(n=hst.integers(min_value=5, max_value=500)) @pytest.mark.parametrize("bg_writing", [True, False]) -@pytest.mark.parametrize("storage_type", ['numeric', 'array']) +@pytest.mark.parametrize("storage_type", ["numeric", "array"]) @pytest.mark.usefixtures("experiment") def test_datasaver_parameter_with_setpoints( channel_array_instrument, DAC, n, storage_type, bg_writing @@ -1075,7 +1093,7 @@ def test_datasaver_parameter_with_setpoints( meas.register_parameter(param, paramtype=storage_type) assert len(meas.parameters) == 2 - dependency_name = 'dummy_channel_inst_ChanA_dummy_sp_axis' + dependency_name = "dummy_channel_inst_ChanA_dummy_sp_axis" dep_ps = meas.parameters[dependency_name] param_ps = meas.parameters[str(param)] @@ -1090,52 +1108,58 @@ def test_datasaver_parameter_with_setpoints( # so we can test that we get the expected numbers np.random.seed(random_seed) datasaver.add_result((param, param.get())) - if storage_type == 'numeric': + if storage_type == "numeric": expected_points_written = n - elif storage_type == 'array': + elif storage_type == "array": expected_points_written = 1 else: raise RuntimeError("Unknown storage_type") assert datasaver.points_written == expected_points_written - expected_params = (dependency_name, - 'dummy_channel_inst_ChanA_dummy_parameter_with_setpoints') + expected_params = ( + dependency_name, + "dummy_channel_inst_ChanA_dummy_parameter_with_setpoints", + ) ds = load_by_id(datasaver.run_id) assert isinstance(ds, DataSet) loaded_data = ds.get_parameter_data() for param in expected_params: - data = loaded_data['dummy_channel_inst_ChanA_dummy_parameter_with_setpoints'][param] - if storage_type == 'array': + data = loaded_data["dummy_channel_inst_ChanA_dummy_parameter_with_setpoints"][ + param + ] + if storage_type == "array": assert data.shape == (expected_points_written, n) else: assert data.shape == (expected_points_written,) assert len(loaded_data) == 1 - subdata = loaded_data[ - 'dummy_channel_inst_ChanA_dummy_parameter_with_setpoints'] + subdata = loaded_data["dummy_channel_inst_ChanA_dummy_parameter_with_setpoints"] - expected_dep_data = np.linspace(chan.dummy_start(), - chan.dummy_stop(), - chan.dummy_n_points()) + expected_dep_data = np.linspace( + chan.dummy_start(), chan.dummy_stop(), chan.dummy_n_points() + ) np.random.seed(random_seed) expected_data = np.random.rand(n) - if storage_type == 'array': - expected_dep_data = expected_dep_data.reshape((1, - chan.dummy_n_points())) + if storage_type == "array": + expected_dep_data = expected_dep_data.reshape((1, chan.dummy_n_points())) expected_data = expected_data.reshape((1, chan.dummy_n_points())) assert_allclose(subdata[dependency_name], expected_dep_data) - assert_allclose(subdata['dummy_channel_inst_ChanA_' - 'dummy_parameter_with_setpoints'], - expected_data) + assert_allclose( + subdata["dummy_channel_inst_ChanA_dummy_parameter_with_setpoints"], + expected_data, + ) -@settings(max_examples=5, deadline=None, - suppress_health_check=(HealthCheck.function_scoped_fixture,)) +@settings( + max_examples=5, + deadline=None, + suppress_health_check=(HealthCheck.function_scoped_fixture,), +) @given(n=hst.integers(min_value=5, max_value=500)) 
@pytest.mark.parametrize("bg_writing", [True, False]) -@pytest.mark.parametrize("storage_type", ['numeric', 'array']) +@pytest.mark.parametrize("storage_type", ["numeric", "array"]) @pytest.mark.usefixtures("experiment") def test_datasaver_parameter_with_setpoints_explicitly_expanded( channel_array_instrument, DAC, n, storage_type, bg_writing @@ -1150,7 +1174,7 @@ def test_datasaver_parameter_with_setpoints_explicitly_expanded( meas.register_parameter(param, paramtype=storage_type) assert len(meas.parameters) == 2 - dependency_name = 'dummy_channel_inst_ChanA_dummy_sp_axis' + dependency_name = "dummy_channel_inst_ChanA_dummy_sp_axis" dep_ps = meas.parameters[dependency_name] param_ps = meas.parameters[str(param)] @@ -1165,45 +1189,48 @@ def test_datasaver_parameter_with_setpoints_explicitly_expanded( # so we can test that we get the expected numbers np.random.seed(random_seed) datasaver.add_result(*expand_setpoints_helper(param)) - if storage_type == 'numeric': + if storage_type == "numeric": expected_points_written = n - elif storage_type == 'array': + elif storage_type == "array": expected_points_written = 1 else: raise RuntimeError("Unknown storage_type") assert datasaver.points_written == expected_points_written - expected_params = (dependency_name, - 'dummy_channel_inst_ChanA_dummy_parameter_with_setpoints') + expected_params = ( + dependency_name, + "dummy_channel_inst_ChanA_dummy_parameter_with_setpoints", + ) ds = load_by_id(datasaver.run_id) assert isinstance(ds, DataSet) loaded_data = ds.get_parameter_data() for param in expected_params: - data = loaded_data['dummy_channel_inst_ChanA_dummy_parameter_with_setpoints'][param] - if storage_type == 'array': + data = loaded_data["dummy_channel_inst_ChanA_dummy_parameter_with_setpoints"][ + param + ] + if storage_type == "array": assert data.shape == (expected_points_written, n) else: assert data.shape == (expected_points_written,) assert len(loaded_data) == 1 - subdata = loaded_data[ - 'dummy_channel_inst_ChanA_dummy_parameter_with_setpoints'] + subdata = loaded_data["dummy_channel_inst_ChanA_dummy_parameter_with_setpoints"] - expected_dep_data = np.linspace(chan.dummy_start(), - chan.dummy_stop(), - chan.dummy_n_points()) + expected_dep_data = np.linspace( + chan.dummy_start(), chan.dummy_stop(), chan.dummy_n_points() + ) np.random.seed(random_seed) expected_data = np.random.rand(n) - if storage_type == 'array': - expected_dep_data = expected_dep_data.reshape((1, - chan.dummy_n_points())) + if storage_type == "array": + expected_dep_data = expected_dep_data.reshape((1, chan.dummy_n_points())) expected_data = expected_data.reshape((1, chan.dummy_n_points())) assert_allclose(subdata[dependency_name], expected_dep_data) - assert_allclose(subdata['dummy_channel_inst_ChanA_' - 'dummy_parameter_with_setpoints'], - expected_data) + assert_allclose( + subdata["dummy_channel_inst_ChanA_dummy_parameter_with_setpoints"], + expected_data, + ) @pytest.mark.usefixtures("experiment") @@ -1225,7 +1252,7 @@ def test_datasaver_parameter_with_setpoints_partially_expanded_raises( sp_param_1 = chan.dummy_sp_axis assert len(meas.parameters) == 3 - dependency_name = 'dummy_channel_inst_ChanA_dummy_sp_axis' + dependency_name = "dummy_channel_inst_ChanA_dummy_sp_axis" dep_ps = meas.parameters[dependency_name] param_ps = meas.parameters[str(param)] @@ -1237,13 +1264,15 @@ def test_datasaver_parameter_with_setpoints_partially_expanded_raises( # so we can test that we get the expected numbers np.random.seed(random_seed) with pytest.raises(ValueError, match="Some 
of the setpoints of"): - datasaver.add_result((param, param.get()), - (sp_param_1, sp_param_1.get())) + datasaver.add_result((param, param.get()), (sp_param_1, sp_param_1.get())) @pytest.mark.parametrize("bg_writing", [True, False]) -@settings(max_examples=5, deadline=None, - suppress_health_check=(HealthCheck.function_scoped_fixture,)) +@settings( + max_examples=5, + deadline=None, + suppress_health_check=(HealthCheck.function_scoped_fixture,), +) @given(n=hst.integers(min_value=5, max_value=500)) @pytest.mark.usefixtures("experiment") def test_datasaver_parameter_with_setpoints_complex( @@ -1256,20 +1285,21 @@ def test_datasaver_parameter_with_setpoints_complex( chan.dummy_start(0) chan.dummy_stop(100) meas = Measurement() - meas.register_parameter(param, paramtype='array') + meas.register_parameter(param, paramtype="array") assert len(meas.parameters) == 2 - dependency_name = 'dummy_channel_inst_ChanA_dummy_sp_axis' + dependency_name = "dummy_channel_inst_ChanA_dummy_sp_axis" dependent_parameter = meas.parameters[str(param)] indepdendent_parameter = meas.parameters[dependency_name] - assert meas._interdeps.dependencies[dependent_parameter] \ - == (indepdendent_parameter, ) + assert meas._interdeps.dependencies[dependent_parameter] == ( + indepdendent_parameter, + ) - assert dependent_parameter.type == 'array' - assert indepdendent_parameter.type == 'array' + assert dependent_parameter.type == "array" + assert indepdendent_parameter.type == "array" # Now for a real measurement with meas.run(write_in_background=bg_writing) as datasaver: @@ -1284,20 +1314,27 @@ def test_datasaver_parameter_with_setpoints_complex( datadict = ds.get_parameter_data() assert len(datadict) == 1 subdata = datadict[ - 'dummy_channel_inst_ChanA_dummy_parameter_with_setpoints_complex'] - assert_allclose(subdata[dependency_name], - np.linspace(chan.dummy_start(), - chan.dummy_stop(), - chan.dummy_n_points()).reshape(1, chan.dummy_n_points())) + "dummy_channel_inst_ChanA_dummy_parameter_with_setpoints_complex" + ] + assert_allclose( + subdata[dependency_name], + np.linspace( + chan.dummy_start(), chan.dummy_stop(), chan.dummy_n_points() + ).reshape(1, chan.dummy_n_points()), + ) np.random.seed(random_seed) - assert_allclose(subdata['dummy_channel_inst_ChanA_' - 'dummy_parameter_with_setpoints_complex'], - (np.random.rand(n) + 1j * np.random.rand(n)).reshape(1, chan.dummy_n_points())) + assert_allclose( + subdata["dummy_channel_inst_ChanA_dummy_parameter_with_setpoints_complex"], + (np.random.rand(n) + 1j * np.random.rand(n)).reshape(1, chan.dummy_n_points()), + ) @pytest.mark.parametrize("bg_writing", [True, False]) -@settings(max_examples=5, deadline=None, - suppress_health_check=(HealthCheck.function_scoped_fixture,)) +@settings( + max_examples=5, + deadline=None, + suppress_health_check=(HealthCheck.function_scoped_fixture,), +) @given(n=hst.integers(min_value=5, max_value=500)) @pytest.mark.usefixtures("experiment") def test_datasaver_parameter_with_setpoints_complex_explicitly_expanded( @@ -1310,20 +1347,21 @@ def test_datasaver_parameter_with_setpoints_complex_explicitly_expanded( chan.dummy_start(0) chan.dummy_stop(100) meas = Measurement() - meas.register_parameter(param, paramtype='array') + meas.register_parameter(param, paramtype="array") assert len(meas.parameters) == 2 - dependency_name = 'dummy_channel_inst_ChanA_dummy_sp_axis' + dependency_name = "dummy_channel_inst_ChanA_dummy_sp_axis" dependent_parameter = meas.parameters[str(param)] indepdendent_parameter = meas.parameters[dependency_name] - assert 
meas._interdeps.dependencies[dependent_parameter] \ - == (indepdendent_parameter, ) + assert meas._interdeps.dependencies[dependent_parameter] == ( + indepdendent_parameter, + ) - assert dependent_parameter.type == 'array' - assert indepdendent_parameter.type == 'array' + assert dependent_parameter.type == "array" + assert indepdendent_parameter.type == "array" # Now for a real measurement with meas.run(write_in_background=bg_writing) as datasaver: @@ -1338,19 +1376,23 @@ def test_datasaver_parameter_with_setpoints_complex_explicitly_expanded( datadict = ds.get_parameter_data() assert len(datadict) == 1 subdata = datadict[ - 'dummy_channel_inst_ChanA_dummy_parameter_with_setpoints_complex'] - assert_allclose(subdata[dependency_name], - np.linspace(chan.dummy_start(), - chan.dummy_stop(), - chan.dummy_n_points()).reshape(1, chan.dummy_n_points())) + "dummy_channel_inst_ChanA_dummy_parameter_with_setpoints_complex" + ] + assert_allclose( + subdata[dependency_name], + np.linspace( + chan.dummy_start(), chan.dummy_stop(), chan.dummy_n_points() + ).reshape(1, chan.dummy_n_points()), + ) np.random.seed(random_seed) - assert_allclose(subdata['dummy_channel_inst_ChanA_' - 'dummy_parameter_with_setpoints_complex'], - (np.random.rand(n) + 1j * np.random.rand(n)).reshape(1, chan.dummy_n_points())) + assert_allclose( + subdata["dummy_channel_inst_ChanA_dummy_parameter_with_setpoints_complex"], + (np.random.rand(n) + 1j * np.random.rand(n)).reshape(1, chan.dummy_n_points()), + ) @pytest.mark.parametrize("bg_writing", [True, False]) -@pytest.mark.parametrize("storage_type", ['numeric', 'array']) +@pytest.mark.parametrize("storage_type", ["numeric", "array"]) @pytest.mark.usefixtures("experiment") def test_datasaver_parameter_with_setpoints_missing_reg_raises( channel_array_instrument, DAC, storage_type, bg_writing @@ -1372,24 +1414,28 @@ def test_datasaver_parameter_with_setpoints_missing_reg_raises( param.setpoints = old_setpoints with meas.run(write_in_background=bg_writing) as datasaver: - sp_param_name = 'dummy_channel_inst_ChanA_dummy_sp_axis' - match = re.escape('Can not add result for parameter ' - f'{sp_param_name}, no such parameter registered ' - 'with this measurement.') + sp_param_name = "dummy_channel_inst_ChanA_dummy_sp_axis" + match = re.escape( + "Can not add result for parameter " + f"{sp_param_name}, no such parameter registered " + "with this measurement." + ) with pytest.raises(ValueError, match=match): datasaver.add_result(*expand_setpoints_helper(param)) with meas.run(write_in_background=bg_writing) as datasaver: - sp_param_name = 'dummy_channel_inst_ChanA_dummy_sp_axis' - match = re.escape('Can not add result for parameter ' - f'{sp_param_name}, no such parameter registered ' - 'with this measurement.') + sp_param_name = "dummy_channel_inst_ChanA_dummy_sp_axis" + match = re.escape( + "Can not add result for parameter " + f"{sp_param_name}, no such parameter registered " + "with this measurement." 
+ ) with pytest.raises(ValueError, match=match): datasaver.add_result((param, param.get())) @pytest.mark.parametrize("bg_writing", [True, False]) -@pytest.mark.parametrize("storage_type", ['numeric', 'array']) +@pytest.mark.parametrize("storage_type", ["numeric", "array"]) @pytest.mark.usefixtures("experiment") def test_datasaver_parameter_with_setpoints_reg_but_missing_validator( channel_array_instrument, DAC, storage_type, bg_writing @@ -1442,7 +1488,7 @@ def test_datasaver_parameter_with_setpoints_reg_but_missing_validator( @pytest.mark.parametrize("bg_writing", [True, False]) -@pytest.mark.parametrize("storage_type", ['numeric', 'array']) +@pytest.mark.parametrize("storage_type", ["numeric", "array"]) @pytest.mark.usefixtures("experiment") def test_datasaver_parameter_with_setpoints_reg_but_missing( channel_array_instrument, DAC, storage_type, bg_writing @@ -1467,23 +1513,24 @@ def test_datasaver_parameter_with_setpoints_reg_but_missing( param.setpoints = old_setpoints with meas.run(write_in_background=bg_writing) as datasaver: - match = re.escape('Can not add result, some required parameters ' - 'are missing.') + match = re.escape("Can not add result, some required parameters are missing.") with pytest.raises(ValueError, match=match): datasaver.add_result(*expand_setpoints_helper(param)) with meas.run(write_in_background=bg_writing) as datasaver: - match = re.escape('Can not add result, some required parameters ' - 'are missing.') + match = re.escape("Can not add result, some required parameters are missing.") with pytest.raises(ValueError, match=match): datasaver.add_result((param, param.get())) -@settings(max_examples=5, deadline=None, - suppress_health_check=(HealthCheck.function_scoped_fixture,)) +@settings( + max_examples=5, + deadline=None, + suppress_health_check=(HealthCheck.function_scoped_fixture,), +) @given(N=hst.integers(min_value=5, max_value=500)) @pytest.mark.usefixtures("experiment") -@pytest.mark.parametrize("storage_type", ['numeric', 'array']) +@pytest.mark.parametrize("storage_type", ["numeric", "array"]) @pytest.mark.parametrize("bg_writing", [True, False]) def test_datasaver_array_parameters_array( channel_array_instrument, DAC, N, storage_type, bg_writing @@ -1498,7 +1545,7 @@ def test_datasaver_array_parameters_array( meas.register_parameter(array_param, paramtype=storage_type) assert len(meas.parameters) == 2 - dependency_name = 'dummy_channel_inst_ChanA_array_setpoint_param_this_setpoint' + dependency_name = "dummy_channel_inst_ChanA_array_setpoint_param_this_setpoint" dependency_ps = meas.parameters[dependency_name] array_param_ps = meas.parameters[str(array_param)] assert dependency_ps in meas._interdeps.dependencies[array_param_ps] @@ -1509,7 +1556,7 @@ def test_datasaver_array_parameters_array( meas = Measurement() - meas.register_parameter(DAC.ch1, paramtype='numeric') + meas.register_parameter(DAC.ch1, paramtype="numeric") meas.register_parameter(array_param, setpoints=[DAC.ch1], paramtype=storage_type) assert len(meas.parameters) == 3 @@ -1518,12 +1565,11 @@ def test_datasaver_array_parameters_array( dac_datapoints = np.linspace(0, 0.01, N) with meas.run(write_in_background=bg_writing) as datasaver: for set_v in dac_datapoints: - datasaver.add_result((DAC.ch1, set_v), - (array_param, array_param.get())) + datasaver.add_result((DAC.ch1, set_v), (array_param, array_param.get())) - if storage_type == 'numeric': - expected_npoints = N*M - elif storage_type == 'array': + if storage_type == "numeric": + expected_npoints = N * M + elif storage_type == 
"array": expected_npoints = N else: raise RuntimeError("Unknown storage_type") @@ -1531,41 +1577,45 @@ def test_datasaver_array_parameters_array( assert datasaver.points_written == expected_npoints ds = load_by_id(datasaver.run_id) assert isinstance(ds, DataSet) - loaded_data = ds.get_parameter_data()['dummy_channel_inst_ChanA_dummy_array_parameter'] + loaded_data = ds.get_parameter_data()[ + "dummy_channel_inst_ChanA_dummy_array_parameter" + ] - data_num = loaded_data['dummy_dac_ch1'] + data_num = loaded_data["dummy_dac_ch1"] assert len(data_num) == expected_npoints setpoint_arrays = loaded_data[dependency_name] - data_arrays = loaded_data['dummy_channel_inst_ChanA_dummy_array_parameter'] + data_arrays = loaded_data["dummy_channel_inst_ChanA_dummy_array_parameter"] assert len(setpoint_arrays) == expected_npoints assert len(data_arrays) == expected_npoints expected_dac_data = np.repeat(np.linspace(0, 0.01, N), M) expected_sp_data = np.tile(array_param.setpoints[0], N) - expected_output = np.array([array_param.get() for _ in range(N)]).reshape( - N * M) + expected_output = np.array([array_param.get() for _ in range(N)]).reshape(N * M) - if storage_type == 'array': + if storage_type == "array": expected_dac_data = expected_dac_data.reshape(N, M) expected_sp_data = expected_sp_data.reshape(N, M) expected_output = expected_output.reshape(N, M) - assert_allclose(loaded_data['dummy_dac_ch1'], expected_dac_data) - assert_allclose(loaded_data[dependency_name], - expected_sp_data) - assert_allclose(loaded_data['dummy_channel_inst_ChanA_dummy_array_parameter'], - expected_output) + assert_allclose(loaded_data["dummy_dac_ch1"], expected_dac_data) + assert_allclose(loaded_data[dependency_name], expected_sp_data) + assert_allclose( + loaded_data["dummy_channel_inst_ChanA_dummy_array_parameter"], expected_output + ) - if storage_type == 'array': + if storage_type == "array": for data_array, setpoint_array in zip(data_arrays, setpoint_arrays): assert_array_equal(setpoint_array, np.linspace(5, 9, 5)) - assert_array_equal(data_array, np.array([2., 2., 2., 2., 2.])) + assert_array_equal(data_array, np.array([2.0, 2.0, 2.0, 2.0, 2.0])) @pytest.mark.parametrize("bg_writing", [True, False]) -@settings(max_examples=5, deadline=None, - suppress_health_check=(HealthCheck.function_scoped_fixture,)) +@settings( + max_examples=5, + deadline=None, + suppress_health_check=(HealthCheck.function_scoped_fixture,), +) @given(N=hst.integers(min_value=5, max_value=500)) @pytest.mark.usefixtures("experiment") def test_datasaver_complex_array_parameters_array( @@ -1579,26 +1629,27 @@ def test_datasaver_complex_array_parameters_array( array_param = channel_array_instrument.A.dummy_complex_array_parameter - meas.register_parameter(array_param, paramtype='array') + meas.register_parameter(array_param, paramtype="array") assert len(meas.parameters) == 2 - dependency_name = 'dummy_channel_inst_ChanA_this_setpoint' + dependency_name = "dummy_channel_inst_ChanA_this_setpoint" dependent_parameter = meas.parameters[str(array_param)] indepdendent_parameter = meas.parameters[dependency_name] - assert meas._interdeps.dependencies[dependent_parameter] \ - == (indepdendent_parameter, ) + assert meas._interdeps.dependencies[dependent_parameter] == ( + indepdendent_parameter, + ) - assert dependent_parameter.type == 'array' - assert indepdendent_parameter.type == 'array' + assert dependent_parameter.type == "array" + assert indepdendent_parameter.type == "array" # Now for a real measurement meas = Measurement() - 
meas.register_parameter(DAC.ch1, paramtype='numeric') - meas.register_parameter(array_param, setpoints=[DAC.ch1], paramtype='array') + meas.register_parameter(DAC.ch1, paramtype="numeric") + meas.register_parameter(array_param, setpoints=[DAC.ch1], paramtype="array") assert len(meas.parameters) == 3 @@ -1606,25 +1657,26 @@ def test_datasaver_complex_array_parameters_array( dac_datapoints = np.linspace(0, 0.01, N) with meas.run(write_in_background=bg_writing) as datasaver: for set_v in dac_datapoints: - datasaver.add_result((DAC.ch1, set_v), - (array_param, array_param.get())) + datasaver.add_result((DAC.ch1, set_v), (array_param, array_param.get())) assert datasaver.points_written == N ds = load_by_id(datasaver.run_id) assert isinstance(ds, DataSet) - loaded_data = ds.get_parameter_data()["dummy_channel_inst_ChanA_dummy_complex_array_parameter"] - data_num = loaded_data['dummy_dac_ch1'] + loaded_data = ds.get_parameter_data()[ + "dummy_channel_inst_ChanA_dummy_complex_array_parameter" + ] + data_num = loaded_data["dummy_dac_ch1"] assert data_num.shape == (N, M) - param_name = 'dummy_channel_inst_ChanA_dummy_complex_array_parameter' + param_name = "dummy_channel_inst_ChanA_dummy_complex_array_parameter" - setpoint_arrays = loaded_data['dummy_channel_inst_ChanA_this_setpoint'] + setpoint_arrays = loaded_data["dummy_channel_inst_ChanA_this_setpoint"] data_arrays = loaded_data[param_name] assert setpoint_arrays.shape == (N, M) assert data_arrays.shape == (N, M) for data_array, setpoint_array in zip(data_arrays, setpoint_arrays): assert_array_equal(setpoint_array, np.linspace(5, 9, 5)) - assert_array_equal(data_array, np.arange(5) - 1j*np.arange(5)) + assert_array_equal(data_array, np.arange(5) - 1j * np.arange(5)) @pytest.mark.parametrize("bg_writing", [True, False]) @@ -1643,20 +1695,24 @@ def test_datasaver_multidim_array(experiment, bg_writing) -> None: # noqa: F811 y1 = ManualParameter("y1") y2 = ManualParameter("y2") - meas.register_parameter(x1, paramtype='array') - meas.register_parameter(x2, paramtype='array') - meas.register_parameter(y1, setpoints=[x1, x2], paramtype='array') - meas.register_parameter(y2, setpoints=[x1, x2], paramtype='array') + meas.register_parameter(x1, paramtype="array") + meas.register_parameter(x2, paramtype="array") + meas.register_parameter(y1, setpoints=[x1, x2], paramtype="array") + meas.register_parameter(y2, setpoints=[x1, x2], paramtype="array") data = np.random.rand(4, size1, size2) - expected = {'x1': data[0, :, :], - 'x2': data[1, :, :], - 'y1': data[2, :, :], - 'y2': data[3, :, :]} + expected = { + "x1": data[0, :, :], + "x2": data[1, :, :], + "y1": data[2, :, :], + "y2": data[3, :, :], + } with meas.run(write_in_background=bg_writing) as datasaver: - datasaver.add_result((str(x1), expected['x1']), - (str(x2), expected['x2']), - (str(y1), expected['y1']), - (str(y2), expected['y2'])) + datasaver.add_result( + (str(x1), expected["x1"]), + (str(x2), expected["x2"]), + (str(y1), expected["y1"]), + (str(y2), expected["y2"]), + ) # We expect one "point" i.e. row in the DB to be written per top-level # parameter. 
@@ -1664,8 +1720,8 @@ def test_datasaver_multidim_array(experiment, bg_writing) -> None: # noqa: F811 dataset = load_by_id(datasaver.run_id) assert isinstance(dataset, DataSet) loaded_data = dataset.get_parameter_data() - for outerid in ('y1', 'y2'): - for innerid in ('x1', 'x2', outerid): + for outerid in ("y1", "y2"): + for innerid in ("x1", "x2", outerid): mydata = loaded_data[outerid][innerid] assert mydata.shape == (1, size1, size2) assert_array_equal(mydata[0], expected[innerid]) @@ -1689,15 +1745,17 @@ def test_datasaver_export( y1 = ManualParameter("y1") y2 = ManualParameter("y2") - meas.register_parameter(x1, paramtype='array') - meas.register_parameter(x2, paramtype='array') - meas.register_parameter(y1, setpoints=[x1, x2], paramtype='array') - meas.register_parameter(y2, setpoints=[x1, x2], paramtype='array') + meas.register_parameter(x1, paramtype="array") + meas.register_parameter(x2, paramtype="array") + meas.register_parameter(y1, setpoints=[x1, x2], paramtype="array") + meas.register_parameter(y2, setpoints=[x1, x2], paramtype="array") data = np.random.rand(4, size1, size2) - expected = {'x1': data[0, :, :], - 'x2': data[1, :, :], - 'y1': data[2, :, :], - 'y2': data[3, :, :]} + expected = { + "x1": data[0, :, :], + "x2": data[1, :, :], + "y1": data[2, :, :], + "y2": data[3, :, :], + } tmp_path = tmp_path_factory.mktemp("export_from_config") path = str(tmp_path) @@ -1726,7 +1784,6 @@ def test_datasaver_export( assert os.listdir(path) == [expected_filename] if export_type == DataExportType.NETCDF: - xr_ds = xr.open_dataset(os.path.join(path, expected_filename)) assert xr_ds.attrs["metadata_added_after_export"] == 69 else: @@ -1748,25 +1805,27 @@ def test_datasaver_multidim_numeric(experiment, bg_writing) -> None: {name: i for i, name in zip(range(4), ["x1", "x2", "y1", "y2"])} - meas.register_parameter(x1, paramtype='numeric') - meas.register_parameter(x2, paramtype='numeric') - meas.register_parameter(y1, setpoints=[x1, x2], paramtype='numeric') - meas.register_parameter(y2, setpoints=[x1, x2], paramtype='numeric') + meas.register_parameter(x1, paramtype="numeric") + meas.register_parameter(x2, paramtype="numeric") + meas.register_parameter(y1, setpoints=[x1, x2], paramtype="numeric") + meas.register_parameter(y2, setpoints=[x1, x2], paramtype="numeric") data = np.random.rand(4, size1, size2) with meas.run(write_in_background=bg_writing) as datasaver: - datasaver.add_result((str(x1), data[0, :, :]), - (str(x2), data[1, :, :]), - (str(y1), data[2, :, :]), - (str(y2), data[3, :, :])) + datasaver.add_result( + (str(x1), data[0, :, :]), + (str(x2), data[1, :, :]), + (str(y1), data[2, :, :]), + (str(y2), data[3, :, :]), + ) # The factor of 2 is due to there being 2 top-level params assert datasaver.points_written == 2 * (size1 * size2) dataset = load_by_id(datasaver.run_id) assert isinstance(dataset, DataSet) all_data = dataset.get_parameter_data() - for outer in ('y1', 'y2'): - for inner in ('x1', 'x2', outer): + for outer in ("y1", "y2"): + for inner in ("x1", "x2", outer): mydata = all_data[outer][inner] - assert mydata.shape == (size1 * size2, ) + assert mydata.shape == (size1 * size2,) assert mydata.dtype == np.float64 @@ -1780,7 +1839,7 @@ def test_datasaver_multidimarrayparameter_as_array( """ array_param = SpectrumAnalyzer.multidimspectrum meas = Measurement() - meas.register_parameter(array_param, paramtype='array') + meas.register_parameter(array_param, paramtype="array") assert len(meas.parameters) == 4 inserted_data = array_param.get() with 
meas.run(write_in_background=bg_writing) as datasaver: @@ -1792,9 +1851,9 @@ def test_datasaver_multidimarrayparameter_as_array( expected_shape = (1, 100, 50, 20) loaded_data = ds.get_parameter_data() for i in range(3): - data = loaded_data['dummy_SA_multidimspectrum'][f'dummy_SA_Frequency{i}'] + data = loaded_data["dummy_SA_multidimspectrum"][f"dummy_SA_Frequency{i}"] aux_shape = list(expected_shape) - aux_shape.pop(i+1) + aux_shape.pop(i + 1) assert data.shape == expected_shape for j in range(aux_shape[1]): @@ -1809,10 +1868,12 @@ def test_datasaver_multidimarrayparameter_as_array( else: raise RuntimeError("Unknown dim") - assert_array_equal(mydata, - np.linspace(array_param.start, - array_param.stop, - array_param.npts[i])) + assert_array_equal( + mydata, + np.linspace( + array_param.start, array_param.stop, array_param.npts[i] + ), + ) @pytest.mark.parametrize("bg_writing", [True, False]) @@ -1827,7 +1888,7 @@ def test_datasaver_multidimarrayparameter_as_numeric( array_param = SpectrumAnalyzer.multidimspectrum meas = Measurement() - meas.register_parameter(array_param, paramtype='numeric') + meas.register_parameter(array_param, paramtype="numeric") dims = len(array_param.shape) assert len(meas.parameters) == dims + 1 @@ -1841,23 +1902,24 @@ def test_datasaver_multidimarrayparameter_as_numeric( assert isinstance(ds, DataSet) # check setpoints - expected_setpoints_vectors = (np.linspace(array_param.start, - array_param.stop, - array_param.npts[i]) for i in - range(dims)) - expected_setpoints_matrix = np.meshgrid(*expected_setpoints_vectors, - indexing='ij') + expected_setpoints_vectors = ( + np.linspace(array_param.start, array_param.stop, array_param.npts[i]) + for i in range(dims) + ) + expected_setpoints_matrix = np.meshgrid(*expected_setpoints_vectors, indexing="ij") expected_setpoints = tuple( - setpoint_array.ravel() for setpoint_array in expected_setpoints_matrix) + setpoint_array.ravel() for setpoint_array in expected_setpoints_matrix + ) loaded_data = ds.get_parameter_data() for i in range(dims): - data = loaded_data['dummy_SA_multidimspectrum'][f'dummy_SA_Frequency{i}'] + data = loaded_data["dummy_SA_multidimspectrum"][f"dummy_SA_Frequency{i}"] assert len(data) == points_expected - assert_allclose(data.squeeze(), - expected_setpoints[i]) - data = loaded_data['dummy_SA_multidimspectrum']['dummy_SA_multidimspectrum'].squeeze() + assert_allclose(data.squeeze(), expected_setpoints[i]) + data = loaded_data["dummy_SA_multidimspectrum"][ + "dummy_SA_multidimspectrum" + ].squeeze() assert_allclose(data, inserted_data.ravel()) @@ -1881,8 +1943,12 @@ def test_datasaver_multi_parameters_scalar( assert datasaver.points_written == 2 ds = load_by_id(datasaver.run_id) assert isinstance(ds, DataSet) - assert ds.get_parameter_data()['dummy_channel_inst_ChanA_thisparam']['dummy_channel_inst_ChanA_thisparam'] == np.array([[0]]) - assert ds.get_parameter_data()['dummy_channel_inst_ChanA_thatparam']['dummy_channel_inst_ChanA_thatparam'] == np.array([[1]]) + assert ds.get_parameter_data()["dummy_channel_inst_ChanA_thisparam"][ + "dummy_channel_inst_ChanA_thisparam" + ] == np.array([[0]]) + assert ds.get_parameter_data()["dummy_channel_inst_ChanA_thatparam"][ + "dummy_channel_inst_ChanA_thatparam" + ] == np.array([[1]]) @pytest.mark.parametrize("bg_writing", [True, False]) @@ -1895,9 +1961,11 @@ def test_datasaver_multi_parameters_array(channel_array_instrument, bg_writing) param = channel_array_instrument.A.dummy_multi_parameter meas.register_parameter(param) assert len(meas.parameters) == 3 # two 
params + 1D identical setpoints - param_names = ('dummy_channel_inst_ChanA_multi_setpoint_param_this_setpoint', - 'dummy_channel_inst_ChanA_multi_setpoint_param_this', - 'dummy_channel_inst_ChanA_multi_setpoint_param_that') + param_names = ( + "dummy_channel_inst_ChanA_multi_setpoint_param_this_setpoint", + "dummy_channel_inst_ChanA_multi_setpoint_param_this", + "dummy_channel_inst_ChanA_multi_setpoint_param_that", + ) assert set(meas.parameters.keys()) == set(param_names) this_ps = meas.parameters[param_names[1]] that_ps = meas.parameters[param_names[2]] @@ -1912,8 +1980,12 @@ def test_datasaver_multi_parameters_array(channel_array_instrument, bg_writing) assert isinstance(ds, DataSet) setpts = np.arange(5, 10) - np.testing.assert_array_equal(ds.get_parameter_data()[param_names[1]][param_names[0]], setpts) - np.testing.assert_array_equal(ds.get_parameter_data()[param_names[2]][param_names[0]], setpts) + np.testing.assert_array_equal( + ds.get_parameter_data()[param_names[1]][param_names[0]], setpts + ) + np.testing.assert_array_equal( + ds.get_parameter_data()[param_names[2]][param_names[0]], setpts + ) this_read_data = ds.get_parameter_data()[param_names[1]][param_names[1]] that_read_data = ds.get_parameter_data()[param_names[2]][param_names[2]] @@ -1930,19 +2002,16 @@ def test_datasaver_2d_multi_parameters_array( Test that we can register multiparameters that are array like and 2D. """ sp_name_1 = "dummy_channel_inst_ChanA_multi_2d_setpoint_param_this_setpoint" - sp_name_2 = 'dummy_channel_inst_ChanA_multi_2d_setpoint_param_that_setpoint' - p_name_1 = 'dummy_channel_inst_ChanA_this' - p_name_2 = 'dummy_channel_inst_ChanA_that' + sp_name_2 = "dummy_channel_inst_ChanA_multi_2d_setpoint_param_that_setpoint" + p_name_1 = "dummy_channel_inst_ChanA_this" + p_name_2 = "dummy_channel_inst_ChanA_that" from functools import reduce meas = Measurement() param = channel_array_instrument.A.dummy_2d_multi_parameter meas.register_parameter(param) assert len(meas.parameters) == 4 # two params + 2D identical setpoints - param_names = (sp_name_1, - sp_name_2, - p_name_1, - p_name_2) + param_names = (sp_name_1, sp_name_2, p_name_1, p_name_2) assert set(meas.parameters.keys()) == set(param_names) this_ps = meas.parameters[p_name_1] that_ps = meas.parameters[p_name_2] @@ -1969,20 +2038,16 @@ def test_datasaver_2d_multi_parameters_array( assert isinstance(ds, DataSet) np.testing.assert_array_equal( - ds.get_parameter_data()[p_name_1][sp_name_1], - this_sp_val + ds.get_parameter_data()[p_name_1][sp_name_1], this_sp_val ) np.testing.assert_array_equal( - ds.get_parameter_data()[p_name_1][sp_name_2], - that_sp_val + ds.get_parameter_data()[p_name_1][sp_name_2], that_sp_val ) np.testing.assert_array_equal( - ds.get_parameter_data()[p_name_2][sp_name_1], - this_sp_val + ds.get_parameter_data()[p_name_2][sp_name_1], this_sp_val ) np.testing.assert_array_equal( - ds.get_parameter_data()[p_name_2][sp_name_2], - that_sp_val + ds.get_parameter_data()[p_name_2][sp_name_2], that_sp_val ) this_read_data = ds.get_parameter_data()[p_name_1][p_name_1] @@ -1996,7 +2061,7 @@ def test_datasaver_2d_multi_parameters_array( @pytest.mark.usefixtures("experiment") @pytest.mark.parametrize("bg_writing", [True, False]) -@pytest.mark.parametrize("storage_type", ['numeric', 'array']) +@pytest.mark.parametrize("storage_type", ["numeric", "array"]) @settings(deadline=None) @given(Ns=hst.lists(hst.integers(2, 10), min_size=2, max_size=5)) def test_datasaver_arrays_of_different_length(storage_type, Ns, bg_writing) -> None: @@ -2008,15 
+2073,14 @@ def test_datasaver_arrays_of_different_length(storage_type, Ns, bg_writing) -> N no_of_signals = len(Ns) meas = Measurement() - meas.register_custom_parameter('temperature', - paramtype='numeric', - label='Temperature', - unit='K') + meas.register_custom_parameter( + "temperature", paramtype="numeric", label="Temperature", unit="K" + ) for n in range(no_of_signals): - meas.register_custom_parameter(f'freqs{n}', paramtype=storage_type) - meas.register_custom_parameter(f'signal{n}', - paramtype=storage_type, - setpoints=(f'freqs{n}', 'temperature')) + meas.register_custom_parameter(f"freqs{n}", paramtype=storage_type) + meas.register_custom_parameter( + f"signal{n}", paramtype=storage_type, setpoints=(f"freqs{n}", "temperature") + ) with meas.run(write_in_background=bg_writing) as datasaver: result_t = ("temperature", 70) @@ -2036,9 +2100,9 @@ def test_datasaver_arrays_of_different_length(storage_type, Ns, bg_writing) -> N assert isinstance(ds, DataSet) data = ds.get_parameter_data() - assert list(data.keys()) == [f'signal{n}' for n in range(no_of_signals)] + assert list(data.keys()) == [f"signal{n}" for n in range(no_of_signals)] for n in range(no_of_signals): - assert (data[f'signal{n}']['temperature'] == np.array([70]*(Ns[n]))).all() + assert (data[f"signal{n}"]["temperature"] == np.array([70] * (Ns[n]))).all() @pytest.mark.parametrize("bg_writing", [True, False]) @@ -2058,66 +2122,74 @@ def test_save_complex_num(complex_num_instrument, bg_writing) -> None: some_complex_array_setpoints = complex_num_instrument.some_complex_array_setpoints meas = Measurement() - meas.register_parameter(setparam, paramtype='numeric') - meas.register_parameter(param, paramtype='complex', setpoints=(setparam,)) - meas.register_parameter(arrayparam, paramtype='array', - setpoints=(setparam,)) - - meas.register_parameter(some_complex_array_setpoints, paramtype='numeric') - meas.register_parameter(complexarrayparam, paramtype='complex', - setpoints=(setparam, some_complex_array_setpoints)) + meas.register_parameter(setparam, paramtype="numeric") + meas.register_parameter(param, paramtype="complex", setpoints=(setparam,)) + meas.register_parameter(arrayparam, paramtype="array", setpoints=(setparam,)) + + meas.register_parameter(some_complex_array_setpoints, paramtype="numeric") + meas.register_parameter( + complexarrayparam, + paramtype="complex", + setpoints=(setparam, some_complex_array_setpoints), + ) with meas.run(write_in_background=bg_writing) as datasaver: for i in range(10): setparam.set(i) - datasaver.add_result((setparam, setparam()), - (param, param()), - *expand_setpoints_helper(arrayparam), - (some_complex_array_setpoints, some_complex_array_setpoints.get()), - (complexarrayparam, complexarrayparam.get())) + datasaver.add_result( + (setparam, setparam()), + (param, param()), + *expand_setpoints_helper(arrayparam), + (some_complex_array_setpoints, some_complex_array_setpoints.get()), + (complexarrayparam, complexarrayparam.get()), + ) ds = datasaver.dataset assert isinstance(ds, DataSet) data = ds.get_parameter_data() # scalar complex parameter - setpoints_num = data['dummy_channel_inst_complex_num'][ - 'dummy_channel_inst_setpoint'] - data_num = data['dummy_channel_inst_complex_num'][ - 'dummy_channel_inst_complex_num'] + setpoints_num = data["dummy_channel_inst_complex_num"][ + "dummy_channel_inst_setpoint" + ] + data_num = data["dummy_channel_inst_complex_num"]["dummy_channel_inst_complex_num"] assert_allclose(setpoints_num, np.arange(10)) - assert_allclose(data_num, np.arange(10) + 
1j*np.arange(10)) + assert_allclose(data_num, np.arange(10) + 1j * np.arange(10)) # array parameter - setpoints1_array = data['dummy_channel_inst_some_array'][ - 'dummy_channel_inst_setpoint'] + setpoints1_array = data["dummy_channel_inst_some_array"][ + "dummy_channel_inst_setpoint" + ] assert_allclose(setpoints1_array, np.repeat(np.arange(10), 5).reshape(10, 5)) - setpoints2_array = data['dummy_channel_inst_some_array'][ - 'dummy_channel_inst_some_array_setpoints'] + setpoints2_array = data["dummy_channel_inst_some_array"][ + "dummy_channel_inst_some_array_setpoints" + ] assert_allclose(setpoints2_array, np.tile(np.arange(5), 10).reshape(10, 5)) - array_data = data['dummy_channel_inst_some_array'][ - 'dummy_channel_inst_some_array'] + array_data = data["dummy_channel_inst_some_array"]["dummy_channel_inst_some_array"] assert_allclose(array_data, np.ones((10, 5))) # complex array parameter - setpoints1_array = data['dummy_channel_inst_some_complex_array'][ - 'dummy_channel_inst_setpoint'] + setpoints1_array = data["dummy_channel_inst_some_complex_array"][ + "dummy_channel_inst_setpoint" + ] assert_allclose(setpoints1_array, np.repeat(np.arange(10), 5)) - setpoints2_array = data['dummy_channel_inst_some_complex_array'][ - 'dummy_channel_inst_some_complex_array_setpoints'] + setpoints2_array = data["dummy_channel_inst_some_complex_array"][ + "dummy_channel_inst_some_complex_array_setpoints" + ] assert_allclose(setpoints2_array, np.tile(np.arange(5), 10)) - array_data = data['dummy_channel_inst_some_complex_array'][ - 'dummy_channel_inst_some_complex_array'] + array_data = data["dummy_channel_inst_some_complex_array"][ + "dummy_channel_inst_some_complex_array" + ] - assert_allclose(array_data, np.ones(50)+1j*np.ones(50)) + assert_allclose(array_data, np.ones(50) + 1j * np.ones(50)) @pytest.mark.parametrize("bg_writing", [True, False]) @@ -2134,8 +2206,7 @@ def test_save_and_reload_complex_standalone(complex_num_instrument, bg_writing) ds = datasaver.dataset assert isinstance(ds, DataSet) data = ds.get_parameter_data() - data_num = data['dummy_channel_inst_complex_num'][ - 'dummy_channel_inst_complex_num'] + data_num = data["dummy_channel_inst_complex_num"]["dummy_channel_inst_complex_num"] assert_allclose(data_num, 1 + 1j) @@ -2148,23 +2219,22 @@ def test_save_complex_num_setpoints(complex_num_instrument, bg_writing) -> None: setparam = complex_num_instrument.complex_setpoint param = complex_num_instrument.real_part meas = Measurement() - meas.register_parameter(setparam, paramtype='complex') - meas.register_parameter(param, paramtype='numeric', setpoints=(setparam,)) + meas.register_parameter(setparam, paramtype="complex") + meas.register_parameter(param, paramtype="numeric", setpoints=(setparam,)) with meas.run(write_in_background=bg_writing) as datasaver: for i in range(10): - setparam.set(i+1j*i) - datasaver.add_result((setparam, setparam()), - (param, param())) + setparam.set(i + 1j * i) + datasaver.add_result((setparam, setparam()), (param, param())) ds = datasaver.dataset assert isinstance(ds, DataSet) data = ds.get_parameter_data() - setpoints_num = data['dummy_channel_inst_real_part'][ - 'dummy_channel_inst_complex_setpoint'] - data_num = data['dummy_channel_inst_real_part'][ - 'dummy_channel_inst_real_part'] + setpoints_num = data["dummy_channel_inst_real_part"][ + "dummy_channel_inst_complex_setpoint" + ] + data_num = data["dummy_channel_inst_real_part"]["dummy_channel_inst_real_part"] - assert_allclose(setpoints_num, np.arange(10) + 1j*np.arange(10)) + 
assert_allclose(setpoints_num, np.arange(10) + 1j * np.arange(10)) assert_allclose(data_num, np.arange(10)) @@ -2179,26 +2249,29 @@ def test_save_complex_num_setpoints_array(complex_num_instrument, bg_writing) -> param = complex_num_instrument.some_array meas = Measurement() - meas.register_parameter(setparam, paramtype='complex') - meas.register_parameter(param, paramtype='array', setpoints=(setparam,)) + meas.register_parameter(setparam, paramtype="complex") + meas.register_parameter(param, paramtype="array", setpoints=(setparam,)) with meas.run(write_in_background=bg_writing) as datasaver: for i in range(10): - setparam.set(i+1j*i) - datasaver.add_result((setparam, setparam()), - *expand_setpoints_helper(param)) + setparam.set(i + 1j * i) + datasaver.add_result( + (setparam, setparam()), *expand_setpoints_helper(param) + ) ds = datasaver.dataset assert isinstance(ds, DataSet) data = ds.get_parameter_data() - setpoints1 = data['dummy_channel_inst_some_array'][ - 'dummy_channel_inst_complex_setpoint'] - setpoints2 = data['dummy_channel_inst_some_array'][ - 'dummy_channel_inst_some_array_setpoints'] - data_num = data['dummy_channel_inst_some_array'][ - 'dummy_channel_inst_some_array'] - - assert_allclose(setpoints1, np.repeat(np.arange(10) + - 1j*np.arange(10), 5).reshape((10, 5))) + setpoints1 = data["dummy_channel_inst_some_array"][ + "dummy_channel_inst_complex_setpoint" + ] + setpoints2 = data["dummy_channel_inst_some_array"][ + "dummy_channel_inst_some_array_setpoints" + ] + data_num = data["dummy_channel_inst_some_array"]["dummy_channel_inst_some_array"] + + assert_allclose( + setpoints1, np.repeat(np.arange(10) + 1j * np.arange(10), 5).reshape((10, 5)) + ) assert_allclose(setpoints2, np.tile(np.arange(5), 10).reshape((10, 5))) assert_allclose(data_num, np.ones((10, 5))) @@ -2210,18 +2283,19 @@ def test_save_complex_as_num_raises(complex_num_instrument, bg_writing) -> None: setparam = complex_num_instrument.setpoint param = complex_num_instrument.complex_num meas = Measurement() - meas.register_parameter(setparam, paramtype='numeric') - meas.register_parameter(param, paramtype='numeric', setpoints=(setparam,)) + meas.register_parameter(setparam, paramtype="numeric") + meas.register_parameter(param, paramtype="numeric", setpoints=(setparam,)) - expected_msg = ('Parameter dummy_channel_inst_complex_num is of ' - 'type "numeric", but got a result of ' - 'type complex128') + expected_msg = ( + "Parameter dummy_channel_inst_complex_num is of " + 'type "numeric", but got a result of ' + "type complex128" + ) with meas.run(write_in_background=bg_writing) as datasaver: setparam.set(0) with pytest.raises(ValueError, match=expected_msg): - datasaver.add_result((setparam, setparam()), - (param, param())) + datasaver.add_result((setparam, setparam()), (param, param())) @pytest.mark.parametrize("bg_writing", [True, False]) @@ -2230,47 +2304,47 @@ def test_save_numeric_as_complex_raises(complex_num_instrument, bg_writing) -> N setparam = complex_num_instrument.setpoint param = complex_num_instrument.complex_num meas = Measurement() - meas.register_parameter(setparam, paramtype='numeric') - meas.register_parameter(param, paramtype='complex', setpoints=(setparam,)) + meas.register_parameter(setparam, paramtype="numeric") + meas.register_parameter(param, paramtype="complex", setpoints=(setparam,)) - expected_msg = ('Parameter dummy_channel_inst_complex_num is of ' - 'type "complex", but got a result of type int') + expected_msg = ( + "Parameter dummy_channel_inst_complex_num is of " + 'type 
"complex", but got a result of type int' + ) with meas.run(write_in_background=bg_writing) as datasaver: setparam.set(0) with pytest.raises(ValueError, match=expected_msg): - datasaver.add_result((setparam, setparam()), - (param, setparam())) + datasaver.add_result((setparam, setparam()), (param, setparam())) def test_parameter_inference(channel_array_instrument) -> None: chan = channel_array_instrument.channels[0] # default values assert Measurement._infer_paramtype(chan.temperature, None) is None - assert Measurement._infer_paramtype(chan.dummy_array_parameter, - None) == 'array' - assert Measurement._infer_paramtype(chan.dummy_parameter_with_setpoints, - None) == 'array' - assert Measurement._infer_paramtype(chan.dummy_multi_parameter, - None) is None - assert Measurement._infer_paramtype(chan.dummy_scalar_multi_parameter, - None) is None - assert Measurement._infer_paramtype(chan.dummy_2d_multi_parameter, - None) is None - assert Measurement._infer_paramtype(chan.dummy_text, - None) == 'text' - assert Measurement._infer_paramtype(chan.dummy_complex, - None) == 'complex' + assert Measurement._infer_paramtype(chan.dummy_array_parameter, None) == "array" + assert ( + Measurement._infer_paramtype(chan.dummy_parameter_with_setpoints, None) + == "array" + ) + assert Measurement._infer_paramtype(chan.dummy_multi_parameter, None) is None + assert Measurement._infer_paramtype(chan.dummy_scalar_multi_parameter, None) is None + assert Measurement._infer_paramtype(chan.dummy_2d_multi_parameter, None) is None + assert Measurement._infer_paramtype(chan.dummy_text, None) == "text" + assert Measurement._infer_paramtype(chan.dummy_complex, None) == "complex" # overwrite the default with sensible alternatives - assert Measurement._infer_paramtype(chan.dummy_array_parameter, - 'numeric') == 'numeric' - assert Measurement._infer_paramtype(chan.dummy_parameter_with_setpoints, - 'numeric') == 'numeric' - assert Measurement._infer_paramtype(chan.dummy_multi_parameter, - 'array') == 'array' - assert Measurement._infer_paramtype(chan.dummy_2d_multi_parameter, - 'array') == 'array' + assert ( + Measurement._infer_paramtype(chan.dummy_array_parameter, "numeric") == "numeric" + ) + assert ( + Measurement._infer_paramtype(chan.dummy_parameter_with_setpoints, "numeric") + == "numeric" + ) + assert Measurement._infer_paramtype(chan.dummy_multi_parameter, "array") == "array" + assert ( + Measurement._infer_paramtype(chan.dummy_2d_multi_parameter, "array") == "array" + ) @pytest.mark.parametrize("bg_writing", [True, False]) @@ -2285,20 +2359,23 @@ def test_adding_parents(bg_writing, DAC) -> None: # from the result of that where to measure next. 
We want to annotate the # second run as having the first run as predecessor - - meas = (Measurement() - .register_parameter(DAC.ch1) - .register_parameter(DAC.ch2, setpoints=[DAC.ch1])) + meas = ( + Measurement() + .register_parameter(DAC.ch1) + .register_parameter(DAC.ch2, setpoints=[DAC.ch1]) + ) with meas.run(write_in_background=bg_writing) as datasaver: datasaver.add_result((DAC.ch1, 0), (DAC.ch2, 1)) parent_ds = datasaver.dataset - meas = (Measurement() - .register_parameter(DAC.ch1) - .register_parameter(DAC.ch2, setpoints=[DAC.ch1]) - .register_parent(parent=parent_ds, link_type="predecessor")) + meas = ( + Measurement() + .register_parameter(DAC.ch1) + .register_parameter(DAC.ch2, setpoints=[DAC.ch1]) + .register_parent(parent=parent_ds, link_type="predecessor") + ) with meas.run(write_in_background=bg_writing) as datasaver: datasaver.add_result((DAC.ch1, 1), (DAC.ch2, 2)) diff --git a/tests/dataset/test_database_creation_and_upgrading.py b/tests/dataset/test_database_creation_and_upgrading.py index 64b96ef33bf..318850eab32 100644 --- a/tests/dataset/test_database_creation_and_upgrading.py +++ b/tests/dataset/test_database_creation_and_upgrading.py @@ -9,7 +9,7 @@ import qcodes as qc import qcodes.dataset.descriptions.versioning.serialization as serial -import qcodes.tests.dataset +import tests.dataset from qcodes.dataset import ( ConnectionPlus, connect, @@ -50,11 +50,11 @@ is_column_in_table, one, ) -from qcodes.tests.common import error_caused_by, skip_if_no_fixtures -from qcodes.tests.dataset.conftest import temporarily_copied_DB +from tests.common import error_caused_by, skip_if_no_fixtures +from tests.dataset.conftest import temporarily_copied_DB -fixturepath = os.sep.join(qcodes.tests.dataset.__file__.split(os.sep)[:-1]) -fixturepath = os.path.join(fixturepath, 'fixtures') +fixturepath = os.sep.join(tests.dataset.__file__.split(os.sep)[:-1]) +fixturepath = os.path.join(fixturepath, "fixtures") @contextmanager @@ -63,8 +63,8 @@ def location_and_station_set_to(location: int, work_station: int): if cfg is None: raise RuntimeError("Expected config to be not None.") old_cfg = deepcopy(cfg) - cfg['GUID_components']['location'] = location - cfg['GUID_components']['work_station'] = work_station + cfg["GUID_components"]["location"] = location + cfg["GUID_components"]["work_station"] = work_station try: yield @@ -72,19 +72,20 @@ def location_and_station_set_to(location: int, work_station: int): finally: qc.config.current_config = old_cfg + LATEST_VERSION = _latest_available_version() VERSIONS = tuple(range(LATEST_VERSION + 1)) LATEST_VERSION_ARG = -1 -@pytest.mark.parametrize('ver', VERSIONS + (LATEST_VERSION_ARG,)) +@pytest.mark.parametrize("ver", VERSIONS + (LATEST_VERSION_ARG,)) def test_connect_upgrades_user_version(ver) -> None: expected_version = ver if ver != LATEST_VERSION_ARG else LATEST_VERSION - conn = connect(':memory:', version=ver) + conn = connect(":memory:", version=ver) assert expected_version == get_user_version(conn) -@pytest.mark.parametrize('version', VERSIONS + (LATEST_VERSION_ARG,)) +@pytest.mark.parametrize("version", VERSIONS + (LATEST_VERSION_ARG,)) def test_tables_exist(empty_temp_db, version) -> None: conn = connect( qc.config["core"]["db_location"], qc.config["core"]["db_debug"], version=version @@ -103,7 +104,7 @@ def test_tables_exist(empty_temp_db, version) -> None: def test_initialise_database_at_for_nonexisting_db(tmp_path) -> None: - db_location = str(tmp_path / 'temp.db') + db_location = str(tmp_path / "temp.db") assert not 
os.path.exists(db_location) initialise_or_create_database_at(db_location) @@ -124,7 +125,7 @@ def test_initialise_database_at_for_nonexisting_db_pathlib_path(tmp_path) -> Non def test_initialise_database_at_for_existing_db(tmp_path) -> None: # Define DB location - db_location = str(tmp_path / 'temp.db') + db_location = str(tmp_path / "temp.db") assert not os.path.exists(db_location) # Create DB file @@ -147,14 +148,13 @@ def test_perform_actual_upgrade_0_to_1() -> None: # we cannot use the empty_temp_db, since that has already called connect # and is therefore latest version already - v0fixpath = os.path.join(fixturepath, 'db_files', 'version0') + v0fixpath = os.path.join(fixturepath, "db_files", "version0") - dbname_old = os.path.join(v0fixpath, 'empty.db') + dbname_old = os.path.join(v0fixpath, "empty.db") skip_if_no_fixtures(dbname_old) with temporarily_copied_DB(dbname_old, debug=False, version=0) as conn: - assert get_user_version(conn) == 0 guid_table_query = "SELECT guid FROM runs" @@ -162,7 +162,7 @@ def test_perform_actual_upgrade_0_to_1() -> None: with pytest.raises(RuntimeError) as excinfo: atomic_transaction(conn, guid_table_query) - assert error_caused_by(excinfo, 'no such column: guid') + assert error_caused_by(excinfo, "no such column: guid") perform_db_upgrade_0_to_1(conn) assert get_user_version(conn) == 1 @@ -172,15 +172,13 @@ def test_perform_actual_upgrade_0_to_1() -> None: def test_perform_actual_upgrade_1_to_2() -> None: + v1fixpath = os.path.join(fixturepath, "db_files", "version1") - v1fixpath = os.path.join(fixturepath, 'db_files', 'version1') - - dbname_old = os.path.join(v1fixpath, 'empty.db') + dbname_old = os.path.join(v1fixpath, "empty.db") skip_if_no_fixtures(dbname_old) with temporarily_copied_DB(dbname_old, debug=False, version=1) as conn: - assert get_user_version(conn) == 1 guid_table_query = "SELECT guid FROM runs" @@ -200,23 +198,21 @@ def test_perform_actual_upgrade_1_to_2() -> None: def test_perform_actual_upgrade_2_to_3_empty() -> None: + v2fixpath = os.path.join(fixturepath, "db_files", "version2") - v2fixpath = os.path.join(fixturepath, 'db_files', 'version2') - - dbname_old = os.path.join(v2fixpath, 'empty.db') + dbname_old = os.path.join(v2fixpath, "empty.db") skip_if_no_fixtures(dbname_old) with temporarily_copied_DB(dbname_old, debug=False, version=2) as conn: - assert get_user_version(conn) == 2 - desc_query = 'SELECT run_description FROM runs' + desc_query = "SELECT run_description FROM runs" with pytest.raises(RuntimeError) as excinfo: atomic_transaction(conn, desc_query) - assert error_caused_by(excinfo, 'no such column: run_description') + assert error_caused_by(excinfo, "no such column: run_description") perform_db_upgrade_2_to_3(conn) @@ -227,33 +223,29 @@ def test_perform_actual_upgrade_2_to_3_empty() -> None: def test_perform_actual_upgrade_2_to_3_empty_runs() -> None: + v2fixpath = os.path.join(fixturepath, "db_files", "version2") - v2fixpath = os.path.join(fixturepath, 'db_files', 'version2') - - dbname_old = os.path.join(v2fixpath, 'empty_runs.db') + dbname_old = os.path.join(v2fixpath, "empty_runs.db") skip_if_no_fixtures(dbname_old) with temporarily_copied_DB(dbname_old, debug=False, version=2) as conn: - perform_db_upgrade_2_to_3(conn) def test_perform_actual_upgrade_2_to_3_some_runs() -> None: + v2fixpath = os.path.join(fixturepath, "db_files", "version2") - v2fixpath = os.path.join(fixturepath, 'db_files', 'version2') - - dbname_old = os.path.join(v2fixpath, 'some_runs.db') + dbname_old = os.path.join(v2fixpath, "some_runs.db") 
skip_if_no_fixtures(dbname_old) with temporarily_copied_DB(dbname_old, debug=False, version=2) as conn: - assert get_user_version(conn) == 2 perform_db_upgrade_2_to_3(conn) - desc_query = 'SELECT run_description FROM runs' + desc_query = "SELECT run_description FROM runs" c = atomic_transaction(conn, desc_query) assert len(c.fetchall()) == 10 @@ -266,62 +258,61 @@ def test_perform_actual_upgrade_2_to_3_some_runs() -> None: WHERE run_id == 1 """ c = atomic_transaction(conn, sql) - json_str = one(c, 'run_description') + json_str = one(c, "run_description") unversioned_dict = json.loads(json_str) - idp = InterDependencies._from_dict( - unversioned_dict['interdependencies']) + idp = InterDependencies._from_dict(unversioned_dict["interdependencies"]) assert isinstance(idp, InterDependencies) # here we verify that the dependencies encoded in # tests/dataset/legacy_DB_generation/generate_version_2.py # are recovered - p0 = [p for p in idp.paramspecs if p.name == 'p0'][0] - assert p0.depends_on == '' + p0 = [p for p in idp.paramspecs if p.name == "p0"][0] + assert p0.depends_on == "" assert p0.depends_on_ == [] - assert p0.inferred_from == '' + assert p0.inferred_from == "" assert p0.inferred_from_ == [] assert p0.label == "Parameter 0" assert p0.unit == "unit 0" - p1 = [p for p in idp.paramspecs if p.name == 'p1'][0] - assert p1.depends_on == '' + p1 = [p for p in idp.paramspecs if p.name == "p1"][0] + assert p1.depends_on == "" assert p1.depends_on_ == [] - assert p1.inferred_from == '' + assert p1.inferred_from == "" assert p1.inferred_from_ == [] assert p1.label == "Parameter 1" assert p1.unit == "unit 1" - p2 = [p for p in idp.paramspecs if p.name == 'p2'][0] - assert p2.depends_on == '' + p2 = [p for p in idp.paramspecs if p.name == "p2"][0] + assert p2.depends_on == "" assert p2.depends_on_ == [] - assert p2.inferred_from == 'p0' - assert p2.inferred_from_ == ['p0'] + assert p2.inferred_from == "p0" + assert p2.inferred_from_ == ["p0"] assert p2.label == "Parameter 2" assert p2.unit == "unit 2" - p3 = [p for p in idp.paramspecs if p.name == 'p3'][0] - assert p3.depends_on == '' + p3 = [p for p in idp.paramspecs if p.name == "p3"][0] + assert p3.depends_on == "" assert p3.depends_on_ == [] - assert p3.inferred_from == 'p1, p0' - assert p3.inferred_from_ == ['p1', 'p0'] + assert p3.inferred_from == "p1, p0" + assert p3.inferred_from_ == ["p1", "p0"] assert p3.label == "Parameter 3" assert p3.unit == "unit 3" - p4 = [p for p in idp.paramspecs if p.name == 'p4'][0] - assert p4.depends_on == 'p2, p3' - assert p4.depends_on_ == ['p2', 'p3'] - assert p4.inferred_from == '' + p4 = [p for p in idp.paramspecs if p.name == "p4"][0] + assert p4.depends_on == "p2, p3" + assert p4.depends_on_ == ["p2", "p3"] + assert p4.inferred_from == "" assert p4.inferred_from_ == [] assert p4.label == "Parameter 4" assert p4.unit == "unit 4" - p5 = [p for p in idp.paramspecs if p.name == 'p5'][0] - assert p5.depends_on == '' + p5 = [p for p in idp.paramspecs if p.name == "p5"][0] + assert p5.depends_on == "" assert p5.depends_on_ == [] - assert p5.inferred_from == 'p0' - assert p5.inferred_from_ == ['p0'] + assert p5.inferred_from == "p0" + assert p5.inferred_from_ == ["p0"] assert p5.label == "Parameter 5" assert p5.unit == "unit 5" @@ -332,14 +323,13 @@ def test_perform_upgrade_v2_v3_to_v4_fixes() -> None: version will be corrected when upgraded to v4. 
""" - v3fixpath = os.path.join(fixturepath, 'db_files', 'version3') + v3fixpath = os.path.join(fixturepath, "db_files", "version3") - dbname_old = os.path.join(v3fixpath, 'some_runs_upgraded_2.db') + dbname_old = os.path.join(v3fixpath, "some_runs_upgraded_2.db") skip_if_no_fixtures(dbname_old) with temporarily_copied_DB(dbname_old, debug=False, version=3) as conn: - assert get_user_version(conn) == 3 sql = """ @@ -348,64 +338,63 @@ def test_perform_upgrade_v2_v3_to_v4_fixes() -> None: WHERE run_id == 1 """ c = atomic_transaction(conn, sql) - json_str = one(c, 'run_description') + json_str = one(c, "run_description") unversioned_dict = json.loads(json_str) - idp = InterDependencies._from_dict( - unversioned_dict['interdependencies']) + idp = InterDependencies._from_dict(unversioned_dict["interdependencies"]) assert isinstance(idp, InterDependencies) - p0 = [p for p in idp.paramspecs if p.name == 'p0'][0] - assert p0.depends_on == '' + p0 = [p for p in idp.paramspecs if p.name == "p0"][0] + assert p0.depends_on == "" assert p0.depends_on_ == [] - assert p0.inferred_from == '' + assert p0.inferred_from == "" assert p0.inferred_from_ == [] assert p0.label == "Parameter 0" assert p0.unit == "unit 0" - p1 = [p for p in idp.paramspecs if p.name == 'p1'][0] - assert p1.depends_on == '' + p1 = [p for p in idp.paramspecs if p.name == "p1"][0] + assert p1.depends_on == "" assert p1.depends_on_ == [] - assert p1.inferred_from == '' + assert p1.inferred_from == "" assert p1.inferred_from_ == [] assert p1.label == "Parameter 1" assert p1.unit == "unit 1" - p2 = [p for p in idp.paramspecs if p.name == 'p2'][0] - assert p2.depends_on == '' + p2 = [p for p in idp.paramspecs if p.name == "p2"][0] + assert p2.depends_on == "" assert p2.depends_on_ == [] # the 2 lines below are wrong due to the incorrect upgrade from # db version 2 to 3 - assert p2.inferred_from == 'p, 0' - assert p2.inferred_from_ == ['p', '0'] + assert p2.inferred_from == "p, 0" + assert p2.inferred_from_ == ["p", "0"] assert p2.label == "Parameter 2" assert p2.unit == "unit 2" - p3 = [p for p in idp.paramspecs if p.name == 'p3'][0] - assert p3.depends_on == '' + p3 = [p for p in idp.paramspecs if p.name == "p3"][0] + assert p3.depends_on == "" assert p3.depends_on_ == [] # the 2 lines below are wrong due to the incorrect upgrade from # db version 2 to 3 - assert p3.inferred_from == 'p, 1, ,, , p, 0' - assert p3.inferred_from_ == ['p', '1', ',', ' ', 'p', '0'] + assert p3.inferred_from == "p, 1, ,, , p, 0" + assert p3.inferred_from_ == ["p", "1", ",", " ", "p", "0"] assert p3.label == "Parameter 3" assert p3.unit == "unit 3" - p4 = [p for p in idp.paramspecs if p.name == 'p4'][0] - assert p4.depends_on == 'p2, p3' - assert p4.depends_on_ == ['p2', 'p3'] - assert p4.inferred_from == '' + p4 = [p for p in idp.paramspecs if p.name == "p4"][0] + assert p4.depends_on == "p2, p3" + assert p4.depends_on_ == ["p2", "p3"] + assert p4.inferred_from == "" assert p4.inferred_from_ == [] assert p4.label == "Parameter 4" assert p4.unit == "unit 4" - p5 = [p for p in idp.paramspecs if p.name == 'p5'][0] - assert p5.depends_on == '' + p5 = [p for p in idp.paramspecs if p.name == "p5"][0] + assert p5.depends_on == "" assert p5.depends_on_ == [] # the 2 lines below are wrong due to the incorrect upgrade from # db version 2 to 3. 
Here the interdep is missing - assert p5.inferred_from == '' + assert p5.inferred_from == "" assert p5.inferred_from_ == [] assert p5.label == "Parameter 5" assert p5.unit == "unit 5" @@ -413,59 +402,58 @@ def test_perform_upgrade_v2_v3_to_v4_fixes() -> None: perform_db_upgrade_3_to_4(conn) c = atomic_transaction(conn, sql) - json_str = one(c, 'run_description') + json_str = one(c, "run_description") unversioned_dict = json.loads(json_str) - idp = InterDependencies._from_dict( - unversioned_dict['interdependencies']) + idp = InterDependencies._from_dict(unversioned_dict["interdependencies"]) assert isinstance(idp, InterDependencies) - p0 = [p for p in idp.paramspecs if p.name == 'p0'][0] - assert p0.depends_on == '' + p0 = [p for p in idp.paramspecs if p.name == "p0"][0] + assert p0.depends_on == "" assert p0.depends_on_ == [] - assert p0.inferred_from == '' + assert p0.inferred_from == "" assert p0.inferred_from_ == [] assert p0.label == "Parameter 0" assert p0.unit == "unit 0" - p1 = [p for p in idp.paramspecs if p.name == 'p1'][0] - assert p1.depends_on == '' + p1 = [p for p in idp.paramspecs if p.name == "p1"][0] + assert p1.depends_on == "" assert p1.depends_on_ == [] - assert p1.inferred_from == '' + assert p1.inferred_from == "" assert p1.inferred_from_ == [] assert p1.label == "Parameter 1" assert p1.unit == "unit 1" - p2 = [p for p in idp.paramspecs if p.name == 'p2'][0] - assert p2.depends_on == '' + p2 = [p for p in idp.paramspecs if p.name == "p2"][0] + assert p2.depends_on == "" assert p2.depends_on_ == [] - assert p2.inferred_from == 'p0' - assert p2.inferred_from_ == ['p0'] + assert p2.inferred_from == "p0" + assert p2.inferred_from_ == ["p0"] assert p2.label == "Parameter 2" assert p2.unit == "unit 2" - p3 = [p for p in idp.paramspecs if p.name == 'p3'][0] - assert p3.depends_on == '' + p3 = [p for p in idp.paramspecs if p.name == "p3"][0] + assert p3.depends_on == "" assert p3.depends_on_ == [] - assert p3.inferred_from == 'p1, p0' - assert p3.inferred_from_ == ['p1', 'p0'] + assert p3.inferred_from == "p1, p0" + assert p3.inferred_from_ == ["p1", "p0"] assert p3.label == "Parameter 3" assert p3.unit == "unit 3" - p4 = [p for p in idp.paramspecs if p.name == 'p4'][0] - assert p4.depends_on == 'p2, p3' - assert p4.depends_on_ == ['p2', 'p3'] - assert p4.inferred_from == '' + p4 = [p for p in idp.paramspecs if p.name == "p4"][0] + assert p4.depends_on == "p2, p3" + assert p4.depends_on_ == ["p2", "p3"] + assert p4.inferred_from == "" assert p4.inferred_from_ == [] assert p4.label == "Parameter 4" assert p4.unit == "unit 4" - p5 = [p for p in idp.paramspecs if p.name == 'p5'][0] - assert p5.depends_on == '' + p5 = [p for p in idp.paramspecs if p.name == "p5"][0] + assert p5.depends_on == "" assert p5.depends_on_ == [] - assert p5.inferred_from == 'p0' - assert p5.inferred_from_ == ['p0'] + assert p5.inferred_from == "p0" + assert p5.inferred_from_ == ["p0"] assert p5.label == "Parameter 5" assert p5.unit == "unit 5" @@ -475,14 +463,13 @@ def test_perform_upgrade_v3_to_v4() -> None: Test that a db upgrade from v2 to v4 works correctly. 
""" - v3fixpath = os.path.join(fixturepath, 'db_files', 'version3') + v3fixpath = os.path.join(fixturepath, "db_files", "version3") - dbname_old = os.path.join(v3fixpath, 'some_runs_upgraded_2.db') + dbname_old = os.path.join(v3fixpath, "some_runs_upgraded_2.db") skip_if_no_fixtures(dbname_old) with temporarily_copied_DB(dbname_old, debug=False, version=3) as conn: - assert get_user_version(conn) == 3 sql = """ @@ -494,66 +481,64 @@ def test_perform_upgrade_v3_to_v4() -> None: perform_db_upgrade_3_to_4(conn) c = atomic_transaction(conn, sql) - json_str = one(c, 'run_description') + json_str = one(c, "run_description") unversioned_dict = json.loads(json_str) - idp = InterDependencies._from_dict( - unversioned_dict['interdependencies']) + idp = InterDependencies._from_dict(unversioned_dict["interdependencies"]) assert isinstance(idp, InterDependencies) - p0 = [p for p in idp.paramspecs if p.name == 'p0'][0] - assert p0.depends_on == '' + p0 = [p for p in idp.paramspecs if p.name == "p0"][0] + assert p0.depends_on == "" assert p0.depends_on_ == [] - assert p0.inferred_from == '' + assert p0.inferred_from == "" assert p0.inferred_from_ == [] assert p0.label == "Parameter 0" assert p0.unit == "unit 0" - p1 = [p for p in idp.paramspecs if p.name == 'p1'][0] - assert p1.depends_on == '' + p1 = [p for p in idp.paramspecs if p.name == "p1"][0] + assert p1.depends_on == "" assert p1.depends_on_ == [] - assert p1.inferred_from == '' + assert p1.inferred_from == "" assert p1.inferred_from_ == [] assert p1.label == "Parameter 1" assert p1.unit == "unit 1" - p2 = [p for p in idp.paramspecs if p.name == 'p2'][0] - assert p2.depends_on == '' + p2 = [p for p in idp.paramspecs if p.name == "p2"][0] + assert p2.depends_on == "" assert p2.depends_on_ == [] - assert p2.inferred_from == 'p0' - assert p2.inferred_from_ == ['p0'] + assert p2.inferred_from == "p0" + assert p2.inferred_from_ == ["p0"] assert p2.label == "Parameter 2" assert p2.unit == "unit 2" - p3 = [p for p in idp.paramspecs if p.name == 'p3'][0] - assert p3.depends_on == '' + p3 = [p for p in idp.paramspecs if p.name == "p3"][0] + assert p3.depends_on == "" assert p3.depends_on_ == [] - assert p3.inferred_from == 'p1, p0' - assert p3.inferred_from_ == ['p1', 'p0'] + assert p3.inferred_from == "p1, p0" + assert p3.inferred_from_ == ["p1", "p0"] assert p3.label == "Parameter 3" assert p3.unit == "unit 3" - p4 = [p for p in idp.paramspecs if p.name == 'p4'][0] - assert p4.depends_on == 'p2, p3' - assert p4.depends_on_ == ['p2', 'p3'] - assert p4.inferred_from == '' + p4 = [p for p in idp.paramspecs if p.name == "p4"][0] + assert p4.depends_on == "p2, p3" + assert p4.depends_on_ == ["p2", "p3"] + assert p4.inferred_from == "" assert p4.inferred_from_ == [] assert p4.label == "Parameter 4" assert p4.unit == "unit 4" - p5 = [p for p in idp.paramspecs if p.name == 'p5'][0] - assert p5.depends_on == '' + p5 = [p for p in idp.paramspecs if p.name == "p5"][0] + assert p5.depends_on == "" assert p5.depends_on_ == [] - assert p5.inferred_from == 'p0' - assert p5.inferred_from_ == ['p0'] + assert p5.inferred_from == "p0" + assert p5.inferred_from_ == ["p0"] assert p5.label == "Parameter 5" assert p5.unit == "unit 5" @pytest.mark.usefixtures("empty_temp_db") def test_update_existing_guids(caplog: LogCaptureFixture) -> None: - old_loc = 101 old_ws = 1200 @@ -563,56 +548,62 @@ def test_update_existing_guids(caplog: LogCaptureFixture) -> None: # prepare five runs with different location and work station codes with location_and_station_set_to(0, 0): - 
new_experiment('test', sample_name='test_sample') + new_experiment("test", sample_name="test_sample") - ds1 = new_data_set('ds_one') - xparam = ParamSpecBase('x', 'numeric') + ds1 = new_data_set("ds_one") + xparam = ParamSpecBase("x", "numeric") idps = InterDependencies_(standalones=(xparam,)) ds1.set_interdependencies(idps) ds1.mark_started() - ds1.add_results([{'x': 1}]) + ds1.add_results([{"x": 1}]) - ds2 = new_data_set('ds_two') + ds2 = new_data_set("ds_two") ds2.set_interdependencies(idps) ds2.mark_started() - ds2.add_results([{'x': 2}]) + ds2.add_results([{"x": 2}]) _assert_loc_station(ds1, 0, 0) _assert_loc_station(ds2, 0, 0) with location_and_station_set_to(0, old_ws): - ds3 = new_data_set('ds_three') + ds3 = new_data_set("ds_three") ds3.set_interdependencies(idps) ds3.mark_started() - ds3.add_results([{'x': 3}]) + ds3.add_results([{"x": 3}]) _assert_loc_station(ds3, 0, old_ws) with location_and_station_set_to(old_loc, 0): - ds4 = new_data_set('ds_four') + ds4 = new_data_set("ds_four") ds4.set_interdependencies(idps) ds4.mark_started() - ds4.add_results([{'x': 4}]) + ds4.add_results([{"x": 4}]) _assert_loc_station(ds4, old_loc, 0) with location_and_station_set_to(old_loc, old_ws): - ds5 = new_data_set('ds_five') + ds5 = new_data_set("ds_five") ds5.set_interdependencies(idps) ds5.mark_started() - ds5.add_results([{'x': 5}]) + ds5.add_results([{"x": 5}]) _assert_loc_station(ds5, old_loc, old_ws) with location_and_station_set_to(new_loc, new_ws): - caplog.clear() - expected_levels = ['INFO', - 'INFO', 'INFO', - 'INFO', 'INFO', - 'INFO', 'WARNING', - 'INFO', 'WARNING', - 'INFO', 'INFO'] + expected_levels = [ + "INFO", + "INFO", + "INFO", + "INFO", + "INFO", + "INFO", + "WARNING", + "INFO", + "WARNING", + "INFO", + "INFO", + ] with caplog.at_level(logging.INFO): update_GUIDs(ds1.conn) @@ -634,36 +625,35 @@ def _assert_loc_station(ds, expected_loc, expected_station): assert guid_dict["work_station"] == expected_station -@pytest.mark.parametrize('db_file', - ['empty', - 'with_runs_but_no_snapshots', - 'with_runs_and_snapshots']) +@pytest.mark.parametrize( + "db_file", ["empty", "with_runs_but_no_snapshots", "with_runs_and_snapshots"] +) def test_perform_actual_upgrade_4_to_5(db_file) -> None: - v4fixpath = os.path.join(fixturepath, 'db_files', 'version4') + v4fixpath = os.path.join(fixturepath, "db_files", "version4") - db_file += '.db' + db_file += ".db" dbname_old = os.path.join(v4fixpath, db_file) skip_if_no_fixtures(dbname_old) with temporarily_copied_DB(dbname_old, debug=False, version=4) as conn: # firstly, assert the situation with 'snapshot' column of 'runs' table - if 'with_runs_and_snapshots' in db_file: - assert is_column_in_table(conn, 'runs', 'snapshot') + if "with_runs_and_snapshots" in db_file: + assert is_column_in_table(conn, "runs", "snapshot") else: - assert not is_column_in_table(conn, 'runs', 'snapshot') + assert not is_column_in_table(conn, "runs", "snapshot") # secondly, perform the upgrade perform_db_upgrade_4_to_5(conn) # finally, assert the 'snapshot' column exists in 'runs' table - assert is_column_in_table(conn, 'runs', 'snapshot') + assert is_column_in_table(conn, "runs", "snapshot") def test_perform_actual_upgrade_5_to_6() -> None: - fixpath = os.path.join(fixturepath, 'db_files', 'version5') + fixpath = os.path.join(fixturepath, "db_files", "version5") - db_file = 'empty.db' + db_file = "empty.db" dbname_old = os.path.join(fixpath, db_file) skip_if_no_fixtures(dbname_old) @@ -672,7 +662,7 @@ def test_perform_actual_upgrade_5_to_6() -> None: 
perform_db_upgrade_5_to_6(conn) assert get_user_version(conn) == 6 - db_file = 'some_runs.db' + db_file = "some_runs.db" dbname_old = os.path.join(fixpath, db_file) with temporarily_copied_DB(dbname_old, debug=False, version=5) as conn: @@ -680,24 +670,23 @@ def test_perform_actual_upgrade_5_to_6() -> None: assert get_user_version(conn) == 6 no_of_runs_query = "SELECT max(run_id) FROM runs" - no_of_runs = one( - atomic_transaction(conn, no_of_runs_query), 'max(run_id)') + no_of_runs = one(atomic_transaction(conn, no_of_runs_query), "max(run_id)") assert no_of_runs == 10 for run_id in range(1, no_of_runs + 1): json_str = get_run_description(conn, run_id) deser = json.loads(json_str) - assert deser['version'] == 0 + assert deser["version"] == 0 desc = serial.from_json_to_current(json_str) assert desc._version == 3 def test_perform_upgrade_6_7() -> None: - fixpath = os.path.join(fixturepath, 'db_files', 'version6') + fixpath = os.path.join(fixturepath, "db_files", "version6") - db_file = 'empty.db' + db_file = "empty.db" dbname_old = os.path.join(fixpath, db_file) skip_if_no_fixtures(dbname_old) @@ -708,10 +697,9 @@ def test_perform_upgrade_6_7() -> None: def test_perform_actual_upgrade_6_to_7() -> None: + fixpath = os.path.join(fixturepath, "db_files", "version6") - fixpath = os.path.join(fixturepath, 'db_files', 'version6') - - db_file = 'some_runs.db' + db_file = "some_runs.db" dbname_old = os.path.join(fixpath, db_file) skip_if_no_fixtures(dbname_old) @@ -722,8 +710,7 @@ def test_perform_actual_upgrade_6_to_7() -> None: assert get_user_version(conn) == 7 no_of_runs_query = "SELECT max(run_id) FROM runs" - no_of_runs = one( - atomic_transaction(conn, no_of_runs_query), 'max(run_id)') + no_of_runs = one(atomic_transaction(conn, no_of_runs_query), "max(run_id)") assert no_of_runs == 10 c = atomic_transaction(conn, "PRAGMA table_info(runs)") @@ -731,8 +718,8 @@ def test_perform_actual_upgrade_6_to_7() -> None: columns = c.fetchall() col_names = [col[description["name"]] for col in columns] - assert 'captured_run_id' in col_names - assert 'captured_counter' in col_names + assert "captured_run_id" in col_names + assert "captured_counter" in col_names for run_id in range(1, no_of_runs + 1): ds1 = load_by_id(run_id, conn) @@ -769,9 +756,9 @@ def test_perform_actual_upgrade_6_to_newest_add_new_data() -> None: from qcodes.dataset.measurements import Measurement from qcodes.parameters import Parameter - fixpath = os.path.join(fixturepath, 'db_files', 'version6') + fixpath = os.path.join(fixturepath, "db_files", "version6") - db_file = 'some_runs.db' + db_file = "some_runs.db" dbname_old = os.path.join(fixpath, db_file) skip_if_no_fixtures(dbname_old) @@ -781,19 +768,24 @@ def test_perform_actual_upgrade_6_to_newest_add_new_data() -> None: perform_db_upgrade(conn) assert get_user_version(conn) >= 7 no_of_runs_query = "SELECT max(run_id) FROM runs" - no_of_runs = one( - atomic_transaction(conn, no_of_runs_query), 'max(run_id)') + no_of_runs = one(atomic_transaction(conn, no_of_runs_query), "max(run_id)") # Now let's insert new runs and ensure that they also get # captured_run_id assigned. 
params = [] for n in range(5): - params.append(Parameter(f'p{n}', label=f'Parameter {n}', - unit=f'unit {n}', set_cmd=None, - get_cmd=None)) + params.append( + Parameter( + f"p{n}", + label=f"Parameter {n}", + unit=f"unit {n}", + set_cmd=None, + get_cmd=None, + ) + ) # Set up an experiment - exp = new_experiment('some-exp', 'some-sample', conn=conn) + exp = new_experiment("some-exp", "some-sample", conn=conn) meas = Measurement(exp=exp) meas.register_parameter(params[0]) meas.register_parameter(params[1]) @@ -807,14 +799,15 @@ def test_perform_actual_upgrade_6_to_newest_add_new_data() -> None: for x in np.random.rand(10): for y in np.random.rand(10): z = np.random.rand() - datasaver.add_result((params[0], 0), - (params[1], 1), - (params[2], x), - (params[3], y), - (params[4], z)) - - no_of_runs_new = one( - atomic_transaction(conn, no_of_runs_query), 'max(run_id)') + datasaver.add_result( + (params[0], 0), + (params[1], 1), + (params[2], x), + (params[3], y), + (params[4], z), + ) + + no_of_runs_new = one(atomic_transaction(conn, no_of_runs_query), "max(run_id)") assert no_of_runs_new == 20 # check that run_id is equivalent to captured_run_id for new @@ -839,13 +832,12 @@ def test_perform_actual_upgrade_6_to_newest_add_new_data() -> None: for counter in range(1, no_of_runs_new - no_of_runs + 1): ds1 = load_by_counter(counter, exp_id, conn) # giving only the counter is not unique since we have 2 experiments - with pytest.raises(NameError, match="More than one" - " matching dataset"): + with pytest.raises(NameError, match="More than one matching dataset"): load_by_run_spec(captured_counter=counter, conn=conn) # however we can supply counter and experiment - ds2 = load_by_run_spec(captured_counter=counter, - experiment_name='some-exp', - conn=conn) + ds2 = load_by_run_spec( + captured_counter=counter, experiment_name="some-exp", conn=conn + ) assert isinstance(ds1, DataSet) assert ds1.the_same_dataset_as(ds2) @@ -855,38 +847,33 @@ def test_perform_actual_upgrade_6_to_newest_add_new_data() -> None: assert ds2.counter == ds2.captured_counter -@pytest.mark.parametrize('db_file', - ['empty', - 'some_runs']) +@pytest.mark.parametrize("db_file", ["empty", "some_runs"]) def test_perform_actual_upgrade_7_to_8(db_file) -> None: - v7fixpath = os.path.join(fixturepath, 'db_files', 'version7') + v7fixpath = os.path.join(fixturepath, "db_files", "version7") - db_file += '.db' + db_file += ".db" dbname_old = os.path.join(v7fixpath, db_file) skip_if_no_fixtures(dbname_old) with temporarily_copied_DB(dbname_old, debug=False, version=7) as conn: - perform_db_upgrade_7_to_8(conn) - assert is_column_in_table(conn, 'runs', 'parent_datasets') + assert is_column_in_table(conn, "runs", "parent_datasets") @pytest.mark.usefixtures("empty_temp_db") def test_cannot_connect_to_newer_db() -> None: - conn = connect(qc.config["core"]["db_location"], - qc.config["core"]["db_debug"]) + conn = connect(qc.config["core"]["db_location"], qc.config["core"]["db_debug"]) current_version = get_user_version(conn) - set_user_version(conn, current_version+1) + set_user_version(conn, current_version + 1) conn.close() err_msg = ( f"is version {current_version + 1} but this version of QCoDeS " f"supports up to version {current_version}" ) with pytest.raises(RuntimeError, match=err_msg): - conn = connect(qc.config["core"]["db_location"], - qc.config["core"]["db_debug"]) + conn = connect(qc.config["core"]["db_location"], qc.config["core"]["db_debug"]) def test_latest_available_version() -> None: @@ -895,10 +882,9 @@ def 
test_latest_available_version() -> None: @pytest.mark.parametrize("version", VERSIONS[:-1]) def test_getting_db_version(version) -> None: + fixpath = os.path.join(fixturepath, "db_files", f"version{version}") - fixpath = os.path.join(fixturepath, 'db_files', f'version{version}') - - dbname = os.path.join(fixpath, 'empty.db') + dbname = os.path.join(fixpath, "empty.db") skip_if_no_fixtures(dbname) @@ -908,19 +894,16 @@ def test_getting_db_version(version) -> None: assert new_v == LATEST_VERSION -@pytest.mark.parametrize('db_file', - ['empty', - 'some_runs']) +@pytest.mark.parametrize("db_file", ["empty", "some_runs"]) def test_perform_actual_upgrade_8_to_9(db_file) -> None: - v8fixpath = os.path.join(fixturepath, 'db_files', 'version8') + v8fixpath = os.path.join(fixturepath, "db_files", "version8") - db_file += '.db' + db_file += ".db" dbname_old = os.path.join(v8fixpath, db_file) skip_if_no_fixtures(dbname_old) with temporarily_copied_DB(dbname_old, debug=False, version=8) as conn: - index_query = "PRAGMA index_list(runs)" c = atomic_transaction(conn, index_query) diff --git a/tests/dataset/test_database_extract_runs.py b/tests/dataset/test_database_extract_runs.py index a3d863ef148..38c2c17faea 100644 --- a/tests/dataset/test_database_extract_runs.py +++ b/tests/dataset/test_database_extract_runs.py @@ -12,7 +12,7 @@ from numpy.testing import assert_array_equal import qcodes as qc -import qcodes.tests.dataset +import tests.dataset from qcodes.dataset import do1d, do2d from qcodes.dataset.data_set import ( DataSet, @@ -35,7 +35,7 @@ from qcodes.dataset.sqlite.queries import get_experiments from qcodes.instrument_drivers.mock_instruments import DummyInstrument from qcodes.station import Station -from qcodes.tests.common import error_caused_by, skip_if_no_fixtures +from tests.common import error_caused_by, skip_if_no_fixtures @contextmanager @@ -50,7 +50,7 @@ def raise_if_file_changed(path_to_file: str): yield post_operation_time = getmtime(path_to_file) if pre_operation_time != post_operation_time: - raise RuntimeError(f'File {path_to_file} was modified.') + raise RuntimeError(f"File {path_to_file} was modified.") @pytest.fixture(scope="function", name="inst") @@ -60,7 +60,7 @@ def _make_inst(): and removed from the global register of instruments, which, if not done, make break other tests """ - inst = DummyInstrument('extract_run_inst', gates=['back', 'plunger', 'cutter']) + inst = DummyInstrument("extract_run_inst", gates=["back", "plunger", "cutter"]) yield inst inst.close() @@ -78,7 +78,6 @@ def test_missing_runs_raises(two_empty_temp_db_connections, some_interdeps) -> N exp_1_run_ids = [] for _ in range(5): - source_dataset = DataSet(conn=source_conn, exp_id=source_exp_1.exp_id) exp_1_run_ids.append(source_dataset.run_id) source_dataset.set_interdependencies(some_interdeps[1]) @@ -86,8 +85,9 @@ def test_missing_runs_raises(two_empty_temp_db_connections, some_interdeps) -> N source_dataset.mark_started() for val in range(10): - source_dataset.add_results([{name: val - for name in some_interdeps[1].names}]) + source_dataset.add_results( + [{name: val for name in some_interdeps[1].names}] + ) source_dataset.mark_completed() source_path = path_to_dbfile(source_conn) @@ -96,9 +96,11 @@ def test_missing_runs_raises(two_empty_temp_db_connections, some_interdeps) -> N run_ids = [1, 8, 5, 3, 2, 4, 4, 4, 7, 8] wrong_ids = [8, 7, 8] - expected_err = ("Error: not all run_ids exist in the source database. 
" - "The following run(s) is/are not present: " - f"{wrong_ids}") + expected_err = ( + "Error: not all run_ids exist in the source database. " + "The following run(s) is/are not present: " + f"{wrong_ids}" + ) with pytest.raises(ValueError, match=re.escape(expected_err)): extract_runs_into_db(source_path, target_path, *run_ids) @@ -122,26 +124,32 @@ def test_basic_extraction(two_empty_temp_db_connections, some_interdeps) -> None with pytest.raises(RuntimeError) as excinfo: extract_runs_into_db(source_path, target_path, source_dataset.run_id) - assert error_caused_by(excinfo, ('Dataset not completed. An incomplete ' - 'dataset can not be copied. The ' - 'incomplete dataset has GUID: ' - f'{source_dataset.guid} and run_id: ' - f'{source_dataset.run_id}')) + assert error_caused_by( + excinfo, + ( + "Dataset not completed. An incomplete " + "dataset can not be copied. The " + "incomplete dataset has GUID: " + f"{source_dataset.guid} and run_id: " + f"{source_dataset.run_id}" + ), + ) source_dataset.set_interdependencies(some_interdeps[0]) - source_dataset.parent_dataset_links = [Link(head=source_dataset.guid, - tail=str(uuid.uuid4()), - edge_type='test_link')] + source_dataset.parent_dataset_links = [ + Link(head=source_dataset.guid, tail=str(uuid.uuid4()), edge_type="test_link") + ] source_dataset.mark_started() for value in range(10): - result = {ps.name: type_casters[ps.type](value) - for ps in some_interdeps[0].paramspecs} + result = { + ps.name: type_casters[ps.type](value) for ps in some_interdeps[0].paramspecs + } source_dataset.add_results([result]) - source_dataset.add_metadata('goodness', 'fair') - source_dataset.add_metadata('test', True) + source_dataset.add_metadata("goodness", "fair") + source_dataset.add_metadata("test", True) source_dataset.mark_completed() @@ -168,15 +176,18 @@ def test_basic_extraction(two_empty_temp_db_connections, some_interdeps) -> None assert source_dataset.the_same_dataset_as(target_dataset) assert source_dataset.parameters is not None assert target_dataset.parameters is not None - source_data = source_dataset.get_parameter_data(*source_dataset.parameters.split(',')) - target_data = target_dataset.get_parameter_data(*target_dataset.parameters.split(',')) + source_data = source_dataset.get_parameter_data( + *source_dataset.parameters.split(",") + ) + target_data = target_dataset.get_parameter_data( + *target_dataset.parameters.split(",") + ) for outkey, outval in source_data.items(): for inkey, inval in outval.items(): np.testing.assert_array_equal(inval, target_data[outkey][inkey]) - exp_attrs = ['name', 'sample_name', 'format_string', 'started_at', - 'finished_at'] + exp_attrs = ["name", "sample_name", "format_string", "started_at", "finished_at"] for exp_attr in exp_attrs: assert getattr(source_exp, exp_attr) == getattr(target_exp, exp_attr) @@ -287,7 +298,6 @@ def test_correct_experiment_routing( exp_1_run_ids = [] for _ in range(5): - source_dataset = DataSet(conn=source_conn, exp_id=source_exp_1.exp_id) exp_1_run_ids.append(source_dataset.run_id) @@ -296,8 +306,9 @@ def test_correct_experiment_routing( source_dataset.mark_started() for val in range(10): - source_dataset.add_results([{name: val - for name in some_interdeps[1].names}]) + source_dataset.add_results( + [{name: val for name in some_interdeps[1].names}] + ) source_dataset.mark_completed() # make a new experiment with 1 run @@ -363,8 +374,8 @@ def test_correct_experiment_routing( assert source_ds.the_same_dataset_as(target_ds) assert source_ds.parameters is not None assert 
target_ds.parameters is not None - source_data = source_ds.get_parameter_data(*source_ds.parameters.split(',')) - target_data = target_ds.get_parameter_data(*target_ds.parameters.split(',')) + source_data = source_ds.get_parameter_data(*source_ds.parameters.split(",")) + target_data = target_ds.get_parameter_data(*target_ds.parameters.split(",")) for outkey, outval in source_data.items(): for inkey, inval in outval.items(): @@ -389,7 +400,6 @@ def test_runs_from_different_experiments_raises( exp_1_run_ids = [] for _ in range(5): - source_dataset = DataSet(conn=source_conn, exp_id=source_exp_1.exp_id) exp_1_run_ids.append(source_dataset.run_id) @@ -398,34 +408,36 @@ def test_runs_from_different_experiments_raises( source_dataset.mark_started() for val in range(10): - source_dataset.add_results([{name: val - for name in some_interdeps[1].names}]) + source_dataset.add_results( + [{name: val for name in some_interdeps[1].names}] + ) source_dataset.mark_completed() # make 5 runs in second experiment exp_2_run_ids = [] for _ in range(5): - source_dataset = DataSet(conn=source_conn, exp_id=source_exp_2.exp_id) exp_2_run_ids.append(source_dataset.run_id) source_dataset.set_interdependencies(some_interdeps[1]) - source_dataset.mark_started() for val in range(10): - source_dataset.add_results([{name: val - for name in some_interdeps[1].names}]) + source_dataset.add_results( + [{name: val for name in some_interdeps[1].names}] + ) source_dataset.mark_completed() run_ids = exp_1_run_ids + exp_2_run_ids source_exp_ids = np.unique([1, 2]) - matchstring = ('Did not receive runs from a single experiment\\. ' - f'Got runs from experiments {source_exp_ids}') + matchstring = ( + "Did not receive runs from a single experiment\\. " + f"Got runs from experiments {source_exp_ids}" + ) # make the matchstring safe to use as a regexp - matchstring = matchstring.replace('[', '\\[').replace(']', '\\]') + matchstring = matchstring.replace("[", "\\[").replace("]", "\\]") with pytest.raises(ValueError, match=matchstring): extract_runs_into_db(source_path, target_path, *run_ids) @@ -469,25 +481,22 @@ def test_result_table_naming_and_run_id( source_ds_1_1.set_interdependencies(some_interdeps[1]) source_ds_1_1.mark_started() - source_ds_1_1.add_results([{name: 0.0 - for name in some_interdeps[1].names}]) + source_ds_1_1.add_results([{name: 0.0 for name in some_interdeps[1].names}]) source_ds_1_1.mark_completed() source_exp2 = Experiment(conn=source_conn) source_ds_2_1 = DataSet(conn=source_conn, exp_id=source_exp2.exp_id) source_ds_2_1.set_interdependencies(some_interdeps[1]) source_ds_2_1.mark_started() - source_ds_2_1.add_results([{name: 0.0 - for name in some_interdeps[1].names}]) + source_ds_2_1.add_results([{name: 0.0 for name in some_interdeps[1].names}]) source_ds_2_1.mark_completed() - source_ds_2_2 = DataSet(conn=source_conn, - exp_id=source_exp2.exp_id, - name="customname") + source_ds_2_2 = DataSet( + conn=source_conn, exp_id=source_exp2.exp_id, name="customname" + ) source_ds_2_2.set_interdependencies(some_interdeps[1]) source_ds_2_2.mark_started() - source_ds_2_2.add_results([{name: 0.0 - for name in some_interdeps[1].names}]) + source_ds_2_2.add_results([{name: 0.0 for name in some_interdeps[1].names}]) source_ds_2_2.mark_completed() extract_runs_into_db(source_path, target_path, source_ds_2_2.run_id) @@ -516,9 +525,9 @@ def test_load_by_X_functions(two_empty_temp_db_connections, some_interdeps) -> N source_exp2 = Experiment(conn=source_conn) source_ds_2_1 = DataSet(conn=source_conn, exp_id=source_exp2.exp_id) 
- source_ds_2_2 = DataSet(conn=source_conn, - exp_id=source_exp2.exp_id, - name="customname") + source_ds_2_2 = DataSet( + conn=source_conn, exp_id=source_exp2.exp_id, name="customname" + ) for ds in (source_ds_1_1, source_ds_2_1, source_ds_2_2): ds.set_interdependencies(some_interdeps[1]) @@ -536,8 +545,9 @@ def test_load_by_X_functions(two_empty_temp_db_connections, some_interdeps) -> N test_ds = load_by_id(1, target_conn) assert source_ds_2_2.the_same_dataset_as(test_ds) - test_ds = load_by_run_spec(captured_run_id=source_ds_2_2.captured_run_id, - conn=target_conn) + test_ds = load_by_run_spec( + captured_run_id=source_ds_2_2.captured_run_id, conn=target_conn + ) assert source_ds_2_2.the_same_dataset_as(test_ds) assert source_exp2.exp_id == 2 @@ -564,18 +574,16 @@ def test_combine_runs( source_conn_1, source_conn_2 = two_empty_temp_db_connections target_conn = empty_temp_db_connection - source_1_exp = Experiment(conn=source_conn_1, - name='exp1', - sample_name='no_sample') - source_1_datasets = [DataSet(conn=source_conn_1, - exp_id=source_1_exp.exp_id) for i in range(10)] + source_1_exp = Experiment(conn=source_conn_1, name="exp1", sample_name="no_sample") + source_1_datasets = [ + DataSet(conn=source_conn_1, exp_id=source_1_exp.exp_id) for i in range(10) + ] - source_2_exp = Experiment(conn=source_conn_2, - name='exp2', - sample_name='no_sample') + source_2_exp = Experiment(conn=source_conn_2, name="exp2", sample_name="no_sample") - source_2_datasets = [DataSet(conn=source_conn_2, - exp_id=source_2_exp.exp_id) for i in range(10)] + source_2_datasets = [ + DataSet(conn=source_conn_2, exp_id=source_2_exp.exp_id) for i in range(10) + ] guids_1 = {dataset.guid for dataset in source_1_datasets} guids_2 = {dataset.guid for dataset in source_2_datasets} @@ -596,19 +604,24 @@ def test_combine_runs( # now let's insert all datasets in random order for ds in shuffled_datasets: - extract_runs_into_db(ds.conn.path_to_dbfile, - target_conn.path_to_dbfile, ds.run_id) + extract_runs_into_db( + ds.conn.path_to_dbfile, target_conn.path_to_dbfile, ds.run_id + ) for ds in source_all_datasets: - loaded_ds = load_by_run_spec(captured_run_id=ds.captured_run_id, - experiment_name=ds.exp_name, - conn=target_conn) + loaded_ds = load_by_run_spec( + captured_run_id=ds.captured_run_id, + experiment_name=ds.exp_name, + conn=target_conn, + ) assert ds.the_same_dataset_as(loaded_ds) for ds in source_all_datasets: - loaded_ds = load_by_run_spec(captured_run_id=ds.captured_counter, - experiment_name=ds.exp_name, - conn=target_conn) + loaded_ds = load_by_run_spec( + captured_run_id=ds.captured_counter, + experiment_name=ds.exp_name, + conn=target_conn, + ) assert ds.the_same_dataset_as(loaded_ds) # Now test that we generate the correct table for the guids above @@ -618,14 +631,14 @@ def test_combine_runs( new_guids = [ds.guid for ds in source_all_datasets] table = generate_dataset_table(new_guids, conn=target_conn) - lines = table.split('\n') - headers = re.split(r'\s+', lines[0].strip()) + lines = table.split("\n") + headers = re.split(r"\s+", lines[0].strip()) cfg = qc.config - guid_comp = cfg['GUID_components'] + guid_comp = cfg["GUID_components"] for i in range(2, len(lines)): - split_line = re.split(r'\s+', lines[i].strip()) + split_line = re.split(r"\s+", lines[i].strip()) mydict = {headers[j]: split_line[j] for j in range(len(split_line))} ds2 = load_by_guid(new_guids[i - 2], conn=target_conn) assert ds2.captured_run_id == int(mydict["captured_run_id"]) @@ -645,16 +658,14 @@ def test_copy_datasets_and_add_new( 
""" source_conn, target_conn = two_empty_temp_db_connections - source_exp_1 = Experiment(conn=source_conn, - name='exp1', - sample_name='no_sample') - source_exp_2 = Experiment(conn=source_conn, - name='exp2', - sample_name='no_sample') - source_datasets_1 = [DataSet(conn=source_conn, - exp_id=source_exp_1.exp_id) for i in range(5)] - source_datasets_2 = [DataSet(conn=source_conn, - exp_id=source_exp_2.exp_id) for i in range(5)] + source_exp_1 = Experiment(conn=source_conn, name="exp1", sample_name="no_sample") + source_exp_2 = Experiment(conn=source_conn, name="exp2", sample_name="no_sample") + source_datasets_1 = [ + DataSet(conn=source_conn, exp_id=source_exp_1.exp_id) for i in range(5) + ] + source_datasets_2 = [ + DataSet(conn=source_conn, exp_id=source_exp_2.exp_id) for i in range(5) + ] source_datasets = source_datasets_1 + source_datasets_2 for ds in source_datasets: @@ -666,11 +677,13 @@ def test_copy_datasets_and_add_new( # now let's insert only some of the datasets # and verify that the ids and counters are set correctly for ds in source_datasets[-3:]: - extract_runs_into_db(ds.conn.path_to_dbfile, - target_conn.path_to_dbfile, ds.run_id) + extract_runs_into_db( + ds.conn.path_to_dbfile, target_conn.path_to_dbfile, ds.run_id + ) - loaded_datasets = [load_by_run_spec(captured_run_id=i, conn=target_conn) - for i in range(8, 11)] + loaded_datasets = [ + load_by_run_spec(captured_run_id=i, conn=target_conn) for i in range(8, 11) + ] expected_run_ids = [1, 2, 3] expected_captured_run_ids = [8, 9, 10] expected_counter = [1, 2, 3] @@ -688,12 +701,11 @@ def test_copy_datasets_and_add_new( assert ds2.counter == ec assert ds2.captured_counter == ecc - exp = load_experiment_by_name('exp2', conn=target_conn) + exp = load_experiment_by_name("exp2", conn=target_conn) # add additional runs and verify that the ids and counters increase as # expected - new_datasets = [DataSet(conn=target_conn, - exp_id=exp.exp_id) for i in range(3)] + new_datasets = [DataSet(conn=target_conn, exp_id=exp.exp_id) for i in range(3)] for ds in new_datasets: ds.set_interdependencies(some_interdeps[1]) @@ -706,11 +718,13 @@ def test_copy_datasets_and_add_new( expected_counter = [4, 5, 6] expected_captured_counter = expected_counter - for ds, eri, ecri, ec, ecc in zip(new_datasets, - expected_run_ids, - expected_captured_run_ids, - expected_counter, - expected_captured_counter): + for ds, eri, ecri, ec, ecc in zip( + new_datasets, + expected_run_ids, + expected_captured_run_ids, + expected_counter, + expected_captured_counter, + ): assert ds.run_id == eri assert ds.captured_run_id == ecri assert ds.counter == ec @@ -727,10 +741,10 @@ def test_old_versions_not_touched( _, new_v = get_db_version_and_newest_available_version(source_path) - fixturepath = os.sep.join(qcodes.tests.dataset.__file__.split(os.sep)[:-1]) - fixturepath = os.path.join(fixturepath, - 'fixtures', 'db_files', 'version2', - 'some_runs.db') + fixturepath = os.sep.join(tests.dataset.__file__.split(os.sep)[:-1]) + fixturepath = os.path.join( + fixturepath, "fixtures", "db_files", "version2", "some_runs.db" + ) skip_if_no_fixtures(fixturepath) # First test that we cannot use an old version as source @@ -738,11 +752,13 @@ def test_old_versions_not_touched( with raise_if_file_changed(fixturepath): with pytest.warns(UserWarning) as warning: extract_runs_into_db(fixturepath, target_path, 1) - expected_mssg = ('Source DB version is 2, but this ' - f'function needs it to be in version {new_v}. 
' - 'Run this function again with ' - 'upgrade_source_db=True to auto-upgrade ' - 'the source DB file.') + expected_mssg = ( + "Source DB version is 2, but this " + f"function needs it to be in version {new_v}. " + "Run this function again with " + "upgrade_source_db=True to auto-upgrade " + "the source DB file." + ) assert isinstance(warning[0].message, Warning) assert warning[0].message.args[0] == expected_mssg @@ -755,18 +771,19 @@ def test_old_versions_not_touched( source_ds.set_interdependencies(some_interdeps[1]) source_ds.mark_started() - source_ds.add_results([{name: 0.0 - for name in some_interdeps[1].names}]) + source_ds.add_results([{name: 0.0 for name in some_interdeps[1].names}]) source_ds.mark_completed() with raise_if_file_changed(fixturepath): with pytest.warns(UserWarning) as warning: extract_runs_into_db(source_path, fixturepath, 1) - expected_mssg = ('Target DB version is 2, but this ' - f'function needs it to be in version {new_v}. ' - 'Run this function again with ' - 'upgrade_target_db=True to auto-upgrade ' - 'the target DB file.') + expected_mssg = ( + "Target DB version is 2, but this " + f"function needs it to be in version {new_v}. " + "Run this function again with " + "upgrade_target_db=True to auto-upgrade " + "the target DB file." + ) assert isinstance(warning[0].message, Warning) assert warning[0].message.args[0] == expected_mssg @@ -782,7 +799,7 @@ def test_experiments_with_NULL_sample_name( is thus not ever re-inserted into the target DB """ source_conn, target_conn = two_empty_temp_db_connections - source_exp_1 = Experiment(conn=source_conn, name='null_sample_name') + source_exp_1 = Experiment(conn=source_conn, name="null_sample_name") source_path = path_to_dbfile(source_conn) target_path = path_to_dbfile(target_conn) @@ -791,7 +808,6 @@ def test_experiments_with_NULL_sample_name( exp_1_run_ids = [] for _ in range(5): - source_dataset = DataSet(conn=source_conn, exp_id=source_exp_1.exp_id) exp_1_run_ids.append(source_dataset.run_id) @@ -799,8 +815,9 @@ def test_experiments_with_NULL_sample_name( source_dataset.mark_started() for val in range(10): - source_dataset.add_results([{name: val - for name in some_interdeps[1].names}]) + source_dataset.add_results( + [{name: val for name in some_interdeps[1].names}] + ) source_dataset.mark_completed() sql = """ @@ -848,9 +865,11 @@ def test_integration_station_and_measurement( with meas.run() as datasaver: for back_v in [1, 2, 3]: for plung_v in [-3, -2.5, 0]: - datasaver.add_result((inst.back, back_v), - (inst.plunger, plung_v), - (inst.cutter, back_v+plung_v)) + datasaver.add_result( + (inst.back, back_v), + (inst.plunger, plung_v), + (inst.cutter, back_v + plung_v), + ) extract_runs_into_db(source_path, target_path, 1) @@ -881,8 +900,7 @@ def test_atomicity(two_empty_temp_db_connections, some_interdeps) -> None: for ds in (source_ds_1, source_ds_2): ds.set_interdependencies(some_interdeps[1]) ds.mark_started() - ds.add_results([{name: 2.1 - for name in some_interdeps[1].names}]) + ds.add_results([{name: 2.1 for name in some_interdeps[1].names}]) # importantly, source_ds_2 is NOT marked as completed source_ds_1.mark_completed() @@ -918,18 +936,19 @@ def test_column_mismatch(two_empty_temp_db_connections, some_interdeps, inst) -> with meas.run() as datasaver: for back_v in [1, 2, 3]: for plung_v in [-3, -2.5, 0]: - datasaver.add_result((inst.back, back_v), - (inst.plunger, plung_v), - (inst.cutter, back_v+plung_v)) - datasaver.dataset.add_metadata('meta_tag', 'meta_value') + datasaver.add_result( + (inst.back, 
back_v), + (inst.plunger, plung_v), + (inst.cutter, back_v + plung_v), + ) + datasaver.dataset.add_metadata("meta_tag", "meta_value") Experiment(conn=source_conn) source_ds = DataSet(conn=source_conn) source_ds.set_interdependencies(some_interdeps[1]) source_ds.mark_started() - source_ds.add_results([{name: 2.1 - for name in some_interdeps[1].names}]) + source_ds.add_results([{name: 2.1 for name in some_interdeps[1].names}]) source_ds.mark_completed() extract_runs_into_db(source_path, target_path, 1) diff --git a/tests/dataset/test_dataset_basic.py b/tests/dataset/test_dataset_basic.py index 8539ae3a77a..5a2d2759e8c 100644 --- a/tests/dataset/test_dataset_basic.py +++ b/tests/dataset/test_dataset_basic.py @@ -29,10 +29,10 @@ from qcodes.dataset.sqlite.connection import atomic, path_to_dbfile from qcodes.dataset.sqlite.database import _convert_array, get_DB_location from qcodes.dataset.sqlite.queries import _rewrite_timestamps, _unicode_categories -from qcodes.tests.common import error_caused_by -from qcodes.tests.dataset.helper_functions import verify_data_dict -from qcodes.tests.dataset.test_links import generate_some_links from qcodes.utils.types import complex_types, numpy_complex, numpy_floats, numpy_ints +from tests.common import error_caused_by +from tests.dataset.helper_functions import verify_data_dict +from tests.dataset.test_links import generate_some_links n_experiments = 0 @@ -59,12 +59,30 @@ def test_has_attributes_after_init() -> None: (run_id is None / run_id is not None) """ - attrs = ['path_to_db', 'conn', '_run_id', 'run_id', - '_debug', 'subscribers', '_completed', 'name', 'table_name', - 'guid', 'number_of_results', 'counter', 'parameters', - 'paramspecs', 'exp_id', 'exp_name', 'sample_name', - 'run_timestamp_raw', 'completed_timestamp_raw', 'completed', - 'snapshot', 'snapshot_raw'] + attrs = [ + "path_to_db", + "conn", + "_run_id", + "run_id", + "_debug", + "subscribers", + "_completed", + "name", + "table_name", + "guid", + "number_of_results", + "counter", + "parameters", + "paramspecs", + "exp_id", + "exp_name", + "sample_name", + "run_timestamp_raw", + "completed_timestamp_raw", + "completed", + "snapshot", + "snapshot_raw", + ] path_to_db = get_DB_location() ds = DataSet(path_to_db, run_id=None) @@ -82,14 +100,12 @@ def test_has_attributes_after_init() -> None: @pytest.mark.usefixtures("experiment") def test_dataset_length() -> None: - path_to_db = get_DB_location() ds = DataSet(path_to_db, run_id=None) assert len(ds) == 0 - parameter = ParamSpecBase(name='single', paramtype='numeric', - label='', unit='N/A') + parameter = ParamSpecBase(name="single", paramtype="numeric", label="", unit="N/A") idps = InterDependencies_(standalones=(parameter,)) ds.set_interdependencies(idps) @@ -107,8 +123,10 @@ def test_dataset_location(empty_temp_db_connection) -> None: """ exp = new_experiment("test", "test1", conn=empty_temp_db_connection) ds = DataSet(conn=empty_temp_db_connection) - assert path_to_dbfile(empty_temp_db_connection) == \ - empty_temp_db_connection.path_to_dbfile + assert ( + path_to_dbfile(empty_temp_db_connection) + == empty_temp_db_connection.path_to_dbfile + ) assert exp.path_to_db == empty_temp_db_connection.path_to_dbfile assert ds.path_to_db == empty_temp_db_connection.path_to_dbfile @@ -126,19 +144,23 @@ def test_dataset_states() -> None: assert ds.started is False assert ds.completed is False - with pytest.raises(RuntimeError, match='Can not mark DataSet as complete ' - 'before it has ' - 'been marked as started.'): + with pytest.raises( + 
RuntimeError, + match="Can not mark DataSet as complete " + "before it has " + "been marked as started.", + ): ds.mark_completed() - match = ('This DataSet has not been marked as started. ' - 'Please mark the DataSet as started before ' - 'adding results to it.') + match = ( + "This DataSet has not been marked as started. " + "Please mark the DataSet as started before " + "adding results to it." + ) with pytest.raises(RuntimeError, match=match): - ds.add_results([{'x': 1}]) + ds.add_results([{"x": 1}]) - parameter = ParamSpecBase(name='single', paramtype='numeric', - label='', unit='N/A') + parameter = ParamSpecBase(name="single", paramtype="numeric", label="", unit="N/A") idps = InterDependencies_(standalones=(parameter,)) ds.set_interdependencies(idps) @@ -149,8 +171,7 @@ def test_dataset_states() -> None: assert ds.started is True assert ds.completed is False - match = ('Can not set interdependencies on a DataSet that has ' - 'been started.') + match = "Can not set interdependencies on a DataSet that has been started." with pytest.raises(RuntimeError, match=match): ds.set_interdependencies(idps) @@ -164,21 +185,19 @@ def test_dataset_states() -> None: assert ds.started is True assert ds.completed is True - match = ('Can not set interdependencies on a DataSet that has ' - 'been started.') + match = "Can not set interdependencies on a DataSet that has been started." with pytest.raises(RuntimeError, match=match): ds.set_interdependencies(idps) - match = ('This DataSet is complete, no further ' - 'results can be added to it.') + match = "This DataSet is complete, no further results can be added to it." with pytest.raises(CompletedError, match=match): ds.add_results([{parameter.name: 1}]) @pytest.mark.parametrize("start_bg_writer", (True, False)) -@pytest.mark.usefixtures('experiment') +@pytest.mark.usefixtures("experiment") def test_mark_completed_twice(start_bg_writer) -> None: """ Ensure that its not an error to call mark_completed @@ -190,7 +209,7 @@ def test_mark_completed_twice(start_bg_writer) -> None: ds.mark_completed() -@pytest.mark.usefixtures('experiment') +@pytest.mark.usefixtures("experiment") def test_timestamps_are_none() -> None: ds = DataSet() @@ -203,7 +222,7 @@ def test_timestamps_are_none() -> None: assert isinstance(ds.run_timestamp(), str) -@pytest.mark.usefixtures('experiment') +@pytest.mark.usefixtures("experiment") def test_integer_timestamps_in_database_are_supported() -> None: ds = DataSet() @@ -220,11 +239,25 @@ def test_integer_timestamps_in_database_are_supported() -> None: def test_dataset_read_only_properties(dataset) -> None: - read_only_props = ['run_id', 'path_to_db', 'name', 'table_name', 'guid', - 'number_of_results', 'counter', 'parameters', - 'paramspecs', 'exp_id', 'exp_name', 'sample_name', - 'run_timestamp_raw', 'completed_timestamp_raw', - 'snapshot', 'snapshot_raw', 'dependent_parameters'] + read_only_props = [ + "run_id", + "path_to_db", + "name", + "table_name", + "guid", + "number_of_results", + "counter", + "parameters", + "paramspecs", + "exp_id", + "exp_name", + "sample_name", + "run_timestamp_raw", + "completed_timestamp_raw", + "snapshot", + "snapshot_raw", + "dependent_parameters", + ] # It is not expected to be possible to set readonly properties # the error message changed in python 3.11 @@ -237,18 +270,24 @@ def test_dataset_read_only_properties(dataset) -> None: @pytest.mark.usefixtures("experiment") -@pytest.mark.parametrize("non_existing_run_id", (1, 0, -1, 'number#42')) +@pytest.mark.parametrize("non_existing_run_id", (1, 0, -1, 
"number#42")) def test_create_dataset_from_non_existing_run_id(non_existing_run_id) -> None: - with pytest.raises(ValueError, match=f"Run with run_id " - f"{non_existing_run_id} does not " - f"exist in the database"): + with pytest.raises( + ValueError, + match=f"Run with run_id " + f"{non_existing_run_id} does not " + f"exist in the database", + ): _ = DataSet(run_id=non_existing_run_id) def test_create_dataset_pass_both_connection_and_path_to_db(experiment) -> None: - with pytest.raises(ValueError, match="Received BOTH conn and path_to_db. " - "Please provide only one or " - "the other."): + with pytest.raises( + ValueError, + match="Received BOTH conn and path_to_db. " + "Please provide only one or " + "the other.", + ): some_valid_connection = experiment.conn _ = DataSet(path_to_db="some valid path", conn=some_valid_connection) @@ -259,19 +298,23 @@ def test_load_by_id(dataset) -> None: assert dataset.path_to_db == ds.path_to_db -@pytest.mark.usefixtures('experiment') -@pytest.mark.parametrize('non_existing_run_id', (1, 0, -1)) +@pytest.mark.usefixtures("experiment") +@pytest.mark.parametrize("non_existing_run_id", (1, 0, -1)) def test_load_by_id_for_nonexisting_run_id(non_existing_run_id) -> None: - with pytest.raises(ValueError, match=f'Run with run_id ' - f'{non_existing_run_id} does not ' - f'exist in the database'): + with pytest.raises( + ValueError, + match=f"Run with run_id " + f"{non_existing_run_id} does not " + f"exist in the database", + ): _ = load_by_id(non_existing_run_id) -@pytest.mark.usefixtures('experiment') +@pytest.mark.usefixtures("experiment") def test_load_by_id_for_none() -> None: - with pytest.raises(ValueError, match='run_id has to be a positive integer, ' - 'not None.'): + with pytest.raises( + ValueError, match="run_id has to be a positive integer, not None." 
+ ): _ = load_by_id(None) # type: ignore[arg-type] @@ -306,9 +349,9 @@ def test_add_experiments(experiment_name, sample_name, dataset_name) -> None: assert loaded_dataset.name == dataset_name assert loaded_dataset.counter == expected_ds_counter assert isinstance(loaded_dataset, DataSet) - assert loaded_dataset.table_name == "{}-{}-{}".format("results", - exp.exp_id, - loaded_dataset.counter) + assert loaded_dataset.table_name == "{}-{}-{}".format( + "results", exp.exp_id, loaded_dataset.counter + ) expected_ds_counter += 1 dataset = new_data_set(dataset_name) dsid = dataset.run_id @@ -316,36 +359,38 @@ def test_add_experiments(experiment_name, sample_name, dataset_name) -> None: assert loaded_dataset.name == dataset_name assert loaded_dataset.counter == expected_ds_counter assert isinstance(loaded_dataset, DataSet) - assert loaded_dataset.table_name == "{}-{}-{}".format("results", - exp.exp_id, - loaded_dataset.counter) + assert loaded_dataset.table_name == "{}-{}-{}".format( + "results", exp.exp_id, loaded_dataset.counter + ) @pytest.mark.usefixtures("experiment") def test_dependent_parameters() -> None: - pss: list[ParamSpecBase] = [] for n in range(5): - pss.append(ParamSpecBase(f'ps{n}', paramtype='numeric')) + pss.append(ParamSpecBase(f"ps{n}", paramtype="numeric")) idps = InterDependencies_(dependencies={pss[0]: (pss[1], pss[2])}) ds = DataSet(specs=idps) assert ds.dependent_parameters == (pss[0],) - idps = InterDependencies_(dependencies={pss[0]: (pss[1], pss[2])}, - standalones=(pss[3], pss[4])) + idps = InterDependencies_( + dependencies={pss[0]: (pss[1], pss[2])}, standalones=(pss[3], pss[4]) + ) ds = DataSet(specs=idps) assert ds.dependent_parameters == (pss[0],) - idps = InterDependencies_(dependencies={pss[0]: (pss[1], pss[2]), - pss[3]: (pss[4],)}) + idps = InterDependencies_( + dependencies={pss[0]: (pss[1], pss[2]), pss[3]: (pss[4],)} + ) ds = DataSet(specs=idps) assert ds.dependent_parameters == (pss[0], pss[3]) - idps = InterDependencies_(dependencies={pss[3]: (pss[1], pss[2]), - pss[0]: (pss[4],)}) + idps = InterDependencies_( + dependencies={pss[3]: (pss[1], pss[2]), pss[0]: (pss[4],)} + ) ds = DataSet(specs=idps) assert ds.dependent_parameters == (pss[3], pss[0]) @@ -363,8 +408,7 @@ def test_set_interdependencies(dataset) -> None: parameter_b = ParamSpecBase("b_param", "NUMERIC") parameter_c = ParamSpecBase("c_param", "array") - idps = InterDependencies_( - inferences={parameter_c: (parameter_a, parameter_b)}) + idps = InterDependencies_(inferences={parameter_c: (parameter_a, parameter_b)}) dataset.set_interdependencies(idps) @@ -377,7 +421,7 @@ def test_set_interdependencies(dataset) -> None: paramspecs = shadow_ds.paramspecs - expected_keys = ['a_param', 'b_param', 'c_param'] + expected_keys = ["a_param", "b_param", "c_param"] keys = sorted(list(paramspecs.keys())) assert keys == expected_keys for expected_param_name in expected_keys: @@ -415,24 +459,20 @@ def test_add_data_1d() -> None: shadow_ds = make_shadow_dataset(mydataset) - np.testing.assert_array_equal( - mydataset.get_parameter_data()['y']['x'], expected_x) - np.testing.assert_array_equal( - mydataset.get_parameter_data()['y']['y'], expected_y) - np.testing.assert_array_equal( - shadow_ds.get_parameter_data()['y']['x'], expected_x) - np.testing.assert_array_equal( - shadow_ds.get_parameter_data()['y']['y'], expected_y) + np.testing.assert_array_equal(mydataset.get_parameter_data()["y"]["x"], expected_x) + np.testing.assert_array_equal(mydataset.get_parameter_data()["y"]["y"], expected_y) + 
np.testing.assert_array_equal(shadow_ds.get_parameter_data()["y"]["x"], expected_x) + np.testing.assert_array_equal(shadow_ds.get_parameter_data()["y"]["y"], expected_y) assert mydataset.completed is False mydataset.mark_completed() assert mydataset.completed is True with pytest.raises(CompletedError): - mydataset.add_results([{'y': 500}]) + mydataset.add_results([{"y": 500}]) with pytest.raises(CompletedError): - mydataset.add_results([{'x': 5}]) + mydataset.add_results([{"x": 5}]) @pytest.mark.usefixtures("experiment") @@ -445,8 +485,8 @@ def test_add_data_array() -> None: assert exp.last_counter == 0 idps = InterDependencies_( - standalones=(ParamSpecBase("x", "numeric"), - ParamSpecBase("y", "array"))) + standalones=(ParamSpecBase("x", "numeric"), ParamSpecBase("y", "array")) + ) mydataset = new_data_set("test") mydataset.set_interdependencies(idps) mydataset.mark_started() @@ -460,14 +500,16 @@ def test_add_data_array() -> None: shadow_ds = make_shadow_dataset(mydataset) - np.testing.assert_array_equal(mydataset.get_parameter_data()[ - 'x']['x'], np.array(expected_x)) - np.testing.assert_array_equal(shadow_ds.get_parameter_data()[ - 'x']['x'], np.array(expected_x)) + np.testing.assert_array_equal( + mydataset.get_parameter_data()["x"]["x"], np.array(expected_x) + ) + np.testing.assert_array_equal( + shadow_ds.get_parameter_data()["x"]["x"], np.array(expected_x) + ) - y_data = mydataset.get_parameter_data()['y']['y'] + y_data = mydataset.get_parameter_data()["y"]["y"] np.testing.assert_allclose(y_data, expected_y) - y_data = shadow_ds.get_parameter_data()['y']['y'] + y_data = shadow_ds.get_parameter_data()["y"]["y"] np.testing.assert_allclose(y_data, expected_y) @@ -478,25 +520,23 @@ def test_adding_too_many_results() -> None: insert_many_values function of the sqlite.query_helpers module """ dataset = new_data_set("test_adding_too_many_results") - xparam = ParamSpecBase("x", "numeric", label="x parameter", - unit='V') - yparam = ParamSpecBase("y", 'numeric', label='y parameter', - unit='Hz') + xparam = ParamSpecBase("x", "numeric", label="x parameter", unit="V") + yparam = ParamSpecBase("y", "numeric", label="y parameter", unit="Hz") idps = InterDependencies_(dependencies={yparam: (xparam,)}) dataset.set_interdependencies(idps) dataset.mark_started() n_max = int(qcodes.dataset.SQLiteSettings.limits["MAX_VARIABLE_NUMBER"]) - vals = np.linspace(0, 1, int(n_max/2)+2) - results = [{'x': val} for val in vals] + vals = np.linspace(0, 1, int(n_max / 2) + 2) + results = [{"x": val} for val in vals] dataset.add_results(results) - vals = np.linspace(0, 1, int(n_max/2)+1) - results = [{'x': val, 'y': val} for val in vals] + vals = np.linspace(0, 1, int(n_max / 2) + 1) + results = [{"x": val, "y": val} for val in vals] dataset.add_results(results) - vals = np.linspace(0, 1, n_max*3) - results = [{'x': val} for val in vals] + vals = np.linspace(0, 1, n_max * 3) + results = [{"x": val} for val in vals] dataset.add_results(results) @@ -517,17 +557,17 @@ def test_load_by_counter() -> None: @pytest.mark.usefixtures("experiment") -@pytest.mark.parametrize('nonexisting_counter', (-1, 0, 1, None)) +@pytest.mark.parametrize("nonexisting_counter", (-1, 0, 1, None)) def test_load_by_counter_for_nonexisting_counter(nonexisting_counter) -> None: exp_id = 1 - with pytest.raises(RuntimeError, match='Expected one row'): + with pytest.raises(RuntimeError, match="Expected one row"): _ = load_by_counter(exp_id, nonexisting_counter) @pytest.mark.usefixtures("empty_temp_db") 
-@pytest.mark.parametrize('nonexisting_exp_id', (-1, 0, 1, None)) +@pytest.mark.parametrize("nonexisting_exp_id", (-1, 0, 1, None)) def test_load_by_counter_for_nonexisting_experiment(nonexisting_exp_id) -> None: - with pytest.raises(RuntimeError, match='Expected one row'): + with pytest.raises(RuntimeError, match="Expected one row"): _ = load_by_counter(nonexisting_exp_id, 1) @@ -545,9 +585,9 @@ def test_guid(dataset) -> None: def test_numpy_ints(dataset) -> None: """ - Test that we can insert numpy integers in the data set + Test that we can insert numpy integers in the data set """ - xparam = ParamSpecBase('x', 'numeric') + xparam = ParamSpecBase("x", "numeric") idps = InterDependencies_(standalones=(xparam,)) dataset.set_interdependencies(idps) dataset.mark_started() @@ -555,15 +595,16 @@ def test_numpy_ints(dataset) -> None: results = [{"x": tp(1)} for tp in numpy_ints] dataset.add_results(results) expected_result = np.ones(len(numpy_ints)) - np.testing.assert_array_equal(dataset.get_parameter_data()[ - "x"]["x"], expected_result) + np.testing.assert_array_equal( + dataset.get_parameter_data()["x"]["x"], expected_result + ) def test_numpy_floats(dataset) -> None: """ Test that we can insert numpy floats in the data set """ - float_param = ParamSpecBase('y', 'numeric') + float_param = ParamSpecBase("y", "numeric") idps = InterDependencies_(standalones=(float_param,)) dataset.set_interdependencies(idps) dataset.mark_started() @@ -572,7 +613,7 @@ def test_numpy_floats(dataset) -> None: dataset.add_results(results) expected_result = np.array([tp(1.2) for tp in numpy_floats]) data = dataset.get_parameter_data()["y"]["y"] - assert np.allclose(data, expected_result, atol=1E-8) + assert np.allclose(data, expected_result, atol=1e-8) def test_numpy_complex(dataset) -> None: @@ -633,10 +674,10 @@ def test_missing_keys(dataset) -> None: example handy when having an interleaved 1D and 2D sweep. 
""" - x = ParamSpecBase("x", paramtype='numeric') - y = ParamSpecBase("y", paramtype='numeric') - a = ParamSpecBase("a", paramtype='numeric') - b = ParamSpecBase("b", paramtype='numeric') + x = ParamSpecBase("x", paramtype="numeric") + y = ParamSpecBase("y", paramtype="numeric") + a = ParamSpecBase("a", paramtype="numeric") + b = ParamSpecBase("b", paramtype="numeric") idps = InterDependencies_(dependencies={a: (x,), b: (x, y)}) dataset.set_interdependencies(idps) @@ -661,21 +702,19 @@ def fb(xv, yv): loaded_data = dataset.get_parameter_data() - np.testing.assert_array_equal(loaded_data['a']['x'], - np.array(xvals)) - np.testing.assert_array_equal(loaded_data['a']['a'], - np.array([fa(xv) for xv in xvals])) + np.testing.assert_array_equal(loaded_data["a"]["x"], np.array(xvals)) + np.testing.assert_array_equal( + loaded_data["a"]["a"], np.array([fa(xv) for xv in xvals]) + ) - np.testing.assert_array_equal(loaded_data['b']['x'], - np.repeat(np.array(xvals), 3)) - np.testing.assert_array_equal(loaded_data['b']['y'], - np.tile(np.array(yvals), 3)) - np.testing.assert_array_equal(loaded_data['b']['b'], - np.array([fb(xv, yv) for xv in xvals for yv in yvals])) + np.testing.assert_array_equal(loaded_data["b"]["x"], np.repeat(np.array(xvals), 3)) + np.testing.assert_array_equal(loaded_data["b"]["y"], np.tile(np.array(yvals), 3)) + np.testing.assert_array_equal( + loaded_data["b"]["b"], np.array([fb(xv, yv) for xv in xvals for yv in yvals]) + ) def test_get_description(experiment, some_interdeps) -> None: - ds = DataSet() assert ds.run_id == 1 @@ -690,8 +729,7 @@ def test_get_description(experiment, some_interdeps) -> None: # the run description gets written as the dataset is marked as started, # so now no description should be stored in the database prematurely_loaded_ds = DataSet(run_id=1) - assert prematurely_loaded_ds.description == RunDescriber( - InterDependencies_()) + assert prematurely_loaded_ds.description == RunDescriber(InterDependencies_()) ds.mark_started() @@ -703,9 +741,8 @@ def test_get_description(experiment, some_interdeps) -> None: def test_metadata(experiment, request: FixtureRequest) -> None: - - metadata1 = {'number': 1, "string": "Once upon a time..."} - metadata2 = {'more': 'meta'} + metadata1 = {"number": 1, "string": "Once upon a time..."} + metadata2 = {"more": "meta"} ds1 = DataSet(metadata=metadata1) request.addfinalizer(ds1.conn.close) @@ -726,31 +763,31 @@ def test_metadata(experiment, request: FixtureRequest) -> None: bad_tag = "lex luthor" bad_tag_msg = ( - f"Tag {bad_tag} is not a valid tag. " - "Use only alphanumeric characters and underscores!" + f"Tag {bad_tag} is not a valid tag. " + "Use only alphanumeric characters and underscores!" ) - with pytest.raises(RuntimeError, - match="Rolling back due to unhandled exception") as e1: + with pytest.raises( + RuntimeError, match="Rolling back due to unhandled exception" + ) as e1: ds1.add_metadata(bad_tag, "value") assert error_caused_by(e1, bad_tag_msg) good_tag = "tag" none_value_msg = ( - f"Tag {good_tag} has value None. " - "That is not a valid metadata value!" + f"Tag {good_tag} has value None. That is not a valid metadata value!" 
) - with pytest.raises(RuntimeError, - match="Rolling back due to unhandled exception") as e2: + with pytest.raises( + RuntimeError, match="Rolling back due to unhandled exception" + ) as e2: ds1.add_metadata(good_tag, None) assert error_caused_by(e2, none_value_msg) def test_the_same_dataset_as(some_interdeps, experiment) -> None: - ds = DataSet() ds.set_interdependencies(some_interdeps[1]) ds.mark_started() - ds.add_results([{'ps1': 1, 'ps2': 2}]) + ds.add_results([{"ps1": 1, "ps2": 2}]) same_ds_from_load = DataSet(run_id=ds.run_id) assert ds.the_same_dataset_as(same_ds_from_load) @@ -771,12 +808,14 @@ def test_parent_dataset_links_invalid_input() -> None: for link in links: assert link.head != ds.guid - match = re.escape('Invalid input. Did not receive a list of Links') + match = re.escape("Invalid input. Did not receive a list of Links") with pytest.raises(ValueError, match=match): ds.parent_dataset_links = [ds.guid] # type: ignore[list-item] - match = re.escape('Invalid input. All links must point to this dataset. ' - 'Got link(s) with head(s) pointing to another dataset.') + match = re.escape( + "Invalid input. All links must point to this dataset. " + "Got link(s) with head(s) pointing to another dataset." + ) with pytest.raises(ValueError, match=match): ds.parent_dataset_links = links @@ -801,12 +840,13 @@ def test_parent_dataset_links(some_interdeps) -> None: ds.mark_started() - match = re.escape('Can not set parent dataset links on a dataset ' - 'that has been started.') + match = re.escape( + "Can not set parent dataset links on a dataset that has been started." + ) with pytest.raises(RuntimeError, match=match): ds.parent_dataset_links = links - ds.add_results([{'ps1': 1, 'ps2': 2}]) + ds.add_results([{"ps1": 1, "ps2": 2}]) run_id = ds.run_id ds_loaded = DataSet(run_id=run_id) @@ -815,7 +855,7 @@ def test_parent_dataset_links(some_interdeps) -> None: class TestGetData: - x = ParamSpecBase("x", paramtype='numeric') + x = ParamSpecBase("x", paramtype="numeric") n_vals = 5 xvals: ClassVar[list[int]] = list(range(n_vals)) # this is the format of how data is returned by DataSet.get_data @@ -841,15 +881,13 @@ def ds_with_vals(self, dataset): [ # test without start and end (None, None, xdata), - # test for start only (0, None, xdata), - (2, None, xdata[(2-1):]), + (2, None, xdata[(2 - 1) :]), (-2, None, xdata), - (n_vals, None, xdata[(n_vals-1):]), + (n_vals, None, xdata[(n_vals - 1) :]), (n_vals + 1, None, []), (n_vals + 2, None, []), - # test for end only (None, 0, []), (None, 2, xdata[:2]), @@ -857,16 +895,15 @@ def ds_with_vals(self, dataset): (None, n_vals, xdata), (None, n_vals + 1, xdata), (None, n_vals + 2, xdata), - # test for start and end (0, 0, []), - (1, 1, [xdata[1-1]]), + (1, 1, [xdata[1 - 1]]), (2, 1, []), (2, 0, []), (1, 0, []), - (n_vals, n_vals, [xdata[n_vals-1]]), + (n_vals, n_vals, [xdata[n_vals - 1]]), (n_vals, n_vals - 1, []), - (2, 4, xdata[(2-1):4]), + (2, 4, xdata[(2 - 1) : 4]), ], ) def test_get_data_with_start_and_end_args( @@ -878,8 +915,10 @@ def test_get_data_with_start_and_end_args( @settings(deadline=600, suppress_health_check=(HealthCheck.function_scoped_fixture,)) -@given(start=hst.one_of(hst.integers(1, 10**3), hst.none()), - end=hst.one_of(hst.integers(1, 10**3), hst.none())) +@given( + start=hst.one_of(hst.integers(1, 10**3), hst.none()), + end=hst.one_of(hst.integers(1, 10**3), hst.none()), +) def test_get_parameter_data(scalar_dataset, start, end) -> None: input_names = ["param_3"] @@ -892,17 +931,19 @@ def test_get_parameter_data(scalar_dataset, 
start, end) -> None: np.arange(10000 * a, 10000 * a + 1000) for a in range(3) ] - start, end = limit_data_to_start_end(start, end, input_names, - expected_names, expected_shapes, - expected_values) + start, end = limit_data_to_start_end( + start, end, input_names, expected_names, expected_shapes, expected_values + ) - parameter_test_helper(scalar_dataset, - input_names, - expected_names, - expected_shapes, - expected_values, - start, - end) + parameter_test_helper( + scalar_dataset, + input_names, + expected_names, + expected_shapes, + expected_values, + start, + end, + ) def test_get_scalar_parameter_data_no_nulls(scalar_dataset_with_nulls) -> None: @@ -926,12 +967,11 @@ def test_get_scalar_parameter_data_no_nulls(scalar_dataset_with_nulls) -> None: def test_get_array_parameter_data_no_nulls(array_dataset_with_nulls) -> None: - types = [p.type for p in array_dataset_with_nulls.paramspecs.values()] expected_names = {} - expected_names['val1'] = ['val1', 'sp1', 'sp2'] - expected_names['val2'] = ['val2', 'sp1'] + expected_names["val1"] = ["val1", "sp1", "sp2"] + expected_names["val2"] = ["val2", "sp1"] expected_shapes = {} expected_values = {} @@ -949,11 +989,13 @@ def test_get_array_parameter_data_no_nulls(array_dataset_with_nulls) -> None: ] expected_values["val2"] = [np.zeros(shape), np.arange(0, 5).reshape(shape)] - parameter_test_helper(array_dataset_with_nulls, - list(expected_names.keys()), - expected_names, - expected_shapes, - expected_values) + parameter_test_helper( + array_dataset_with_nulls, + list(expected_names.keys()), + expected_names, + expected_shapes, + expected_values, + ) def test_get_array_parameter_data(array_dataset) -> None: @@ -978,21 +1020,22 @@ def test_get_array_parameter_data(array_dataset) -> None: expected_shapes[par_name] = [(1, expected_len), (1, expected_len)] for i in range(len(expected_values[par_name])): expected_values[par_name][i] = expected_values[par_name][i].reshape( - 1, expected_len) - parameter_test_helper(array_dataset, - input_names, - expected_names, - expected_shapes, - expected_values) + 1, expected_len + ) + parameter_test_helper( + array_dataset, input_names, expected_names, expected_shapes, expected_values + ) def test_get_multi_parameter_data(multi_dataset) -> None: paramspecs = multi_dataset.paramspecs types = [param.type for param in paramspecs.values()] - input_names = ['this', 'that'] - sp_names = ['multi_2d_setpoint_param_this_setpoint', - 'multi_2d_setpoint_param_that_setpoint'] + input_names = ["this", "that"] + sp_names = [ + "multi_2d_setpoint_param_this_setpoint", + "multi_2d_setpoint_param_that_setpoint", + ] expected_names = {} expected_names["this"] = ["this"] + sp_names @@ -1004,8 +1047,7 @@ def test_get_multi_parameter_data(multi_dataset) -> None: this_data = np.zeros((shape_1, shape_2)) that_data = np.ones((shape_1, shape_2)) - sp_1_data = np.tile(np.linspace(5, 9, shape_1).reshape(shape_1, 1), - (1, shape_2)) + sp_1_data = np.tile(np.linspace(5, 9, shape_1).reshape(shape_1, 1), (1, shape_2)) sp_2_data = np.tile(np.linspace(9, 11, shape_2), (shape_1, 1)) if "array" in types: expected_shapes["this"] = [(1, shape_1, shape_2), (1, shape_1, shape_2)] @@ -1022,19 +1064,21 @@ def test_get_multi_parameter_data(multi_dataset) -> None: ] else: - expected_shapes['this'] = [(15,), (15,)] - expected_shapes['that'] = [(15,), (15,)] - expected_values['this'] = [this_data.ravel(), - sp_1_data.ravel(), - sp_2_data.ravel()] - expected_values['that'] = [that_data.ravel(), - sp_1_data.ravel(), - sp_2_data.ravel()] - 
parameter_test_helper(multi_dataset, - input_names, - expected_names, - expected_shapes, - expected_values) + expected_shapes["this"] = [(15,), (15,)] + expected_shapes["that"] = [(15,), (15,)] + expected_values["this"] = [ + this_data.ravel(), + sp_1_data.ravel(), + sp_2_data.ravel(), + ] + expected_values["that"] = [ + that_data.ravel(), + sp_1_data.ravel(), + sp_2_data.ravel(), + ] + parameter_test_helper( + multi_dataset, input_names, expected_names, expected_shapes, expected_values + ) @settings(suppress_health_check=(HealthCheck.function_scoped_fixture,)) @@ -1067,18 +1111,21 @@ def test_get_array_in_scalar_param_data(array_in_scalar_dataset, start, end) -> expected_values[par_name] = [ test_parameter_values, scalar_param_values, - setpoint_param_values] + setpoint_param_values, + ] - start, end = limit_data_to_start_end(start, end, input_names, - expected_names, expected_shapes, - expected_values) - parameter_test_helper(array_in_scalar_dataset, - input_names, - expected_names, - expected_shapes, - expected_values, - start, - end) + start, end = limit_data_to_start_end( + start, end, input_names, expected_names, expected_shapes, expected_values + ) + parameter_test_helper( + array_in_scalar_dataset, + input_names, + expected_names, + expected_shapes, + expected_values, + start, + end, + ) def test_get_varlen_array_in_scalar_param_data(varlen_array_in_scalar_dataset) -> None: @@ -1111,13 +1158,16 @@ def test_get_varlen_array_in_scalar_param_data(varlen_array_in_scalar_dataset) - expected_values[par_name] = [ test_parameter_values.ravel(), scalar_param_values.ravel(), - setpoint_param_values.ravel()] + setpoint_param_values.ravel(), + ] - parameter_test_helper(varlen_array_in_scalar_dataset, - input_names, - expected_names, - expected_shapes, - expected_values) + parameter_test_helper( + varlen_array_in_scalar_dataset, + input_names, + expected_names, + expected_shapes, + expected_values, + ) @settings(suppress_health_check=(HealthCheck.function_scoped_fixture,)) @@ -1151,18 +1201,21 @@ def test_get_array_in_scalar_param_unrolled( expected_values[par_name] = [ test_parameter_values.ravel(), scalar_param_values.ravel(), - setpoint_param_values.ravel()] + setpoint_param_values.ravel(), + ] - start, end = limit_data_to_start_end(start, end, input_names, - expected_names, expected_shapes, - expected_values) - parameter_test_helper(array_in_scalar_dataset_unrolled, - input_names, - expected_names, - expected_shapes, - expected_values, - start, - end) + start, end = limit_data_to_start_end( + start, end, input_names, expected_names, expected_shapes, expected_values + ) + parameter_test_helper( + array_in_scalar_dataset_unrolled, + input_names, + expected_names, + expected_shapes, + expected_values, + start, + end, + ) def test_get_array_in_str_param_data(array_in_str_dataset) -> None: @@ -1197,18 +1250,22 @@ def test_get_array_in_str_param_data(array_in_str_dataset) -> None: expected_values[par_name] = [ test_parameter_values, scalar_param_values, - setpoint_param_values] + setpoint_param_values, + ] else: expected_shapes[par_name] = [(15,), (15,)] expected_values[par_name] = [ test_parameter_values.ravel(), scalar_param_values.ravel(), - setpoint_param_values.ravel()] - parameter_test_helper(array_in_str_dataset, - input_names, - expected_names, - expected_shapes, - expected_values) + setpoint_param_values.ravel(), + ] + parameter_test_helper( + array_in_str_dataset, + input_names, + expected_names, + expected_shapes, + expected_values, + ) def 
test_get_parameter_data_independent_parameters( @@ -1219,7 +1276,7 @@ def test_get_parameter_data_independent_parameters( paramspecs = ds.description.interdeps.non_dependencies params = [ps.name for ps in paramspecs] - expected_toplevel_params = ['param_1', 'param_2', 'param_3'] + expected_toplevel_params = ["param_1", "param_2", "param_3"] assert params == expected_toplevel_params expected_names = {} @@ -1233,16 +1290,13 @@ def test_get_parameter_data_independent_parameters( expected_shapes["param_3"] = [(10**3,)] * 2 expected_values = {} - expected_values['param_1'] = [np.arange(10000, 10000 + 1000)] - expected_values['param_2'] = [np.arange(20000, 20000 + 1000)] - expected_values['param_3'] = [np.arange(30000, 30000 + 1000), - np.arange(0, 1000)] + expected_values["param_1"] = [np.arange(10000, 10000 + 1000)] + expected_values["param_2"] = [np.arange(20000, 20000 + 1000)] + expected_values["param_3"] = [np.arange(30000, 30000 + 1000), np.arange(0, 1000)] - parameter_test_helper(ds, - expected_toplevel_params, - expected_names, - expected_shapes, - expected_values) + parameter_test_helper( + ds, expected_toplevel_params, expected_names, expected_shapes, expected_values + ) def parameter_test_helper( @@ -1272,9 +1326,7 @@ def parameter_test_helper( """ data = ds.get_parameter_data(*toplevel_names, start=start, end=end) - dataframe = ds.to_pandas_dataframe_dict(*toplevel_names, - start=start, - end=end) + dataframe = ds.to_pandas_dataframe_dict(*toplevel_names, start=start, end=end) all_data = ds.get_parameter_data(start=start, end=end) all_dataframe = ds.to_pandas_dataframe_dict(start=start, end=end) @@ -1285,10 +1337,22 @@ def parameter_test_helper( assert len(data.keys()) == len(toplevel_names) assert len(dataframe.keys()) == len(toplevel_names) - verify_data_dict(data, dataframe, toplevel_names, expected_names, - expected_shapes, expected_values) - verify_data_dict(all_data, all_dataframe, toplevel_names, expected_names, - expected_shapes, expected_values) + verify_data_dict( + data, + dataframe, + toplevel_names, + expected_names, + expected_shapes, + expected_values, + ) + verify_data_dict( + all_data, + all_dataframe, + toplevel_names, + expected_names, + expected_shapes, + expected_values, + ) # Now lets remove a random element from the list # We do this one by one until there is only one element in the list @@ -1300,17 +1364,23 @@ def parameter_test_helper( expected_shapes.pop(name_removed) expected_values.pop(name_removed) - subset_data = ds.get_parameter_data(*subset_names, - start=start, end=end) - subset_dataframe = ds.to_pandas_dataframe_dict(*subset_names, - start=start, - end=end) - verify_data_dict(subset_data, subset_dataframe, subset_names, - expected_names, expected_shapes, expected_values) - - -def limit_data_to_start_end(start, end, input_names, expected_names, - expected_shapes, expected_values): + subset_data = ds.get_parameter_data(*subset_names, start=start, end=end) + subset_dataframe = ds.to_pandas_dataframe_dict( + *subset_names, start=start, end=end + ) + verify_data_dict( + subset_data, + subset_dataframe, + subset_names, + expected_names, + expected_shapes, + expected_values, + ) + + +def limit_data_to_start_end( + start, end, input_names, expected_names, expected_shapes, expected_values +): if not (start is None and end is None): if start is None: start = 1 @@ -1330,14 +1400,12 @@ def limit_data_to_start_end(start, end, input_names, expected_names, new_shapes.append(tuple(shape_list)) expected_shapes[name] = new_shapes for i in 
range(len(expected_values[name])): - expected_values[name][i] = \ - expected_values[name][i][start - 1:end] + expected_values[name][i] = expected_values[name][i][start - 1 : end] return start, end @pytest.mark.usefixtures("experiment") def test_empty_ds_parameters() -> None: - ds = new_data_set("mydataset") assert ds.parameters is None ds.mark_started() diff --git a/tests/dataset/test_dependencies.py b/tests/dataset/test_dependencies.py index ac1f775af5f..eeba17ca5c2 100644 --- a/tests/dataset/test_dependencies.py +++ b/tests/dataset/test_dependencies.py @@ -12,7 +12,7 @@ from qcodes.dataset.descriptions.param_spec import ParamSpec, ParamSpecBase from qcodes.dataset.descriptions.versioning.converters import new_to_old, old_to_new from qcodes.dataset.descriptions.versioning.v0 import InterDependencies -from qcodes.tests.common import error_caused_by +from tests.common import error_caused_by def test_wrong_input_raises() -> None: @@ -49,30 +49,30 @@ def test_init(some_paramspecbases) -> None: assert idps1.non_dependencies == (ps1,) assert idps2.non_dependencies == (ps1,) - idps = InterDependencies_(dependencies={ps1: (ps3, ps2), - ps4: (ps3,)}) + idps = InterDependencies_(dependencies={ps1: (ps3, ps2), ps4: (ps3,)}) assert set(idps.what_depends_on(ps3)) == {ps1, ps4} assert idps.non_dependencies == (ps1, ps4) def test_init_validation_raises(some_paramspecbases) -> None: - (ps1, ps2, ps3, ps4) = some_paramspecbases # First test validation of trees invalid in their own right - invalid_trees = ([ps1, ps2], - {'ps1': 'ps2'}, - {ps1: 'ps2'}, - {ps1: ('ps2',)}, - {ps1: (ps2,), ps2: (ps1,)} - ) - causes = ("ParamSpecTree must be a dict", - "ParamSpecTree must have ParamSpecs as keys", - "ParamSpecTree must have tuple values", - "ParamSpecTree can only have tuples " - "of ParamSpecs as values", - "ParamSpecTree can not have cycles") + invalid_trees = ( + [ps1, ps2], + {"ps1": "ps2"}, + {ps1: "ps2"}, + {ps1: ("ps2",)}, + {ps1: (ps2,), ps2: (ps1,)}, + ) + causes = ( + "ParamSpecTree must be a dict", + "ParamSpecTree must have ParamSpecs as keys", + "ParamSpecTree must have tuple values", + "ParamSpecTree can only have tuples of ParamSpecs as values", + "ParamSpecTree can not have cycles", + ) for tree, cause in zip(invalid_trees, causes): with pytest.raises(ValueError, match="Invalid dependencies") as ei: @@ -89,8 +89,7 @@ def test_init_validation_raises(some_paramspecbases) -> None: with pytest.raises(ValueError, match="Invalid standalones") as ei: InterDependencies_(standalones=("ps1", "ps2")) # type: ignore[arg-type] - assert error_caused_by(ei, cause='Standalones must be a sequence of ' - 'ParamSpecs') + assert error_caused_by(ei, cause="Standalones must be a sequence of ParamSpecs") # Now test trees that are invalid together @@ -106,7 +105,6 @@ def test_init_validation_raises(some_paramspecbases) -> None: def test_to_dict(some_paramspecbases) -> None: - def tester(idps) -> None: ser = idps._to_dict() json.dumps(ser) @@ -115,20 +113,19 @@ def tester(idps) -> None: (ps1, ps2, ps3, ps4) = some_paramspecbases - idps = InterDependencies_(standalones=(ps1, ps2), - dependencies={ps3: (ps4,)}) + idps = InterDependencies_(standalones=(ps1, ps2), dependencies={ps3: (ps4,)}) tester(idps) idps = InterDependencies_(standalones=(ps1, ps2, ps3, ps4)) tester(idps) - idps = InterDependencies_(dependencies={ps1: (ps2, ps3)}, - inferences={ps2: (ps4,), ps3: (ps4,)}) + idps = InterDependencies_( + dependencies={ps1: (ps2, ps3)}, inferences={ps2: (ps4,), ps3: (ps4,)} + ) tester(idps) def 
test_old_to_new_and_back(some_paramspecs) -> None: - idps_old = InterDependencies(*some_paramspecs[1].values()) idps_new = old_to_new(idps_old) @@ -136,13 +133,12 @@ def test_old_to_new_and_back(some_paramspecs) -> None: def test_old_to_new(some_paramspecs) -> None: - - ps1 = some_paramspecs[1]['ps1'] - ps2 = some_paramspecs[1]['ps2'] - ps3 = some_paramspecs[1]['ps3'] - ps4 = some_paramspecs[1]['ps4'] - ps5 = some_paramspecs[1]['ps5'] - ps6 = some_paramspecs[1]['ps6'] + ps1 = some_paramspecs[1]["ps1"] + ps2 = some_paramspecs[1]["ps2"] + ps3 = some_paramspecs[1]["ps3"] + ps4 = some_paramspecs[1]["ps4"] + ps5 = some_paramspecs[1]["ps5"] + ps6 = some_paramspecs[1]["ps6"] idps_old = InterDependencies(ps1, ps2, ps3) @@ -155,7 +151,6 @@ def test_old_to_new(some_paramspecs) -> None: ps5_base = ps5.base_version() ps6_base = ps6.base_version() - assert idps_new.dependencies == {} assert idps_new.inferences == {ps3_base: (ps1_base,)} assert idps_new.standalones == {ps2_base} @@ -166,10 +161,11 @@ def test_old_to_new(some_paramspecs) -> None: idps_new = old_to_new(idps_old) - assert idps_new.dependencies == {ps5_base: (ps3_base, ps4_base), - ps6_base: (ps3_base, ps4_base)} - assert idps_new.inferences == {ps3_base: (ps1_base,), - ps4_base: (ps2_base,)} + assert idps_new.dependencies == { + ps5_base: (ps3_base, ps4_base), + ps6_base: (ps3_base, ps4_base), + } + assert idps_new.inferences == {ps3_base: (ps1_base,), ps4_base: (ps2_base,)} assert idps_new.standalones == set() paramspecs2 = (ps1_base, ps2_base, ps3_base, ps4_base, ps5_base, ps6_base) assert idps_new._id_to_paramspec == {ps.name: ps for ps in paramspecs2} @@ -186,55 +182,66 @@ def test_old_to_new(some_paramspecs) -> None: def test_new_to_old(some_paramspecbases) -> None: - (ps1, ps2, ps3, ps4) = some_paramspecbases - idps_new = InterDependencies_(dependencies={ps1: (ps2, ps3)}, - standalones=(ps4,)) - - paramspec1 = ParamSpec(name=ps1.name, paramtype=ps1.type, - label=ps1.label, unit=ps1.unit, - depends_on=[ps2.name, ps3.name]) - paramspec2 = ParamSpec(name=ps2.name, paramtype=ps2.type, - label=ps2.label, unit=ps2.unit) - paramspec3 = ParamSpec(name=ps3.name, paramtype=ps3.type, - label=ps3.label, unit=ps3.unit) - paramspec4 = ParamSpec(name=ps4.name, paramtype=ps4.type, - label=ps4.label, unit=ps4.unit) - idps_old_expected = InterDependencies(paramspec2, paramspec3, - paramspec1, paramspec4) + idps_new = InterDependencies_(dependencies={ps1: (ps2, ps3)}, standalones=(ps4,)) + + paramspec1 = ParamSpec( + name=ps1.name, + paramtype=ps1.type, + label=ps1.label, + unit=ps1.unit, + depends_on=[ps2.name, ps3.name], + ) + paramspec2 = ParamSpec( + name=ps2.name, paramtype=ps2.type, label=ps2.label, unit=ps2.unit + ) + paramspec3 = ParamSpec( + name=ps3.name, paramtype=ps3.type, label=ps3.label, unit=ps3.unit + ) + paramspec4 = ParamSpec( + name=ps4.name, paramtype=ps4.type, label=ps4.label, unit=ps4.unit + ) + idps_old_expected = InterDependencies( + paramspec2, paramspec3, paramspec1, paramspec4 + ) assert new_to_old(idps_new) == idps_old_expected # - idps_new = InterDependencies_(inferences={ps1: (ps2, ps3)}, - standalones=(ps4,)) - - paramspec1 = ParamSpec(name=ps1.name, paramtype=ps1.type, - label=ps1.label, unit=ps1.unit, - inferred_from=[ps2.name, ps3.name]) - paramspec2 = ParamSpec(name=ps2.name, paramtype=ps2.type, - label=ps2.label, unit=ps2.unit) - paramspec3 = ParamSpec(name=ps3.name, paramtype=ps3.type, - label=ps3.label, unit=ps3.unit) - paramspec4 = ParamSpec(name=ps4.name, paramtype=ps4.type, - label=ps4.label, unit=ps4.unit) - 
idps_old_expected = InterDependencies(paramspec2, paramspec3, - paramspec1, paramspec4) + idps_new = InterDependencies_(inferences={ps1: (ps2, ps3)}, standalones=(ps4,)) + + paramspec1 = ParamSpec( + name=ps1.name, + paramtype=ps1.type, + label=ps1.label, + unit=ps1.unit, + inferred_from=[ps2.name, ps3.name], + ) + paramspec2 = ParamSpec( + name=ps2.name, paramtype=ps2.type, label=ps2.label, unit=ps2.unit + ) + paramspec3 = ParamSpec( + name=ps3.name, paramtype=ps3.type, label=ps3.label, unit=ps3.unit + ) + paramspec4 = ParamSpec( + name=ps4.name, paramtype=ps4.type, label=ps4.label, unit=ps4.unit + ) + idps_old_expected = InterDependencies( + paramspec2, paramspec3, paramspec1, paramspec4 + ) assert new_to_old(idps_new) == idps_old_expected - def test_extend_with_paramspec(some_paramspecs) -> None: - ps1 = some_paramspecs[1]['ps1'] - ps2 = some_paramspecs[1]['ps2'] - ps3 = some_paramspecs[1]['ps3'] - ps4 = some_paramspecs[1]['ps4'] - ps5 = some_paramspecs[1]['ps5'] - ps6 = some_paramspecs[1]['ps6'] - + ps1 = some_paramspecs[1]["ps1"] + ps2 = some_paramspecs[1]["ps2"] + ps3 = some_paramspecs[1]["ps3"] + ps4 = some_paramspecs[1]["ps4"] + ps5 = some_paramspecs[1]["ps5"] + ps6 = some_paramspecs[1]["ps6"] ps1_base = ps1.base_version() ps2_base = ps2.base_version() @@ -248,32 +255,34 @@ def test_extend_with_paramspec(some_paramspecs) -> None: assert idps_bare._extend_with_paramspec(ps3) == idps_extended - idps_bare = InterDependencies_(standalones=(ps2_base,), - inferences={ps3_base: (ps1_base,)}) - idps_extended = InterDependencies_(inferences={ps3_base: (ps1_base,), - ps4_base: (ps2_base,)}) + idps_bare = InterDependencies_( + standalones=(ps2_base,), inferences={ps3_base: (ps1_base,)} + ) + idps_extended = InterDependencies_( + inferences={ps3_base: (ps1_base,), ps4_base: (ps2_base,)} + ) assert idps_bare._extend_with_paramspec(ps4) == idps_extended idps_bare = InterDependencies_(standalones=(ps1_base, ps2_base)) idps_extended = InterDependencies_( - inferences={ps3_base: (ps1_base,), - ps4_base: (ps2_base,)}, - dependencies={ps5_base: (ps3_base, ps4_base), - ps6_base: (ps3_base, ps4_base)}) - assert (idps_bare. - _extend_with_paramspec(ps3). - _extend_with_paramspec(ps4). - _extend_with_paramspec(ps5). 
- _extend_with_paramspec(ps6)) == idps_extended + inferences={ps3_base: (ps1_base,), ps4_base: (ps2_base,)}, + dependencies={ps5_base: (ps3_base, ps4_base), ps6_base: (ps3_base, ps4_base)}, + ) + assert ( + idps_bare._extend_with_paramspec(ps3) + ._extend_with_paramspec(ps4) + ._extend_with_paramspec(ps5) + ._extend_with_paramspec(ps6) + ) == idps_extended def test_validate_subset(some_paramspecbases) -> None: - ps1, ps2, ps3, ps4 = some_paramspecbases - idps = InterDependencies_(dependencies={ps1: (ps2, ps3)}, - inferences={ps2: (ps4,), ps3: (ps4,)}) + idps = InterDependencies_( + dependencies={ps1: (ps2, ps3)}, inferences={ps2: (ps4,), ps3: (ps4,)} + ) idps.validate_subset((ps4,)) idps.validate_subset((ps2, ps4)) @@ -297,33 +306,32 @@ def test_validate_subset(some_paramspecbases) -> None: assert exc_info3.value._missing_params == {"psb4"} with pytest.raises(InferenceError) as exc_info4: - idps2 = InterDependencies_(dependencies={ps1: (ps2, ps3)}, - inferences={ps3: (ps4,)}) + idps2 = InterDependencies_( + dependencies={ps1: (ps2, ps3)}, inferences={ps3: (ps4,)} + ) idps2.validate_subset((ps1, ps2, ps3)) assert exc_info4.value._param_name == "psb3" assert exc_info4.value._missing_params == {"psb4"} - with pytest.raises(ValueError, match='ps42'): - ps42 = ParamSpecBase('ps42', paramtype='text', label='', unit='it') + with pytest.raises(ValueError, match="ps42"): + ps42 = ParamSpecBase("ps42", paramtype="text", label="", unit="it") idps.validate_subset((ps2, ps42, ps4)) def test_extend(some_paramspecbases) -> None: - ps1, ps2, ps3, _ = some_paramspecbases idps = InterDependencies_(standalones=(ps1, ps2)) idps_ext = idps.extend(dependencies={ps1: (ps3,)}) - idps_expected = InterDependencies_(standalones=(ps2,), - dependencies={ps1: (ps3,)}) + idps_expected = InterDependencies_(standalones=(ps2,), dependencies={ps1: (ps3,)}) assert idps_ext == idps_expected # lazily check that we get brand new objects idps._id_to_paramspec[ps1.name].label = "Something new and awful" idps._id_to_paramspec[ps2.name].unit = "Ghastly unit" - assert idps_ext._id_to_paramspec[ps1.name].label == 'blah' - assert idps_ext._id_to_paramspec[ps2.name].unit == 'V' + assert idps_ext._id_to_paramspec[ps1.name].label == "blah" + assert idps_ext._id_to_paramspec[ps2.name].unit == "V" # reset the objects that are never supposed to be mutated idps._id_to_paramspec[ps1.name].label = "blah" idps._id_to_paramspec[ps2.name].unit = "V" @@ -344,7 +352,7 @@ def test_extend(some_paramspecbases) -> None: assert idps_ext == idps_expected ps_nu = deepcopy(ps1) - ps_nu.unit += '/s' + ps_nu.unit += "/s" idps = InterDependencies_(standalones=(ps1,)) idps_ext = idps.extend(standalones=(ps_nu,)) idps_expected = InterDependencies_(standalones=(ps_nu, ps1)) @@ -359,49 +367,42 @@ def test_extend(some_paramspecbases) -> None: def test_remove(some_paramspecbases) -> None: ps1, ps2, ps3, ps4 = some_paramspecbases - idps = InterDependencies_(dependencies={ps1: (ps2, ps3)}, - inferences={ps2: (ps4, )}) + idps = InterDependencies_(dependencies={ps1: (ps2, ps3)}, inferences={ps2: (ps4,)}) idps_rem = idps.remove(ps1) - idps_expected = InterDependencies_(inferences={ps2: (ps4,)}, - standalones=(ps3,)) + idps_expected = InterDependencies_(inferences={ps2: (ps4,)}, standalones=(ps3,)) assert idps_rem == idps_expected for p in [ps4, ps2, ps3]: - match = re.escape(f'Cannot remove {p.name}, other parameters') + match = re.escape(f"Cannot remove {p.name}, other parameters") with pytest.raises(ValueError, match=match): idps_rem = idps.remove(p) - idps = 
InterDependencies_(dependencies={ps1: (ps3,)}, - inferences={ps2: (ps4,)}) + idps = InterDependencies_(dependencies={ps1: (ps3,)}, inferences={ps2: (ps4,)}) idps_rem = idps.remove(ps2) - idps_expected = InterDependencies_(dependencies={ps1: (ps3,)}, - standalones=(ps4,)) + idps_expected = InterDependencies_(dependencies={ps1: (ps3,)}, standalones=(ps4,)) assert idps_rem == idps_expected - idps = InterDependencies_(dependencies={ps1: (ps2, ps3)}, - standalones=(ps4, )) + idps = InterDependencies_(dependencies={ps1: (ps2, ps3)}, standalones=(ps4,)) idps_rem = idps.remove(ps4) idps_expected = InterDependencies_(dependencies={ps1: (ps2, ps3)}) assert idps_rem == idps_expected - idps = InterDependencies_(dependencies={ps1: (ps2, ps3)}, - standalones=(ps4, )) + idps = InterDependencies_(dependencies={ps1: (ps2, ps3)}, standalones=(ps4,)) idps_rem = idps.remove(ps1) idps_expected = InterDependencies_(standalones=(ps2, ps3, ps4)) assert idps_rem == idps_expected def test_equality_old(some_paramspecs) -> None: - # TODO: make this more fancy with itertools - ps1 = some_paramspecs[1]['ps1'] - ps2 = some_paramspecs[1]['ps2'] - ps3 = some_paramspecs[1]['ps3'] - ps4 = some_paramspecs[1]['ps4'] - ps5 = some_paramspecs[1]['ps5'] - ps6 = some_paramspecs[1]['ps6'] + ps1 = some_paramspecs[1]["ps1"] + ps2 = some_paramspecs[1]["ps2"] + ps3 = some_paramspecs[1]["ps3"] + ps4 = some_paramspecs[1]["ps4"] + ps5 = some_paramspecs[1]["ps5"] + ps6 = some_paramspecs[1]["ps6"] assert InterDependencies(ps1, ps2, ps3) == InterDependencies(ps3, ps2, ps1) assert InterDependencies(ps1, ps6, ps3) == InterDependencies(ps3, ps6, ps1) @@ -409,21 +410,17 @@ def test_equality_old(some_paramspecs) -> None: def test_non_dependents() -> None: - ps1 = ParamSpecBase('ps1', paramtype='numeric', label='Raw Data 1', - unit='V') - ps2 = ParamSpecBase('ps2', paramtype='array', label='Raw Data 2', - unit='V') - ps3 = ParamSpecBase('ps3', paramtype='text', label='Axis 1', - unit='') - ps4 = ParamSpecBase('ps4', paramtype='numeric', label='Axis 2', - unit='V') - ps5 = ParamSpecBase('ps5', paramtype='numeric', label='Signal', - unit='Conductance') - ps6 = ParamSpecBase('ps6', paramtype='text', label='Goodness', - unit='') - - idps1 = InterDependencies_(dependencies={ps5: (ps3, ps4), ps6: (ps3, ps4)}, - inferences={ps4: (ps2,), ps3: (ps1,)}) + ps1 = ParamSpecBase("ps1", paramtype="numeric", label="Raw Data 1", unit="V") + ps2 = ParamSpecBase("ps2", paramtype="array", label="Raw Data 2", unit="V") + ps3 = ParamSpecBase("ps3", paramtype="text", label="Axis 1", unit="") + ps4 = ParamSpecBase("ps4", paramtype="numeric", label="Axis 2", unit="V") + ps5 = ParamSpecBase("ps5", paramtype="numeric", label="Signal", unit="Conductance") + ps6 = ParamSpecBase("ps6", paramtype="text", label="Goodness", unit="") + + idps1 = InterDependencies_( + dependencies={ps5: (ps3, ps4), ps6: (ps3, ps4)}, + inferences={ps4: (ps2,), ps3: (ps1,)}, + ) assert idps1.non_dependencies == (ps5, ps6) @@ -431,7 +428,6 @@ def test_non_dependents() -> None: assert idps2.non_dependencies == (ps2,) - idps3 = InterDependencies_(dependencies={ps6: (ps1,)}, - standalones=(ps2,)) + idps3 = InterDependencies_(dependencies={ps6: (ps1,)}, standalones=(ps2,)) assert idps3.non_dependencies == (ps2, ps6) diff --git a/tests/dataset/test_fix_functions.py b/tests/dataset/test_fix_functions.py index 7f18f5f9fef..3d3ec8803a7 100644 --- a/tests/dataset/test_fix_functions.py +++ b/tests/dataset/test_fix_functions.py @@ -5,7 +5,7 @@ import qcodes.dataset.descriptions.versioning.serialization as serial 
import qcodes.dataset.descriptions.versioning.v0 as v0 -import qcodes.tests.dataset +import tests.dataset from qcodes.dataset.database_fix_functions import ( fix_version_4a_run_description_bug, fix_wrong_run_descriptions, @@ -15,45 +15,43 @@ from qcodes.dataset.descriptions.versioning.converters import old_to_new from qcodes.dataset.sqlite.db_upgrades.version import get_user_version from qcodes.dataset.sqlite.queries import get_run_description -from qcodes.tests.common import skip_if_no_fixtures -from qcodes.tests.dataset.conftest import temporarily_copied_DB +from tests.common import skip_if_no_fixtures +from tests.dataset.conftest import temporarily_copied_DB -fixturepath = os.sep.join(qcodes.tests.dataset.__file__.split(os.sep)[:-1]) -fixturepath = os.path.join(fixturepath, 'fixtures') +fixturepath = os.sep.join(tests.dataset.__file__.split(os.sep)[:-1]) +fixturepath = os.path.join(fixturepath, "fixtures") def test_version_4a_bugfix() -> None: - v4fixpath = os.path.join(fixturepath, 'db_files', 'version4a') + v4fixpath = os.path.join(fixturepath, "db_files", "version4a") - dbname_old = os.path.join(v4fixpath, 'some_runs.db') + dbname_old = os.path.join(v4fixpath, "some_runs.db") skip_if_no_fixtures(dbname_old) with temporarily_copied_DB(dbname_old, debug=False, version=4) as conn: - dd = fix_version_4a_run_description_bug(conn) - assert dd['runs_inspected'] == 10 - assert dd['runs_fixed'] == 10 + assert dd["runs_inspected"] == 10 + assert dd["runs_fixed"] == 10 # Ensure the structure of the run_description JSON after applying # the fix function - for run_id in range(1, 10+1): + for run_id in range(1, 10 + 1): rd_str = get_run_description(conn, run_id) rd_dict = json.loads(rd_str) - assert list(rd_dict.keys()) == ['interdependencies'] - assert list(rd_dict['interdependencies'].keys()) == ['paramspecs'] + assert list(rd_dict.keys()) == ["interdependencies"] + assert list(rd_dict["interdependencies"].keys()) == ["paramspecs"] dd = fix_version_4a_run_description_bug(conn) - assert dd['runs_inspected'] == 10 - assert dd['runs_fixed'] == 0 + assert dd["runs_inspected"] == 10 + assert dd["runs_fixed"] == 0 def test_version_4a_bugfix_raises() -> None: - - v3fixpath = os.path.join(fixturepath, 'db_files', 'version3') - dbname_old = os.path.join(v3fixpath, 'some_runs_without_run_description.db') + v3fixpath = os.path.join(fixturepath, "db_files", "version3") + dbname_old = os.path.join(v3fixpath, "some_runs_without_run_description.db") skip_if_no_fixtures(dbname_old) @@ -63,29 +61,30 @@ def test_version_4a_bugfix_raises() -> None: def test_fix_wrong_run_descriptions() -> None: - v3fixpath = os.path.join(fixturepath, 'db_files', 'version3') + v3fixpath = os.path.join(fixturepath, "db_files", "version3") - dbname_old = os.path.join(v3fixpath, 'some_runs_without_run_description.db') + dbname_old = os.path.join(v3fixpath, "some_runs_without_run_description.db") skip_if_no_fixtures(dbname_old) def make_ps(n): - ps = ParamSpec(f'p{n}', label=f'Parameter {n}', - unit=f'unit {n}', paramtype='numeric') + ps = ParamSpec( + f"p{n}", label=f"Parameter {n}", unit=f"unit {n}", paramtype="numeric" + ) return ps paramspecs = [make_ps(n) for n in range(6)] - paramspecs[2]._inferred_from = ['p0'] - paramspecs[3]._inferred_from = ['p1', 'p0'] - paramspecs[4]._depends_on = ['p2', 'p3'] - paramspecs[5]._inferred_from = ['p0'] + paramspecs[2]._inferred_from = ["p0"] + paramspecs[3]._inferred_from = ["p1", "p0"] + paramspecs[4]._depends_on = ["p2", "p3"] + paramspecs[5]._inferred_from = ["p0"] with 
temporarily_copied_DB(dbname_old, debug=False, version=3) as conn: - assert get_user_version(conn) == 3 expected_description = RunDescriber( - old_to_new(v0.InterDependencies(*paramspecs))) + old_to_new(v0.InterDependencies(*paramspecs)) + ) empty_description = RunDescriber(old_to_new(v0.InterDependencies())) @@ -102,10 +101,9 @@ def make_ps(n): def test_fix_wrong_run_descriptions_raises() -> None: + v4fixpath = os.path.join(fixturepath, "db_files", "version4a") - v4fixpath = os.path.join(fixturepath, 'db_files', 'version4a') - - dbname_old = os.path.join(v4fixpath, 'some_runs.db') + dbname_old = os.path.join(v4fixpath, "some_runs.db") skip_if_no_fixtures(dbname_old) diff --git a/tests/dataset/test_nested_measurements.py b/tests/dataset/test_nested_measurements.py index 083ae0f8d7e..a9352d632db 100644 --- a/tests/dataset/test_nested_measurements.py +++ b/tests/dataset/test_nested_measurements.py @@ -10,7 +10,7 @@ from qcodes.dataset.descriptions.dependencies import InterDependencies_ from qcodes.dataset.descriptions.param_spec import ParamSpecBase from qcodes.dataset.sqlite.connection import atomic_transaction -from qcodes.tests.common import retry_until_does_not_throw +from tests.common import retry_until_does_not_throw VALUE = Union[str, float, list, np.ndarray, bool] @@ -26,15 +26,14 @@ def test_nested_measurement_basic(DAC, DMM, bg_writing) -> None: meas2.register_parameter(DAC.ch2) meas2.register_parameter(DMM.v2, setpoints=(DAC.ch2,)) - with meas1.run(write_in_background=bg_writing) as ds1, \ - meas2.run(write_in_background=bg_writing) as ds2: + with meas1.run(write_in_background=bg_writing) as ds1, meas2.run( + write_in_background=bg_writing + ) as ds2: for i in range(10): DAC.ch1.set(i) DAC.ch2.set(i) - ds1.add_result((DAC.ch1, i), - (DMM.v1, DMM.v1())) - ds2.add_result((DAC.ch2, i), - (DMM.v2, DMM.v2())) + ds1.add_result((DAC.ch1, i), (DMM.v1, DMM.v1())) + ds2.add_result((DAC.ch2, i), (DMM.v2, DMM.v2())) data1 = ds1.dataset.get_parameter_data()["dummy_dmm_v1"] assert len(data1.keys()) == 2 @@ -55,21 +54,19 @@ def test_nested_measurement_basic(DAC, DMM, bg_writing) -> None: @pytest.mark.parametrize("bg_writing", [True, False]) def test_nested_measurement(bg_writing) -> None: meas1 = Measurement() - meas1.register_custom_parameter('foo1') - meas1.register_custom_parameter('bar1', setpoints=('foo1',)) + meas1.register_custom_parameter("foo1") + meas1.register_custom_parameter("bar1", setpoints=("foo1",)) meas2 = Measurement() - meas2.register_custom_parameter('foo2') - meas2.register_custom_parameter('bar2', setpoints=('foo2',)) + meas2.register_custom_parameter("foo2") + meas2.register_custom_parameter("bar2", setpoints=("foo2",)) - - with meas1.run(write_in_background=bg_writing) as ds1, \ - meas2.run(write_in_background=bg_writing) as ds2: + with meas1.run(write_in_background=bg_writing) as ds1, meas2.run( + write_in_background=bg_writing + ) as ds2: for i in range(10): - ds1.add_result(("foo1", i), - ("bar1", i**2)) - ds2.add_result(("foo2", 2*i), - ("bar2", (2*i)**2)) + ds1.add_result(("foo1", i), ("bar1", i**2)) + ds2.add_result(("foo2", 2 * i), ("bar2", (2 * i) ** 2)) data1 = ds1.dataset.get_parameter_data()["bar1"] assert len(data1.keys()) == 2 @@ -77,14 +74,14 @@ def test_nested_measurement(bg_writing) -> None: assert "bar1" in data1.keys() assert_allclose(data1["foo1"], np.arange(10)) - assert_allclose(data1["bar1"], np.arange(10)**2) + assert_allclose(data1["bar1"], np.arange(10) ** 2) data2 = ds2.dataset.get_parameter_data()["bar2"] assert len(data2.keys()) == 2 assert 
"foo2" in data2.keys() assert "bar2" in data2.keys() assert_allclose(data2["foo2"], np.arange(0, 20, 2)) - assert_allclose(data2["bar2"], np.arange(0, 20, 2)**2) + assert_allclose(data2["bar2"], np.arange(0, 20, 2) ** 2) @pytest.mark.usefixtures("experiment") @@ -99,30 +96,40 @@ def test_nested_measurement_array( bg_writing, outer_len, inner_len1, inner_len2 ) -> None: meas1 = Measurement() - meas1.register_custom_parameter('foo1', paramtype='numeric') - meas1.register_custom_parameter('bar1spt', paramtype='array') + meas1.register_custom_parameter("foo1", paramtype="numeric") + meas1.register_custom_parameter("bar1spt", paramtype="array") meas1.register_custom_parameter( - 'bar1', setpoints=('foo1', "bar1spt"), paramtype='array' + "bar1", setpoints=("foo1", "bar1spt"), paramtype="array" ) meas2 = Measurement() - meas2.register_custom_parameter('foo2', paramtype='numeric') - meas2.register_custom_parameter('bar2spt', paramtype='array') + meas2.register_custom_parameter("foo2", paramtype="numeric") + meas2.register_custom_parameter("bar2spt", paramtype="array") meas2.register_custom_parameter( - 'bar2', setpoints=('foo2', 'bar2spt',), paramtype='array' + "bar2", + setpoints=( + "foo2", + "bar2spt", + ), + paramtype="array", ) - with meas1.run(write_in_background=bg_writing) as ds1, \ - meas2.run(write_in_background=bg_writing) as ds2: + with meas1.run(write_in_background=bg_writing) as ds1, meas2.run( + write_in_background=bg_writing + ) as ds2: for i in range(outer_len): bar1sptdata = np.arange(inner_len1) bar2sptdata = np.arange(inner_len2) - ds1.add_result(("foo1", i), - ("bar1spt", bar1sptdata), - ("bar1", np.ones(inner_len1)*i*bar1sptdata)) - ds2.add_result(("foo2", i), - ("bar2spt", bar2sptdata), - ("bar2", np.ones(inner_len2)*i*bar2sptdata)) + ds1.add_result( + ("foo1", i), + ("bar1spt", bar1sptdata), + ("bar1", np.ones(inner_len1) * i * bar1sptdata), + ) + ds2.add_result( + ("foo2", i), + ("bar2spt", bar2sptdata), + ("bar2", np.ones(inner_len2) * i * bar2sptdata), + ) data1 = ds1.dataset.get_parameter_data()["bar1"] assert len(data1.keys()) == 3 @@ -130,15 +137,14 @@ def test_nested_measurement_array( assert "bar1spt" in data1.keys() assert "bar1" in data1.keys() - expected_foo1_data = np.repeat(np.arange(outer_len), - inner_len1).reshape(outer_len, - inner_len1) - expected_bar1spt_data = np.tile(np.arange(inner_len1), - (outer_len, 1)) + expected_foo1_data = np.repeat(np.arange(outer_len), inner_len1).reshape( + outer_len, inner_len1 + ) + expected_bar1spt_data = np.tile(np.arange(inner_len1), (outer_len, 1)) assert_allclose(data1["foo1"], expected_foo1_data) assert_allclose(data1["bar1spt"], expected_bar1spt_data) - assert_allclose(data1["bar1"], expected_foo1_data*expected_bar1spt_data) + assert_allclose(data1["bar1"], expected_foo1_data * expected_bar1spt_data) data2 = ds2.dataset.get_parameter_data()["bar2"] assert len(data2.keys()) == 3 @@ -146,16 +152,17 @@ def test_nested_measurement_array( assert "bar2spt" in data2.keys() assert "bar2" in data2.keys() - expected_foo2_data = np.repeat(np.arange(outer_len), - inner_len2).reshape(outer_len, inner_len2) + expected_foo2_data = np.repeat(np.arange(outer_len), inner_len2).reshape( + outer_len, inner_len2 + ) expected_bar2spt_data = np.tile(np.arange(inner_len2), (outer_len, 1)) assert_allclose(data2["foo2"], expected_foo2_data) assert_allclose(data2["bar2spt"], expected_bar2spt_data) - assert_allclose(data2["bar2"], expected_foo2_data*expected_bar2spt_data) + assert_allclose(data2["bar2"], expected_foo2_data * 
expected_bar2spt_data) -@pytest.fixture(scope='function') +@pytest.fixture(scope="function") def basic_subscriber(): """ A basic subscriber that just puts results and length into @@ -171,26 +178,20 @@ def subscriber(results: list[tuple[VALUE]], length: int, state: dict) -> None: @pytest.mark.flaky(reruns=5) @pytest.mark.serial def test_subscription_on_dual_datasets(experiment, basic_subscriber) -> None: - xparam = ParamSpecBase(name='x', - paramtype='numeric', - label='x parameter', - unit='V') - yparam1 = ParamSpecBase(name='y1', - paramtype='numeric', - label='y parameter', - unit='Hz') - yparam2 = ParamSpecBase(name='y2', - paramtype='numeric', - label='y parameter', - unit='Hz') + xparam = ParamSpecBase(name="x", paramtype="numeric", label="x parameter", unit="V") + yparam1 = ParamSpecBase( + name="y1", paramtype="numeric", label="y parameter", unit="Hz" + ) + yparam2 = ParamSpecBase( + name="y2", paramtype="numeric", label="y parameter", unit="Hz" + ) dataset1 = new_data_set("test-dataset-1") idps_1 = InterDependencies_(dependencies={yparam1: (xparam,)}) dataset1.set_interdependencies(idps_1) dataset1.mark_started() - sub_id_1 = dataset1.subscribe(basic_subscriber, min_wait=0, min_count=1, - state={}) + sub_id_1 = dataset1.subscribe(basic_subscriber, min_wait=0, min_count=1, state={}) assert len(dataset1.subscribers) == 1 assert list(dataset1.subscribers.keys()) == [sub_id_1] @@ -200,28 +201,27 @@ def test_subscription_on_dual_datasets(experiment, basic_subscriber) -> None: dataset2.set_interdependencies(idps_2) dataset2.mark_started() - sub_id_2 = dataset2.subscribe(basic_subscriber, min_wait=0, min_count=1, - state={}) + sub_id_2 = dataset2.subscribe(basic_subscriber, min_wait=0, min_count=1, state={}) assert len(dataset2.subscribers) == 1 assert list(dataset2.subscribers.keys()) == [sub_id_2] assert sub_id_1 != sub_id_2 - expected_state_1 = {} expected_state_2 = {} for x in range(10): - y1 = -x**2 - y2 = x ** 2 - dataset1.add_results([{'x': x, 'y1': y1}]) - dataset2.add_results([{'x': x, 'y2': y2}]) - expected_state_1[x+1] = [(x, y1)] + y1 = -(x**2) + y2 = x**2 + dataset1.add_results([{"x": x, "y1": y1}]) + dataset2.add_results([{"x": x, "y2": y2}]) + expected_state_1[x + 1] = [(x, y1)] expected_state_2[x + 1] = [(x, y2)] @retry_until_does_not_throw( - exception_class_to_expect=AssertionError, delay=0.5, tries=10) + exception_class_to_expect=AssertionError, delay=0.5, tries=10 + ) def assert_expected_state(): assert dataset1.subscribers[sub_id_1].state == expected_state_1 assert dataset2.subscribers[sub_id_2].state == expected_state_2 @@ -239,10 +239,8 @@ def assert_expected_state(): # Ensure the trigger for the subscriber has been removed from the database get_triggers_sql = "SELECT * FROM sqlite_master WHERE TYPE = 'trigger';" - triggers1 = atomic_transaction( - dataset1.conn, get_triggers_sql).fetchall() + triggers1 = atomic_transaction(dataset1.conn, get_triggers_sql).fetchall() assert len(triggers1) == 0 - triggers2 = atomic_transaction( - dataset2.conn, get_triggers_sql).fetchall() + triggers2 = atomic_transaction(dataset2.conn, get_triggers_sql).fetchall() assert len(triggers2) == 0 diff --git a/tests/dataset/test_sqlite_base.py b/tests/dataset/test_sqlite_base.py index 24d4ab25218..354ea03fa43 100644 --- a/tests/dataset/test_sqlite_base.py +++ b/tests/dataset/test_sqlite_base.py @@ -29,12 +29,12 @@ from qcodes.dataset.sqlite import query_helpers as mut_help from qcodes.dataset.sqlite.connection import atomic_transaction, path_to_dbfile from qcodes.dataset.sqlite.database 
import get_DB_location -from qcodes.tests.common import error_caused_by from qcodes.utils import QCoDeSDeprecationWarning +from tests.common import error_caused_by from .helper_functions import verify_data_dict -_unicode_categories = ('Lu', 'Ll', 'Lt', 'Lm', 'Lo', 'Nd', 'Pc', 'Pd', 'Zs') +_unicode_categories = ("Lu", "Ll", "Lt", "Lm", "Lo", "Nd", "Pc", "Pd", "Zs") @contextmanager @@ -62,8 +62,7 @@ def _make_simple_run_describer(): def test_path_to_dbfile(tmp_path) -> None: - - tempdb = str(tmp_path / 'database.db') + tempdb = str(tmp_path / "database.db") conn = mut_db.connect(tempdb) try: assert path_to_dbfile(conn) == tempdb @@ -178,8 +177,9 @@ def test_insert_many_values_raises(experiment) -> None: conn = experiment.conn with pytest.raises(ValueError): - mut_help.insert_many_values(conn, 'some_string', ['column1'], - values=[[1], [1, 3]]) + mut_help.insert_many_values( + conn, "some_string", ["column1"], values=[[1], [1, 3]] + ) def test_get_non_existing_metadata_returns_none(experiment) -> None: @@ -206,7 +206,6 @@ def test__validate_table_raises(table_name) -> None: def test_get_dependents_simple(experiment, simple_run_describer) -> None: - (_, run_id, _) = mut_queries.create_run( experiment.conn, experiment.exp_id, @@ -250,16 +249,17 @@ def test_get_dependents(experiment) -> None: deps = mut_queries._get_dependents(experiment.conn, run_id) - expected_deps = [mut_queries._get_layout_id(experiment.conn, 'y', run_id), - mut_queries._get_layout_id(experiment.conn, 'z', run_id)] + expected_deps = [ + mut_queries._get_layout_id(experiment.conn, "y", run_id), + mut_queries._get_layout_id(experiment.conn, "z", run_id), + ] assert deps == expected_deps def test_column_in_table(dataset) -> None: assert mut_help.is_column_in_table(dataset.conn, "runs", "run_id") - assert not mut_help.is_column_in_table(dataset.conn, "runs", - "non-existing-column") + assert not mut_help.is_column_in_table(dataset.conn, "runs", "non-existing-column") def test_run_exist(dataset) -> None: @@ -268,10 +268,8 @@ def test_run_exist(dataset) -> None: def test_get_last_run(dataset) -> None: - assert dataset.run_id \ - == mut_queries.get_last_run(dataset.conn, dataset.exp_id) - assert dataset.run_id \ - == mut_queries.get_last_run(dataset.conn) + assert dataset.run_id == mut_queries.get_last_run(dataset.conn, dataset.exp_id) + assert dataset.run_id == mut_queries.get_last_run(dataset.conn) def test_get_last_run_no_runs(experiment) -> None: @@ -280,8 +278,7 @@ def test_get_last_run_no_runs(experiment) -> None: def test_get_last_experiment(experiment) -> None: - assert experiment.exp_id \ - == mut_queries.get_last_experiment(experiment.conn) + assert experiment.exp_id == mut_queries.get_last_experiment(experiment.conn) def test_get_last_experiment_no_experiments(empty_temp_db) -> None: @@ -290,12 +287,11 @@ def test_get_last_experiment_no_experiments(empty_temp_db) -> None: def test_update_runs_description(dataset) -> None: - invalid_descs = ['{}', 'description'] + invalid_descs = ["{}", "description"] for idesc in invalid_descs: with pytest.raises(ValueError): - mut_queries.update_run_description( - dataset.conn, dataset.run_id, idesc) + mut_queries.update_run_description(dataset.conn, dataset.run_id, idesc) desc = serial.to_json_for_storage(RunDescriber(InterDependencies_())) mut_queries.update_run_description(dataset.conn, dataset.run_id, desc) @@ -329,14 +325,14 @@ def test_get_data_no_columns(scalar_dataset) -> None: def test_get_parameter_data(scalar_dataset) -> None: ds = scalar_dataset - input_names = ['param_3'] + 
input_names = ["param_3"] data = mut_queries.get_parameter_data(ds.conn, ds.table_name, input_names) assert len(data.keys()) == len(input_names) expected_names = {"param_3": ["param_0", "param_1", "param_2", "param_3"]} - expected_shapes = {"param_3": [(10 ** 3,)] * 4} + expected_shapes = {"param_3": [(10**3,)] * 4} expected_values = { "param_3": [np.arange(10000 * a, 10000 * a + 1000) for a in range(4)] @@ -353,7 +349,7 @@ def test_get_parameter_data_independent_parameters( paramspecs = ds.description.interdeps.non_dependencies params = [ps.name for ps in paramspecs] - expected_toplevel_params = ['param_1', 'param_2', 'param_3'] + expected_toplevel_params = ["param_1", "param_2", "param_3"] assert params == expected_toplevel_params data = mut_queries.get_parameter_data(ds.conn, ds.table_name) @@ -367,9 +363,9 @@ def test_get_parameter_data_independent_parameters( } expected_shapes = { - "param_1": [(10 ** 3,)], - "param_2": [(10 ** 3,)], - "param_3": [(10 ** 3,)] * 2, + "param_1": [(10**3,)], + "param_2": [(10**3,)], + "param_3": [(10**3,)] * 2, } expected_values = { "param_1": [np.arange(10000, 10000 + 1000)], @@ -377,13 +373,19 @@ def test_get_parameter_data_independent_parameters( "param_3": [np.arange(30000, 30000 + 1000), np.arange(0, 1000)], } - verify_data_dict(data, None, expected_toplevel_params, expected_names, - expected_shapes, expected_values) + verify_data_dict( + data, + None, + expected_toplevel_params, + expected_names, + expected_shapes, + expected_values, + ) def test_is_run_id_in_db(empty_temp_db) -> None: conn = mut_db.connect(get_DB_location()) - mut_queries.new_experiment(conn, 'test_exp', 'no_sample') + mut_queries.new_experiment(conn, "test_exp", "no_sample") for _ in range(5): DataSet(conn=conn, run_id=None) @@ -402,7 +404,7 @@ def test_is_run_id_in_db(empty_temp_db) -> None: def test_atomic_creation(experiment, simple_run_describer) -> None: - """" + """ " Test that dataset creation is atomic. 
Test for https://github.com/QCoDeS/Qcodes/issues/1444 """ @@ -416,7 +418,6 @@ def just_throw(*args): with patch( "qcodes.dataset.sqlite.queries.add_data_to_dynamic_columns", new=just_throw ): - with pytest.raises( RuntimeError, match="Rolling back due to unhandled exception" ) as e: @@ -431,12 +432,10 @@ def just_throw(*args): assert error_caused_by(e, "This breaks adding metadata") # since we are starting from an empty database and the above transaction # should be rolled back there should be no runs in the run table - runs = mut_conn.transaction(experiment.conn, - 'SELECT run_id FROM runs').fetchall() + runs = mut_conn.transaction(experiment.conn, "SELECT run_id FROM runs").fetchall() assert len(runs) == 0 with shadow_conn(experiment.path_to_db) as new_conn: - runs = mut_conn.transaction(new_conn, - 'SELECT run_id FROM runs').fetchall() + runs = mut_conn.transaction(new_conn, "SELECT run_id FROM runs").fetchall() assert len(runs) == 0 # if the above was not correctly rolled back we @@ -450,18 +449,15 @@ def just_throw(*args): metadata={"a": 1}, ) - runs = mut_conn.transaction(experiment.conn, - 'SELECT run_id FROM runs').fetchall() + runs = mut_conn.transaction(experiment.conn, "SELECT run_id FROM runs").fetchall() assert len(runs) == 1 with shadow_conn(experiment.path_to_db) as new_conn: - runs = mut_conn.transaction(new_conn, - 'SELECT run_id FROM runs').fetchall() + runs = mut_conn.transaction(new_conn, "SELECT run_id FROM runs").fetchall() assert len(runs) == 1 def test_set_run_timestamp(dataset) -> None: - assert dataset.run_timestamp_raw is None assert dataset.completed_timestamp_raw is None @@ -473,16 +469,15 @@ def test_set_run_timestamp(dataset) -> None: assert dataset.run_timestamp_raw > time_now assert dataset.completed_timestamp_raw is None - with pytest.raises(RuntimeError, match="Rolling back due to unhandled " - "exception") as ei: + with pytest.raises( + RuntimeError, match="Rolling back due to unhandled exception" + ) as ei: mut_queries.set_run_timestamp(dataset.conn, dataset.run_id) - assert error_caused_by(ei, ("Can not set run_timestamp; it has already " - "been set")) + assert error_caused_by(ei, ("Can not set run_timestamp; it has already been set")) def test_set_run_timestamp_explicit(dataset) -> None: - assert dataset.run_timestamp_raw is None assert dataset.completed_timestamp_raw is None @@ -502,7 +497,6 @@ def test_set_run_timestamp_explicit(dataset) -> None: def test_mark_run_complete(dataset) -> None: - assert dataset.run_timestamp_raw is None assert dataset.completed_timestamp_raw is None @@ -556,7 +550,6 @@ def test_mark_run_complete_twice(dataset, caplog: LogCaptureFixture) -> None: def test_mark_run_complete_explicit_time(dataset) -> None: - assert dataset.run_timestamp_raw is None assert dataset.completed_timestamp_raw is None diff --git a/tests/dataset/test_sqlite_connection.py b/tests/dataset/test_sqlite_connection.py index 1b418b4263d..864abc3450f 100644 --- a/tests/dataset/test_sqlite_connection.py +++ b/tests/dataset/test_sqlite_connection.py @@ -10,7 +10,7 @@ make_connection_plus_from, ) from qcodes.dataset.sqlite.database import connect -from qcodes.tests.common import error_caused_by +from tests.common import error_caused_by def sqlite_conn_in_transaction(conn: sqlite3.Connection): @@ -44,45 +44,49 @@ def conn_plus_is_idle(conn: ConnectionPlus, isolation=None): def test_connection_plus() -> None: - sqlite_conn = sqlite3.connect(':memory:') + sqlite_conn = sqlite3.connect(":memory:") conn_plus = ConnectionPlus(sqlite_conn) - assert 
conn_plus.path_to_dbfile == '' + assert conn_plus.path_to_dbfile == "" assert isinstance(conn_plus, ConnectionPlus) assert isinstance(conn_plus, sqlite3.Connection) assert False is conn_plus.atomic_in_progress - match_str = re.escape('Attempted to create `ConnectionPlus` from a ' - '`ConnectionPlus` object which is not allowed.') + match_str = re.escape( + "Attempted to create `ConnectionPlus` from a " + "`ConnectionPlus` object which is not allowed." + ) with pytest.raises(ValueError, match=match_str): ConnectionPlus(conn_plus) def test_make_connection_plus_from_sqlite3_connection() -> None: - conn = sqlite3.connect(':memory:') + conn = sqlite3.connect(":memory:") conn_plus = make_connection_plus_from(conn) - assert conn_plus.path_to_dbfile == '' + assert conn_plus.path_to_dbfile == "" assert isinstance(conn_plus, ConnectionPlus) assert False is conn_plus.atomic_in_progress assert conn_plus is not conn def test_make_connection_plus_from_connecton_plus() -> None: - conn = ConnectionPlus(sqlite3.connect(':memory:')) + conn = ConnectionPlus(sqlite3.connect(":memory:")) conn_plus = make_connection_plus_from(conn) - assert conn_plus.path_to_dbfile == '' + assert conn_plus.path_to_dbfile == "" assert isinstance(conn_plus, ConnectionPlus) assert conn.atomic_in_progress is conn_plus.atomic_in_progress assert conn_plus is conn def test_atomic() -> None: - sqlite_conn = sqlite3.connect(':memory:') + sqlite_conn = sqlite3.connect(":memory:") - match_str = re.escape('atomic context manager only accepts ConnectionPlus ' - 'database connection objects.') + match_str = re.escape( + "atomic context manager only accepts ConnectionPlus " + "database connection objects." + ) with pytest.raises(ValueError, match=match_str): with atomic(sqlite_conn): # type: ignore[arg-type] pass @@ -109,41 +113,44 @@ def test_atomic() -> None: def test_atomic_with_exception() -> None: - sqlite_conn = sqlite3.connect(':memory:') + sqlite_conn = sqlite3.connect(":memory:") conn_plus = ConnectionPlus(sqlite_conn) - sqlite_conn.execute('PRAGMA user_version(25)') + sqlite_conn.execute("PRAGMA user_version(25)") sqlite_conn.commit() - assert 25 == sqlite_conn.execute('PRAGMA user_version').fetchall()[0][0] + assert 25 == sqlite_conn.execute("PRAGMA user_version").fetchall()[0][0] - with pytest.raises(RuntimeError, - match="Rolling back due to unhandled exception") as e: + with pytest.raises( + RuntimeError, match="Rolling back due to unhandled exception" + ) as e: with atomic(conn_plus) as atomic_conn: - atomic_conn.execute('PRAGMA user_version(42)') - raise Exception('intended exception') - assert error_caused_by(e, 'intended exception') + atomic_conn.execute("PRAGMA user_version(42)") + raise Exception("intended exception") + assert error_caused_by(e, "intended exception") - assert 25 == sqlite_conn.execute('PRAGMA user_version').fetchall()[0][0] + assert 25 == sqlite_conn.execute("PRAGMA user_version").fetchall()[0][0] def test_atomic_on_outmost_connection_that_is_in_transaction() -> None: - conn = ConnectionPlus(sqlite3.connect(':memory:')) + conn = ConnectionPlus(sqlite3.connect(":memory:")) - conn.execute('BEGIN') + conn.execute("BEGIN") assert True is conn.in_transaction - match_str = re.escape('SQLite connection has uncommitted transactions. ' - 'Please commit those before starting an atomic ' - 'transaction.') + match_str = re.escape( + "SQLite connection has uncommitted transactions. " + "Please commit those before starting an atomic " + "transaction." 
+ ) with pytest.raises(RuntimeError, match=match_str): with atomic(conn): pass -@pytest.mark.parametrize('in_transaction', (True, False)) +@pytest.mark.parametrize("in_transaction", (True, False)) def test_atomic_on_connection_plus_that_is_in_progress(in_transaction) -> None: - sqlite_conn = sqlite3.connect(':memory:') + sqlite_conn = sqlite3.connect(":memory:") conn_plus = ConnectionPlus(sqlite_conn) # explicitly set to True for testing purposes @@ -151,7 +158,7 @@ def test_atomic_on_connection_plus_that_is_in_progress(in_transaction) -> None: # implement parametrizing over connection's `in_transaction` attribute if in_transaction: - conn_plus.cursor().execute('BEGIN') + conn_plus.cursor().execute("BEGIN") assert in_transaction is conn_plus.in_transaction isolation_level = conn_plus.isolation_level @@ -176,7 +183,7 @@ def test_atomic_on_connection_plus_that_is_in_progress(in_transaction) -> None: def test_two_nested_atomics() -> None: - sqlite_conn = sqlite3.connect(':memory:') + sqlite_conn = sqlite3.connect(":memory:") conn_plus = ConnectionPlus(sqlite_conn) atomic_in_progress = conn_plus.atomic_in_progress @@ -206,9 +213,11 @@ def test_two_nested_atomics() -> None: assert atomic_in_progress == atomic_conn_2.atomic_in_progress -@pytest.mark.parametrize(argnames='create_conn_plus', - argvalues=(make_connection_plus_from, ConnectionPlus), - ids=('make_connection_plus_from', 'ConnectionPlus')) +@pytest.mark.parametrize( + argnames="create_conn_plus", + argvalues=(make_connection_plus_from, ConnectionPlus), + ids=("make_connection_plus_from", "ConnectionPlus"), +) def test_that_use_of_atomic_commits_only_at_outermost_context( tmp_path, create_conn_plus ) -> None: @@ -216,7 +225,7 @@ def test_that_use_of_atomic_commits_only_at_outermost_context( This test tests the behavior of `ConnectionPlus` that is created from `sqlite3.Connection` with respect to `atomic` context manager and commits. """ - dbfile = str(tmp_path / 'temp.db') + dbfile = str(tmp_path / "temp.db") # just initialize the database file, connection objects needed for # testing in this test function are created separately, see below connect(dbfile) @@ -228,8 +237,8 @@ def test_that_use_of_atomic_commits_only_at_outermost_context( # committed to the database file control_conn = connect(dbfile) - get_all_runs = 'SELECT * FROM runs' - insert_run_with_name = 'INSERT INTO runs (name) VALUES (?)' + get_all_runs = "SELECT * FROM runs" + insert_run_with_name = "INSERT INTO runs (name) VALUES (?)" # assert that at the beginning of the test there are no runs in the # table; we'll be adding new rows to the runs table below @@ -242,12 +251,11 @@ def test_that_use_of_atomic_commits_only_at_outermost_context( # context manager is exited with atomic(conn_plus) as atomic_conn: - assert 0 == len(conn_plus.execute(get_all_runs).fetchall()) assert 0 == len(atomic_conn.execute(get_all_runs).fetchall()) assert 0 == len(control_conn.execute(get_all_runs).fetchall()) - atomic_conn.cursor().execute(insert_run_with_name, ['aaa']) + atomic_conn.cursor().execute(insert_run_with_name, ["aaa"]) assert 1 == len(conn_plus.execute(get_all_runs).fetchall()) assert 1 == len(atomic_conn.execute(get_all_runs).fetchall()) @@ -262,25 +270,23 @@ def test_that_use_of_atomic_commits_only_at_outermost_context( # the outermost context. 
with atomic(conn_plus) as atomic_conn_1: - assert 1 == len(conn_plus.execute(get_all_runs).fetchall()) assert 1 == len(atomic_conn_1.execute(get_all_runs).fetchall()) assert 1 == len(control_conn.execute(get_all_runs).fetchall()) - atomic_conn_1.cursor().execute(insert_run_with_name, ['bbb']) + atomic_conn_1.cursor().execute(insert_run_with_name, ["bbb"]) assert 2 == len(conn_plus.execute(get_all_runs).fetchall()) assert 2 == len(atomic_conn_1.execute(get_all_runs).fetchall()) assert 1 == len(control_conn.execute(get_all_runs).fetchall()) with atomic(atomic_conn_1) as atomic_conn_2: - assert 2 == len(conn_plus.execute(get_all_runs).fetchall()) assert 2 == len(atomic_conn_1.execute(get_all_runs).fetchall()) assert 2 == len(atomic_conn_2.execute(get_all_runs).fetchall()) assert 1 == len(control_conn.execute(get_all_runs).fetchall()) - atomic_conn_2.cursor().execute(insert_run_with_name, ['ccc']) + atomic_conn_2.cursor().execute(insert_run_with_name, ["ccc"]) assert 3 == len(conn_plus.execute(get_all_runs).fetchall()) assert 3 == len(atomic_conn_1.execute(get_all_runs).fetchall()) @@ -300,13 +306,13 @@ def test_that_use_of_atomic_commits_only_at_outermost_context( def test_atomic_transaction(tmp_path) -> None: """Test that atomic_transaction works for ConnectionPlus""" - dbfile = str(tmp_path / 'temp.db') + dbfile = str(tmp_path / "temp.db") conn = ConnectionPlus(sqlite3.connect(dbfile)) ctrl_conn = sqlite3.connect(dbfile) - sql_create_table = 'CREATE TABLE smth (name TEXT)' + sql_create_table = "CREATE TABLE smth (name TEXT)" sql_table_exists = 'SELECT sql FROM sqlite_master WHERE TYPE = "table"' atomic_transaction(conn, sql_create_table) @@ -316,19 +322,21 @@ def test_atomic_transaction(tmp_path) -> None: def test_atomic_transaction_on_sqlite3_connection_raises(tmp_path) -> None: """Test that atomic_transaction does not work for sqlite3.Connection""" - dbfile = str(tmp_path / 'temp.db') + dbfile = str(tmp_path / "temp.db") conn = sqlite3.connect(dbfile) - match_str = re.escape('atomic context manager only accepts ConnectionPlus ' - 'database connection objects.') + match_str = re.escape( + "atomic context manager only accepts ConnectionPlus " + "database connection objects." + ) with pytest.raises(ValueError, match=match_str): atomic_transaction(conn, "whatever sql query") # type: ignore[arg-type] def test_connect() -> None: - conn = connect(':memory:') + conn = connect(":memory:") assert isinstance(conn, sqlite3.Connection) assert isinstance(conn, ConnectionPlus) diff --git a/tests/dataset/test_subscribing.py b/tests/dataset/test_subscribing.py index 3e9482effd0..ef800c0123f 100644 --- a/tests/dataset/test_subscribing.py +++ b/tests/dataset/test_subscribing.py @@ -10,7 +10,7 @@ from qcodes.dataset.descriptions.dependencies import InterDependencies_ from qcodes.dataset.descriptions.param_spec import ParamSpecBase from qcodes.dataset.sqlite.connection import atomic_transaction -from qcodes.tests.common import retry_until_does_not_throw +from tests.common import retry_until_does_not_throw log = logging.getLogger(__name__) @@ -27,6 +27,7 @@ class MockSubscriber: is called from another thread than the one holding the connection of the dataset! 
""" + def __init__(self, ds, lg): self.lg = lg self.ds = ds @@ -46,7 +47,7 @@ def config_subscriber(results, length, state): return config_subscriber -@pytest.fixture(scope='function') +@pytest.fixture(scope="function") def basic_subscriber(): """ A basic subscriber that just puts results and length into @@ -67,7 +68,7 @@ def _make_working_subscriber_config(tmp_path): "subscription":{ "subscribers":{ "test_subscriber":{ - "factory": "qcodes.tests.dataset.test_subscribing.MockSubscriber", + "factory": "tests.dataset.test_subscribing.MockSubscriber", "factory_kwargs":{ "lg": false }, @@ -96,7 +97,7 @@ def _make_broken_subscriber_config(tmp_path): "subscription":{ "subscribers":{ "test_subscriber_wrong":{ - "factory": "qcodes.tests.dataset.test_subscribing.MockSubscriber", + "factory": "tests.dataset.test_subscribing.MockSubscriber", "factory_kwargs":{ "lg": false }, @@ -117,24 +118,18 @@ def _make_broken_subscriber_config(tmp_path): yield - @pytest.mark.flaky(reruns=5) @pytest.mark.serial def test_basic_subscription(dataset, basic_subscriber) -> None: - xparam = ParamSpecBase(name='x', - paramtype='numeric', - label='x parameter', - unit='V') - yparam = ParamSpecBase(name='y', - paramtype='numeric', - label='y parameter', - unit='Hz') + xparam = ParamSpecBase(name="x", paramtype="numeric", label="x parameter", unit="V") + yparam = ParamSpecBase( + name="y", paramtype="numeric", label="y parameter", unit="Hz" + ) idps = InterDependencies_(dependencies={yparam: (xparam,)}) dataset.set_interdependencies(idps) dataset.mark_started() - sub_id = dataset.subscribe(basic_subscriber, min_wait=0, min_count=1, - state={}) + sub_id = dataset.subscribe(basic_subscriber, min_wait=0, min_count=1, state={}) assert len(dataset.subscribers) == 1 assert list(dataset.subscribers.keys()) == [sub_id] @@ -142,12 +137,13 @@ def test_basic_subscription(dataset, basic_subscriber) -> None: expected_state = {} for x in range(10): - y = -x**2 - dataset.add_results([{'x': x, 'y': y}]) - expected_state[x+1] = [(x, y)] + y = -(x**2) + dataset.add_results([{"x": x, "y": y}]) + expected_state[x + 1] = [(x, y)] @retry_until_does_not_throw( - exception_class_to_expect=AssertionError, delay=0.5, tries=10) + exception_class_to_expect=AssertionError, delay=0.5, tries=10 + ) def assert_expected_state(): assert dataset.subscribers[sub_id].state == expected_state @@ -160,8 +156,7 @@ def assert_expected_state(): # Ensure the trigger for the subscriber has been removed from the database get_triggers_sql = "SELECT * FROM sqlite_master WHERE TYPE = 'trigger';" - triggers = atomic_transaction( - dataset.conn, get_triggers_sql).fetchall() + triggers = atomic_transaction(dataset.conn, get_triggers_sql).fetchall() assert len(triggers) == 0 diff --git a/tests/driver_test_case.py b/tests/driver_test_case.py deleted file mode 100644 index f6321cc369f..00000000000 --- a/tests/driver_test_case.py +++ /dev/null @@ -1,51 +0,0 @@ -# ruff: noqa: F401 -""" -Module left for backwards compatibility. 
Will be deprecated and removed along the rest of qcodes.tests""" - -from __future__ import annotations - -import unittest - -from qcodes.extensions import ( - DriverTestCase, -) -from qcodes.instrument import Instrument - - -def test_instruments(verbosity: int = 1) -> None: - """ - Discover available instruments and test them all - Unlike test_instrument, this does NOT reload tests prior to running them - - optional verbosity (default 1) - """ - import qcodes - import qcodes.instrument_drivers as qcdrivers - - driver_path = list(qcdrivers.__path__)[0] - suite = unittest.defaultTestLoader.discover( - driver_path, top_level_dir=list(qcodes.__path__)[0] - ) - unittest.TextTestRunner(verbosity=verbosity).run(suite) - - -def test_instrument(instrument_testcase, verbosity: int = 2) -> None: - """ - Runs one instrument testcase - Reloads the test case before running it - - optional verbosity (default 2) - """ - import importlib - import sys - - # reload the test case - module_name = instrument_testcase.__module__ - class_name = instrument_testcase.__name__ - del sys.modules[module_name] - - module = importlib.import_module(module_name) - reloaded_testcase = getattr(module, class_name) - - suite = unittest.defaultTestLoader.loadTestsFromTestCase(reloaded_testcase) - unittest.TextTestRunner(verbosity=verbosity).run(suite) diff --git a/tests/drivers/test_tektronix_AWG70000A.py b/tests/drivers/test_tektronix_AWG70000A.py index 44f5648b68b..e6824703965 100644 --- a/tests/drivers/test_tektronix_AWG70000A.py +++ b/tests/drivers/test_tektronix_AWG70000A.py @@ -12,7 +12,7 @@ from lxml import etree from pytest import LogCaptureFixture -import qcodes.tests.drivers.auxiliary_files as auxfiles +import tests.drivers.auxiliary_files as auxfiles from qcodes.instrument_drivers.tektronix.AWG70000A import AWG70000A from qcodes.instrument_drivers.tektronix.AWG70002A import AWG70002A @@ -23,18 +23,17 @@ def strip_outer_tags(sml: str) -> str: complies with the schema provided by tektronix """ # make function idempotent - if not sml[1:9] == 'DataFile': - print('Incorrect file format or outer tags ' - 'already stripped') + if not sml[1:9] == "DataFile": + print("Incorrect file format or outer tags already stripped") return sml - ind1 = sml.find('>\r\n') - sml = sml[ind1+3:] # strip off the opening DataFile tag + ind1 = sml.find(">\r\n") + sml = sml[ind1 + 3 :] # strip off the opening DataFile tag sml = sml[:-24] # remove the and closing tag return sml -@pytest.fixture(scope='function') +@pytest.fixture(scope="function") def awg2(): awg2_sim = AWG70002A( "awg2_sim", @@ -46,22 +45,24 @@ def awg2(): awg2_sim.close() -@pytest.fixture(scope='function') +@pytest.fixture(scope="function") def random_wfm_m1_m2_package(): """ Make a random 2400 points np.array([wfm, m1, m2]). 
The waveform has values in [-0.1, 0.1) """ + def make(): length = np.random.randint(2400, 2500) - wfm = 0.2*(np.random.rand(length) - 0.5) + wfm = 0.2 * (np.random.rand(length) - 0.5) m1 = np.random.randint(0, 2, length) m2 = np.random.randint(0, 2, length) return np.array([wfm, m1, m2]) + return make -@pytest.fixture(scope='module', name="forged_sequence") +@pytest.fixture(scope="module", name="forged_sequence") def _make_forged_sequence(): """ Return an example forged sequence containing a @@ -70,7 +71,7 @@ def _make_forged_sequence(): N = 5 num_chans = 3 - types = ['element']*(N-1) + ['subsequence'] + types = ["element"] * (N - 1) + ["subsequence"] def random_element(num_chans): """ @@ -80,75 +81,69 @@ def random_element(num_chans): """ data = {n: {} for n in range(1, 1 + num_chans)} for key in data.keys(): - data[key] = {'wfm': np.random.randn(2400), - 'm1': np.random.randint(0, 2, 2400), - 'm2': np.random.randint(0, 2, 2400)} + data[key] = { + "wfm": np.random.randn(2400), + "m1": np.random.randint(0, 2, 2400), + "m2": np.random.randint(0, 2, 2400), + } return data seq = {i: {} for i in range(1, 1 + N)} for pos1, typ in zip(seq.keys(), types): - seq[pos1] = {'type': typ, - 'content': {}, - 'sequencing': {}} + seq[pos1] = {"type": typ, "content": {}, "sequencing": {}} for pos1 in range(1, N): - seq[pos1]['content'] = {1: {'data': random_element(num_chans)}} + seq[pos1]["content"] = {1: {"data": random_element(num_chans)}} # and finally add the subsequence - seq[N]['content'] = {1: {'data': random_element(num_chans), - 'sequencing': {'nreps': 2}}, - 2: {'data': random_element(num_chans), - 'sequencing': {'nreps': 2}}} + seq[N]["content"] = { + 1: {"data": random_element(num_chans), "sequencing": {"nreps": 2}}, + 2: {"data": random_element(num_chans), "sequencing": {"nreps": 2}}, + } return seq def test_init_awg2(awg2) -> None: - idn_dict = awg2.IDN() - assert idn_dict['vendor'] == 'QCoDeS' + assert idn_dict["vendor"] == "QCoDeS" @settings(deadline=2500, max_examples=7) @given(N=hst.integers(1, 100)) def test_SML_successful_generation_vary_length(N) -> None: + tw = [0] * N + nreps = [1] * N + ejs = [0] * N + ejt = [0] * N + goto = [0] * N + wfm_names = [[f"pos{pos}ch{ch}" for ch in range(1, 3)] for pos in range(N)] - tw = [0]*N - nreps = [1]*N - ejs = [0]*N - ejt = [0]*N - goto = [0]*N - wfm_names = [[f'pos{pos}ch{ch}' - for ch in range(1, 3)] for pos in range(N)] + seqname = "seq" - seqname = 'seq' - - smlstring = AWG70000A._makeSMLFile(tw, nreps, ejs, ejt, goto, - wfm_names, seqname, chans=3) + smlstring = AWG70000A._makeSMLFile( + tw, nreps, ejs, ejt, goto, wfm_names, seqname, chans=3 + ) # This line will raise an exception if the XML is not valid etree.parse(StringIO(smlstring)) -@given(num_samples=hst.integers(min_value=2400), - markers_included=hst.booleans()) +@given(num_samples=hst.integers(min_value=2400), markers_included=hst.booleans()) def test_WFMXHeader_succesful(num_samples, markers_included) -> None: - xmlstr = AWG70000A._makeWFMXFileHeader(num_samples, markers_included) etree.parse(StringIO(xmlstr)) -@given(num_samples=hst.integers(max_value=2399), - markers_included=hst.booleans()) +@given(num_samples=hst.integers(max_value=2399), markers_included=hst.booleans()) def test_WFMXHeader_failing(num_samples, markers_included) -> None: with pytest.raises(ValueError): AWG70000A._makeWFMXFileHeader(num_samples, markers_included) def test_seqxfilefromfs_failing(forged_sequence) -> None: - # typing convenience make_seqx = AWG70000A.make_SEQX_from_forged_sequence @@ -159,12 
+154,16 @@ def test_seqxfilefromfs_failing(forged_sequence) -> None: # the input dict (first argument) is not a valid forged # sequence dict with pytest.raises(InvalidForgedSequenceError): - make_seqx({}, [], 'yolo', {}) + make_seqx({}, [], "yolo", {}) # wrong number of channel amplitudes with pytest.raises(ValueError): - make_seqx(forged_sequence, amplitudes=[1, 1], - seqname='dummyname', channel_mapping=chan_map) + make_seqx( + forged_sequence, + amplitudes=[1, 1], + seqname="dummyname", + channel_mapping=chan_map, + ) # wrong channel mapping keys with pytest.raises(ValueError): @@ -177,9 +176,12 @@ def test_seqxfilefromfs_failing(forged_sequence) -> None: # wrong channel mapping values with pytest.raises(ValueError): - make_seqx(forged_sequence, [1, 1, 1], - seqname='dummyname', - channel_mapping={1: 10, 2: 8, 3: -1}) + make_seqx( + forged_sequence, + [1, 1, 1], + seqname="dummyname", + channel_mapping={1: 10, 2: 8, 3: -1}, + ) def test_seqxfilefromfs_warns(forged_sequence, caplog: LogCaptureFixture) -> None: @@ -188,39 +190,37 @@ def test_seqxfilefromfs_warns(forged_sequence, caplog: LogCaptureFixture) -> Non """ make_seqx = AWG70000A.make_SEQX_from_forged_sequence - max_elem = forged_sequence[1]['content'][1]['data'][1]['wfm'].max() - amplitude = max_elem/2 + max_elem = forged_sequence[1]["content"][1]["data"][1]["wfm"].max() + amplitude = max_elem / 2 with caplog.at_level(logging.WARNING): - make_seqx(forged_sequence, [amplitude, amplitude, amplitude], 'myseq') + make_seqx(forged_sequence, [amplitude, amplitude, amplitude], "myseq") assert len(caplog.messages) > 0 for message in caplog.messages: assert "Waveform exceeds specified channel range" in message def test_seqxfile_from_fs(forged_sequence) -> None: - # typing convenience make_seqx = AWG70000A.make_SEQX_from_forged_sequence - path_to_schema = auxfiles.__file__.replace('__init__.py', - 'awgSeqDataSets.xsd') + path_to_schema = auxfiles.__file__.replace("__init__.py", "awgSeqDataSets.xsd") with open(path_to_schema) as fid: raw_schema = fid.read() - schema = etree.XMLSchema(etree.XML(raw_schema.encode('utf-8'))) + schema = etree.XMLSchema(etree.XML(raw_schema.encode("utf-8"))) parser = etree.XMLParser(schema=schema) - seqx = make_seqx(forged_sequence, [10, 10, 10], 'myseq') + seqx = make_seqx(forged_sequence, [10, 10, 10], "myseq") zf = zipfile.ZipFile(BytesIO(seqx)) # Check for double/missing file extensions for filename in zf.namelist(): - assert filename.count('.') == 1 + assert filename.count(".") == 1 # validate the SML files (describing sequences) - seq_names = [fn for fn in zf.namelist() if 'Sequences/' in fn] + seq_names = [fn for fn in zf.namelist() if "Sequences/" in fn] for seq_name in seq_names: with zf.open(seq_name) as fid: @@ -230,6 +230,7 @@ def test_seqxfile_from_fs(forged_sequence) -> None: # XMLSyntaxError if something is wrong etree.XML(str_seq_sml, parser=parser) + # TODO: Add some failing tests for inproper input @@ -243,13 +244,13 @@ def test_makeSEQXFile(awg2, random_wfm_m1_m2_package) -> None: wfmpkg = random_wfm_m1_m2_package - trig_waits = [0]*seqlen - nreps = [1]*seqlen - event_jumps = [0]*seqlen - event_jump_to = [0]*seqlen - go_to = [0]*seqlen + trig_waits = [0] * seqlen + nreps = [1] * seqlen + event_jumps = [0] * seqlen + event_jump_to = [0] * seqlen + go_to = [0] * seqlen wfms = [[wfmpkg() for i in range(seqlen)] for j in range(chans)] - amplitudes = [0.5]*chans + amplitudes = [0.5] * chans seqname = "testseq" awg2.makeSEQXFile( diff --git a/tests/instrument_mocks.py b/tests/instrument_mocks.py 
deleted file mode 100644 index 679da7ec127..00000000000 --- a/tests/instrument_mocks.py +++ /dev/null @@ -1,52 +0,0 @@ -# ruff: noqa: F401 -# left for backwards compatibility will be deprecated and removed -# along with the rest of qcodes.tests -from __future__ import annotations - -import logging -import time -from collections.abc import Generator, Sequence -from functools import partial -from typing import Any - -import numpy as np - -from qcodes.instrument import ChannelList, Instrument, InstrumentBase, InstrumentChannel -from qcodes.instrument_drivers.mock_instruments import ( - ArraySetPointParam, - ComplexArraySetPointParam, - DmmExponentialParameter, - DmmGaussParameter, - DummyBase, - DummyChannel, - DummyChannelInstrument, - DummyFailingInstrument, - DummyInstrument, - DummyInstrumentWithMeasurement, - DummyParameterWithSetpoints1D, - DummyParameterWithSetpoints2D, - DummyParameterWithSetpointsComplex, - GeneratedSetPoints, - MockCustomChannel, - MockDAC, - MockDACChannel, - MockField, - MockLockin, - MockMetaParabola, - MockParabola, - Multi2DSetPointParam, - Multi2DSetPointParam2Sizes, - MultiScalarParam, - MultiSetPointParam, - SnapShotTestInstrument, - setpoint_generator, -) -from qcodes.parameters import ( - ArrayParameter, - MultiParameter, - Parameter, - ParameterWithSetpoints, - ParamRawDataType, -) -from qcodes.validators import Arrays, ComplexNumbers, Numbers, OnOff, Strings -from qcodes.validators import Sequence as ValidatorSequence diff --git a/tests/test_instrument.py b/tests/test_instrument.py index f326110ef4a..204e79c64f9 100644 --- a/tests/test_instrument.py +++ b/tests/test_instrument.py @@ -344,7 +344,7 @@ class GammyInstrument(Instrument): error_msg = ( "Instrument instr is but " - "" ".GammyInstrument'> was requested" ) diff --git a/tests/test_logger.py b/tests/test_logger.py index 27c0e93fbdf..7f5ba0445bb 100644 --- a/tests/test_logger.py +++ b/tests/test_logger.py @@ -16,9 +16,9 @@ from qcodes.instrument_drivers.american_magnetics import AMIModel430, AMIModel4303D from qcodes.instrument_drivers.tektronix import TektronixAWG5208 from qcodes.logger.log_analysis import capture_dataframe -from qcodes.tests.drivers.test_lakeshore import Model_372_Mock +from tests.drivers.test_lakeshore import Model_372_Mock -TEST_LOG_MESSAGE = 'test log message' +TEST_LOG_MESSAGE = "test log message" NUM_PYTEST_LOGGERS = 4 @@ -40,7 +40,6 @@ def cleanup_started_logger() -> Generator[None, None, None]: @pytest.fixture def awg5208(caplog: LogCaptureFixture) -> Generator[TektronixAWG5208, None, None]: - with caplog.at_level(logging.INFO): inst = TektronixAWG5208( "awg_sim", @@ -56,8 +55,6 @@ def awg5208(caplog: LogCaptureFixture) -> Generator[TektronixAWG5208, None, None @pytest.fixture def model372() -> Generator[Model_372_Mock, None, None]: - - inst = Model_372_Mock( "lakeshore_372", "GPIB::3::INSTR", @@ -96,7 +93,7 @@ def AMI430_3D() -> ( ) field_limit = [ lambda x, y, z: x == 0 and y == 0 and z < 3, - lambda x, y, z: np.linalg.norm([x, y, z]) < 2 + lambda x, y, z: np.linalg.norm([x, y, z]) < 2, ] driver = AMIModel4303D("AMI430_3D", mag_x, mag_y, mag_z, field_limit) try: @@ -113,7 +110,7 @@ def test_get_log_file_name() -> None: assert str(os.getpid()) in fp[-1] assert logger.logger.PYTHON_LOG_NAME in fp[-1] assert fp[-2] == logger.logger.LOGGING_DIR - assert fp[-3] == '.qcodes' + assert fp[-3] == ".qcodes" def test_start_logger() -> None: @@ -145,12 +142,12 @@ def test_start_logger_twice() -> None: # there is one or two loggers registered from pytest # depending on the version # 
and the telemetry logger is always off in the tests - assert len(handlers) == 2+NUM_PYTEST_LOGGERS + assert len(handlers) == 2 + NUM_PYTEST_LOGGERS def test_set_level_without_starting_raises() -> None: with pytest.raises(RuntimeError): - with logger.console_level('DEBUG'): + with logger.console_level("DEBUG"): pass assert len(logging.getLogger().handlers) == NUM_PYTEST_LOGGERS @@ -158,11 +155,10 @@ def test_set_level_without_starting_raises() -> None: def test_handler_level() -> None: with logger.LogCapture(level=logging.INFO) as logs: logging.debug(TEST_LOG_MESSAGE) - assert logs.value == '' + assert logs.value == "" with logger.LogCapture(level=logging.INFO) as logs: - with logger.handler_level(level=logging.DEBUG, - handler=logs.string_handler): + with logger.handler_level(level=logging.DEBUG, handler=logs.string_handler): print(logs.string_handler) logging.debug(TEST_LOG_MESSAGE) assert logs.value.strip() == TEST_LOG_MESSAGE diff --git a/tests/test_station.py b/tests/test_station.py index d77aba48564..4e58e0d2151 100644 --- a/tests/test_station.py +++ b/tests/test_station.py @@ -50,60 +50,61 @@ def treat_validation_warning_as_error(): def test_station() -> None: - bob = DummyInstrument('bob', gates=['one']) + bob = DummyInstrument("bob", gates=["one"]) station = Station(bob) - assert ['bob'] == list(station.components.keys()) - assert bob == station.components['bob'] + assert ["bob"] == list(station.components.keys()) + assert bob == station.components["bob"] assert station == station.default assert station == Station.default def test_station_getitem() -> None: - bob = DummyInstrument('bob', gates=['one']) + bob = DummyInstrument("bob", gates=["one"]) station = Station(bob) - assert bob == station['bob'] + assert bob == station["bob"] - with pytest.raises(KeyError, match='bobby'): - _ = station.components['bobby'] + with pytest.raises(KeyError, match="bobby"): + _ = station.components["bobby"] def test_station_delegated_attributes() -> None: - bob = DummyInstrument('bob', gates=['one']) + bob = DummyInstrument("bob", gates=["one"]) station = Station(bob) assert bob == station.bob - with pytest.raises(AttributeError, match="'Station' object and its " - "delegates have no attribute " - "'bobby'"): + with pytest.raises( + AttributeError, + match="'Station' object and its delegates have no attribute 'bobby'", + ): _ = station.bobby def test_add_component() -> None: - bob = DummyInstrument('bob', gates=['one']) + bob = DummyInstrument("bob", gates=["one"]) station = Station() - station.add_component(bob, 'bob') + station.add_component(bob, "bob") - assert ['bob'] == list(station.components.keys()) - assert bob == station.components['bob'] + assert ["bob"] == list(station.components.keys()) + assert bob == station.components["bob"] def test_add_component_without_specifying_name() -> None: """ Test that station looks for 'name' attribute in the component and uses it """ - bob = DummyInstrument('bob', gates=['one']) - assert hasattr(bob, 'name') - assert 'bob' == bob.name + bob = DummyInstrument("bob", gates=["one"]) + assert hasattr(bob, "name") + assert "bob" == bob.name station = Station() station.add_component(bob) - assert ['bob'] == list(station.components.keys()) - assert bob == station.components['bob'] + assert ["bob"] == list(station.components.keys()) + assert bob == station.components["bob"] def test_add_component_with_no_name() -> None: @@ -111,7 +112,7 @@ def test_add_component_with_no_name() -> None: Test that station comes up with a name for components without 'name' attribute 
""" - bob = {'name', 'bob'} + bob = {"name", "bob"} station = Station() station.add_component(bob) # type: ignore[arg-type] @@ -126,26 +127,25 @@ def test_add_component_with_no_name() -> None: def test_remove_component() -> None: - bob = DummyInstrument('bob', gates=['one']) + bob = DummyInstrument("bob", gates=["one"]) station = Station() - station.add_component(bob, 'bob') + station.add_component(bob, "bob") - assert ['bob'] == list(station.components.keys()) - assert bob == station.components['bob'] + assert ["bob"] == list(station.components.keys()) + assert bob == station.components["bob"] - bob2 = station.remove_component('bob') + bob2 = station.remove_component("bob") - with pytest.raises(KeyError, match='bob'): - _ = station.components['bob'] + with pytest.raises(KeyError, match="bob"): + _ = station.components["bob"] assert bob == bob2 - with pytest.raises(KeyError, match='Component bobby is not part of the ' - 'station'): - _ = station.remove_component('bobby') + with pytest.raises(KeyError, match="Component bobby is not part of the station"): + _ = station.remove_component("bobby") def test_close_all_registered_instruments() -> None: - names = [f'some_name_{i}' for i in range(10)] + names = [f"some_name_{i}" for i in range(10)] instrs = [Instrument(name=name) for name in names] st = Station(*instrs) for name in names: @@ -159,21 +159,22 @@ def test_snapshot() -> None: station = Station() empty_snapshot = station.snapshot() - assert {'instruments': {}, - 'parameters': {}, - 'components': {}, - 'config': None, - } == empty_snapshot - - instrument = DummyInstrument('instrument', gates=['one']) + assert { + "instruments": {}, + "parameters": {}, + "components": {}, + "config": None, + } == empty_snapshot + + instrument = DummyInstrument("instrument", gates=["one"]) station.add_component(instrument) instrument_snapshot = instrument.snapshot() - parameter = Parameter('parameter', label='Label', unit='m') + parameter = Parameter("parameter", label="Label", unit="m") station.add_component(parameter) parameter_snapshot = parameter.snapshot() - excluded_parameter = Parameter('excluded_parameter', snapshot_exclude=True) + excluded_parameter = Parameter("excluded_parameter", snapshot_exclude=True) station.add_component(excluded_parameter) component = DummyComponent("component") @@ -184,21 +185,22 @@ def test_snapshot() -> None: snapshot = station.snapshot() assert isinstance(snapshot, dict) - assert ['instruments', - 'parameters', - 'components', - 'config', - ] == list(snapshot.keys()) + assert [ + "instruments", + "parameters", + "components", + "config", + ] == list(snapshot.keys()) - assert ['instrument'] == list(snapshot['instruments'].keys()) - assert instrument_snapshot == snapshot['instruments']['instrument'] + assert ["instrument"] == list(snapshot["instruments"].keys()) + assert instrument_snapshot == snapshot["instruments"]["instrument"] # the list should not contain the excluded parameter - assert ['parameter'] == list(snapshot['parameters'].keys()) - assert parameter_snapshot == snapshot['parameters']['parameter'] + assert ["parameter"] == list(snapshot["parameters"].keys()) + assert parameter_snapshot == snapshot["parameters"]["parameter"] - assert ['component'] == list(snapshot['components'].keys()) - assert component_snapshot == snapshot['components']['component'] + assert ["component"] == list(snapshot["components"].keys()) + assert component_snapshot == snapshot["components"]["component"] def test_station_after_instrument_is_closed() -> None: @@ -209,16 +211,16 @@ def 
test_station_after_instrument_is_closed() -> None: perform an action on the station to ensure that the closed instrument does not break the work of the station object. """ - bob = DummyInstrument('bob', gates=['one']) + bob = DummyInstrument("bob", gates=["one"]) station = Station(bob) - assert bob == station['bob'] + assert bob == station["bob"] bob.close() # 'bob' is closed, but it is still part of the station - assert bob == station['bob'] + assert bob == station["bob"] # check that snapshot method executes without exceptions snapshot = station.snapshot() @@ -226,34 +228,34 @@ def test_station_after_instrument_is_closed() -> None: # check that 'bob's snapshot is not here (because 'bob' is closed, # hence it was ignored, and even removed from the station by # `snapshot_base` method) - assert {'instruments': {}, - 'parameters': {}, - 'components': {}, - 'config': None, - } == snapshot + assert { + "instruments": {}, + "parameters": {}, + "components": {}, + "config": None, + } == snapshot # check that 'bob' has been removed from the station - with pytest.raises(KeyError, match='bob'): - _ = station.components['bob'] + with pytest.raises(KeyError, match="bob"): + _ = station.components["bob"] # check that 'bob' has been removed from the station, again - with pytest.raises(KeyError, match='Component bob is not part of the ' - 'station'): - station.remove_component('bob') + with pytest.raises(KeyError, match="Component bob is not part of the station"): + station.remove_component("bob") def test_update_config_schema() -> None: update_config_schema() with open(SCHEMA_PATH) as f: schema = json.load(f) - assert len(schema['definitions']['instruments']['enum']) > 1 + assert len(schema["definitions"]["instruments"]["enum"]) > 1 @contextmanager def config_file_context(file_content): with tempfile.TemporaryDirectory() as tmpdirname: - filename = Path(tmpdirname, 'station_config.yaml') - with filename.open('w') as f: + filename = Path(tmpdirname, "station_config.yaml") + with filename.open("w") as f: f.write(file_content) yield str(filename) @@ -261,11 +263,11 @@ def config_file_context(file_content): @contextmanager def config_files_context(file_content1, file_content2): with tempfile.TemporaryDirectory() as tmpdirname: - filename1 = Path(tmpdirname, 'station_config1.yaml') - with filename1.open('w') as f: + filename1 = Path(tmpdirname, "station_config1.yaml") + with filename1.open("w") as f: f.write(file_content1) - filename2 = Path(tmpdirname, 'station_config2.yaml') - with filename2.open('w') as f: + filename2 = Path(tmpdirname, "station_config2.yaml") + with filename2.open("w") as f: f.write(file_content2) yield [str(filename1), str(filename2)] @@ -275,7 +277,7 @@ def _make_example_station_config(): """ Returns path to temp yaml file with station config. 
""" - sims_path = get_qcodes_path('instrument', 'sims') + sims_path = get_qcodes_path("instrument", "sims") test_config = f""" instruments: lakeshore: @@ -301,15 +303,15 @@ def _make_example_station_config(): def test_dynamic_reload_of_file(example_station_config) -> None: st = Station(config_file=example_station_config) - mock_dac = st.load_instrument('mock_dac') - assert 'ch1' in mock_dac.parameters + mock_dac = st.load_instrument("mock_dac") + assert "ch1" in mock_dac.parameters with open(example_station_config) as f: - filedata = f.read().replace('ch1', 'gate1') - with open(example_station_config, 'w') as f: + filedata = f.read().replace("ch1", "gate1") + with open(example_station_config, "w") as f: f.write(filedata) - mock_dac = st.load_instrument('mock_dac') - assert 'ch1' not in mock_dac.parameters - assert 'gate1' in mock_dac.parameters + mock_dac = st.load_instrument("mock_dac") + assert "ch1" not in mock_dac.parameters + assert "gate1" in mock_dac.parameters def station_from_config_str(config: str) -> Station: @@ -350,7 +352,7 @@ def test_station_config_path_resolution(example_station_config) -> None: # upon station initialization. assert station_config_has_been_loaded(Station()) - config["default_file"] = 'random.yml' + config["default_file"] = "random.yml" config["default_folder"] = str(path.parent) # In this case, the station configuration file specified in the qcodes # config does not exist, hence the initialized station is not expected to @@ -358,7 +360,7 @@ def test_station_config_path_resolution(example_station_config) -> None: assert not station_config_has_been_loaded(Station()) config["default_file"] = str(path) - config["default_folder"] = r'C:\SomeOtherFolder' + config["default_folder"] = r"C:\SomeOtherFolder" # In this case, the default_file setting of the qcodes config contains # absolute path to the station configuration file, while the default_folder # setting is set to some non-existent folder. 
@@ -434,16 +436,16 @@ def test_simple_mock_load_mock(simple_mock_station) -> None: st = simple_mock_station mock = st.load_mock() assert isinstance(mock, DummyInstrument) - assert mock.name == 'mock' - assert st.components['mock'] is mock + assert mock.name == "mock" + assert st.components["mock"] is mock def test_simple_mock_load_instrument(simple_mock_station) -> None: st = simple_mock_station - mock = st.load_instrument('mock') + mock = st.load_instrument("mock") assert isinstance(mock, DummyInstrument) - assert mock.name == 'mock' - assert st.components['mock'] is mock + assert mock.name == "mock" + assert st.components["mock"] is mock def test_enable_force_reconnect() -> None: @@ -458,40 +460,34 @@ def get_instrument_config(enable_forced_reconnect: Optional[bool]) -> str: gates: {{"ch1", "ch2"}} """ - def assert_on_reconnect(*, use_user_cfg: Optional[bool], - use_instr_cfg: Optional[bool], - expect_failure: bool) -> None: - qcodes.config["station"]\ - ['enable_forced_reconnect'] = use_user_cfg - st = station_from_config_str( - get_instrument_config(use_instr_cfg)) - st.load_instrument('mock') + def assert_on_reconnect( + *, + use_user_cfg: Optional[bool], + use_instr_cfg: Optional[bool], + expect_failure: bool, + ) -> None: + qcodes.config["station"]["enable_forced_reconnect"] = use_user_cfg + st = station_from_config_str(get_instrument_config(use_instr_cfg)) + st.load_instrument("mock") if expect_failure: with pytest.raises(KeyError) as excinfo: - st.load_instrument('mock') - assert ("Another instrument has the name: mock" - in str(excinfo.value)) + st.load_instrument("mock") + assert "Another instrument has the name: mock" in str(excinfo.value) else: - st.load_instrument('mock') + st.load_instrument("mock") Instrument.close_all() for use_user_cfg in [None, True, False]: - assert_on_reconnect(use_user_cfg=use_user_cfg, - use_instr_cfg=False, - expect_failure=True) - assert_on_reconnect(use_user_cfg=use_user_cfg, - use_instr_cfg=True, - expect_failure=False) - - assert_on_reconnect(use_user_cfg=True, - use_instr_cfg=None, - expect_failure=False) - assert_on_reconnect(use_user_cfg=False, - use_instr_cfg=None, - expect_failure=True) - assert_on_reconnect(use_user_cfg=None, - use_instr_cfg=None, - expect_failure=True) + assert_on_reconnect( + use_user_cfg=use_user_cfg, use_instr_cfg=False, expect_failure=True + ) + assert_on_reconnect( + use_user_cfg=use_user_cfg, use_instr_cfg=True, expect_failure=False + ) + + assert_on_reconnect(use_user_cfg=True, use_instr_cfg=None, expect_failure=False) + assert_on_reconnect(use_user_cfg=False, use_instr_cfg=None, expect_failure=True) + assert_on_reconnect(use_user_cfg=None, use_instr_cfg=None, expect_failure=True) def test_revive_instance() -> None: @@ -511,7 +507,7 @@ def test_revive_instance() -> None: assert mock is not st.mock assert mock2 is st.mock - mock3 = st.load_instrument('mock', revive_instance=True) + mock3 = st.load_instrument("mock", revive_instance=True) assert mock3 == mock2 assert mock3 == st.mock @@ -527,19 +523,20 @@ def test_init_parameters() -> None: gates: {"ch1", "ch2"} """ ) - mock = st.load_instrument('mock') + mock = st.load_instrument("mock") for ch in ["ch1", "ch2"]: assert ch in mock.parameters.keys() assert len(mock.parameters) == 3 # there is also IDN # Overwrite parameter - mock = st.load_instrument('mock', gates=["TestGate"]) + mock = st.load_instrument("mock", gates=["TestGate"]) assert "TestGate" in mock.parameters.keys() assert len(mock.parameters) == 2 # there is also IDN # test address - sims_path = 
get_qcodes_path('instrument', 'sims') - st = station_from_config_str(f""" + sims_path = get_qcodes_path("instrument", "sims") + st = station_from_config_str( + f""" instruments: lakeshore: type: qcodes.instrument_drivers.Lakeshore.Model_336.Model_336 @@ -547,16 +544,17 @@ def test_init_parameters() -> None: address: GPIB::2::INSTR init: visalib: '{sims_path}lakeshore_model336.yaml@sim' - """) - st.load_instrument('lakeshore') + """ + ) + st.load_instrument("lakeshore") def test_name_init_kwarg(simple_mock_station) -> None: # special case of `name` as kwarg st = simple_mock_station - mock = st.load_instrument('mock', name='test') - assert mock.name == 'test' - assert st.components['test'] is mock + mock = st.load_instrument("mock", name="test") + assert mock.name == "test" + assert st.components["test"] is mock def test_name_specified_in_init_in_yaml_is_used() -> None: @@ -570,10 +568,10 @@ def test_name_specified_in_init_in_yaml_is_used() -> None: """ ) - mock = st.load_instrument('mock') + mock = st.load_instrument("mock") assert isinstance(mock, DummyInstrument) - assert mock.name == 'dummy' - assert st.components['dummy'] is mock + assert mock.name == "dummy" + assert st.components["dummy"] is mock class InstrumentWithNameAsNotFirstArgument(Instrument): @@ -587,13 +585,14 @@ def test_able_to_load_instrument_with_name_argument_not_being_the_first() -> Non """ instruments: name_goes_second: - type: qcodes.tests.test_station.InstrumentWithNameAsNotFirstArgument - """) + type: tests.test_station.InstrumentWithNameAsNotFirstArgument + """ + ) - instr = st.load_instrument('name_goes_second', first_arg=42) + instr = st.load_instrument("name_goes_second", first_arg=42) assert isinstance(instr, InstrumentWithNameAsNotFirstArgument) - assert instr.name == 'name_goes_second' - assert st.components['name_goes_second'] is instr + assert instr.name == "name_goes_second" + assert st.components["name_goes_second"] is instr def test_setup_alias_parameters() -> None: @@ -620,12 +619,12 @@ def test_setup_alias_parameters() -> None: mock = st.load_instrument("mock") p = getattr(mock, "gate_a") assert isinstance(p, Parameter) - assert p.unit == 'mV' - assert p.label == 'main gate' + assert p.unit == "mV" + assert p.label == "main gate" assert p.scale == 2 assert p.offset == 1 assert isinstance(p.vals, validators.Numbers) - assert str(p.vals) == '' + assert str(p.vals) == "" assert p() == 9 mock.ch1(1) assert p() == 1 @@ -666,26 +665,27 @@ def test_setup_delegate_parameters() -> None: mock = st.load_instrument("mock") p = getattr(mock, "gate_a") assert isinstance(p, DelegateParameter) - assert p.unit == 'mV' - assert p.label == 'main gate' + assert p.unit == "mV" + assert p.label == "main gate" assert p.scale == 2 assert p.offset == 1 assert isinstance(p.vals, validators.Numbers) - assert str(p.vals) == '' + assert str(p.vals) == "" assert p() == 2 - assert mock.ch1.unit == 'V' - assert mock.ch1.label == 'ch1' + assert mock.ch1.unit == "V" + assert mock.ch1.label == "ch1" assert mock.ch1.scale == 1 assert mock.ch1.offset == 0 assert isinstance(p.vals, validators.Numbers) - assert str(mock.ch1.vals) == '' + assert str(mock.ch1.vals) == "" assert mock.ch1() == 5 mock.ch1(7) assert p() == 3 assert p.raw_value == 7 assert mock.ch1.raw_value == 7 - assert (json.dumps(mock.ch1.snapshot()) == - json.dumps(p.snapshot()['source_parameter'])) + assert json.dumps(mock.ch1.snapshot()) == json.dumps( + p.snapshot()["source_parameter"] + ) def test_channel_instrument() -> None: @@ -729,32 +729,32 @@ def 
test_setting_channel_parameter() -> None: def test_monitor_not_loaded_by_default(example_station_config) -> None: st = Station(config_file=example_station_config) - st.load_instrument('mock_dac') + st.load_instrument("mock_dac") assert Monitor.running is None def test_monitor_loaded_if_specified(example_station_config) -> None: st = Station(config_file=example_station_config, use_monitor=True) - st.load_instrument('mock_dac') + st.load_instrument("mock_dac") assert Monitor.running is not None assert len(Monitor.running._parameters) == 1 - assert Monitor.running._parameters[0].name == 'ch1' + assert Monitor.running._parameters[0].name == "ch1" Monitor.running.stop() def test_monitor_loaded_by_default_if_in_config(example_station_config) -> None: - qcodes.config["station"]['use_monitor'] = True + qcodes.config["station"]["use_monitor"] = True st = Station(config_file=example_station_config) - st.load_instrument('mock_dac') + st.load_instrument("mock_dac") assert Monitor.running is not None assert len(Monitor.running._parameters) == 1 - assert Monitor.running._parameters[0].name == 'ch1' + assert Monitor.running._parameters[0].name == "ch1" Monitor.running.stop() def test_monitor_not_loaded_if_specified(example_station_config) -> None: st = Station(config_file=example_station_config, use_monitor=False) - st.load_instrument('mock_dac') + st.load_instrument("mock_dac") assert Monitor.running is None @@ -772,7 +772,7 @@ def test_deprecated_driver_keyword() -> None: alternative='the "type"-keyword instead, prepending the driver value to it', ) with pytest.warns(QCoDeSDeprecationWarning, match=message): - st.load_instrument('mock') + st.load_instrument("mock") def test_deprecated_limits_keyword_as_string() -> None: @@ -793,7 +793,7 @@ def test_deprecated_limits_keyword_as_string() -> None: alternative=r'an array like "\[lower_lim, upper_lim\]"', ) with pytest.warns(QCoDeSDeprecationWarning, match=message): - st.load_instrument('mock') + st.load_instrument("mock") def test_config_validation_failure() -> None: @@ -823,18 +823,20 @@ def test_config_validation_failure_on_file() -> None: def test_config_validation_comprehensive_config() -> None: - Station(config_file=os.path.join( - get_qcodes_path(), 'dist', 'tests', 'station', 'example.station.yaml') + Station( + config_file=os.path.join( + get_qcodes_path(), "dist", "tests", "station", "example.station.yaml" + ) ) def test_load_all_instruments_raises_on_both_only_names_and_only_types_passed( - example_station + example_station, ) -> None: with pytest.raises( ValueError, match="It is an error to supply both ``only_names`` " - "and ``only_types`` arguments.", + "and ``only_types`` arguments.", ): example_station.load_all_instruments(only_names=(), only_types=()) @@ -865,8 +867,7 @@ def test_load_all_instruments_only_types(example_station) -> None: assert Instrument.exist(instrument) other_instruments = ( - set(example_station.config["instruments"].keys()) - - all_dummy_instruments + set(example_station.config["instruments"].keys()) - all_dummy_instruments ) for instrument in other_instruments: @@ -888,8 +889,7 @@ def test_load_all_instruments_only_names(example_station) -> None: assert Instrument.exist(instrument) other_instruments = ( - set(example_station.config["instruments"].keys()) - - instruments_to_load + set(example_station.config["instruments"].keys()) - instruments_to_load ) for instrument in other_instruments: @@ -904,7 +904,6 @@ def test_load_all_instruments_without_config_raises() -> None: def 
test_station_config_created_with_multiple_config_files() -> None: - test_config1 = """ instruments: mock_dac1: @@ -921,9 +920,7 @@ def test_station_config_created_with_multiple_config_files() -> None: mock_dac2: type: qcodes.instrument_drivers.mock_instruments.DummyInstrument """ - with config_files_context( - test_config1, test_config2 - ) as file_list: + with config_files_context(test_config1, test_config2) as file_list: assert station_config_has_been_loaded(Station(config_file=file_list)) From 7e0355ed7a47919ff0593914647fb4747f9cd610 Mon Sep 17 00:00:00 2001 From: "Jens H. Nielsen" Date: Mon, 6 Nov 2023 11:44:29 +0100 Subject: [PATCH 6/9] update config/ci to run new layout --- .github/workflows/pytest.yaml | 4 ++-- pyproject.toml | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/.github/workflows/pytest.yaml b/.github/workflows/pytest.yaml index 94cb5f9c688..61326c6912e 100644 --- a/.github/workflows/pytest.yaml +++ b/.github/workflows/pytest.yaml @@ -82,11 +82,11 @@ jobs: if: ${{ !matrix.min-version }} - name: Run parallel tests run: | - pytest -m "not serial" --cov=qcodes --cov-report xml --hypothesis-profile ci --durations=20 src/qcodes + pytest -m "not serial" --cov=qcodes --cov-report xml --hypothesis-profile ci --durations=20 tests # a subset of the tests fails when run in parallel on Windows so run those in serial here - name: Run serial tests run: | - pytest -m "serial" -n 0 --dist no --cov=qcodes --cov-report xml --cov-append --hypothesis-profile ci src/qcodes + pytest -m "serial" -n 0 --dist no --cov=qcodes --cov-report xml --cov-append --hypothesis-profile ci tests - name: Upload coverage to Codecov uses: codecov/codecov-action@v3.1.4 with: diff --git a/pyproject.toml b/pyproject.toml index 0cb6d261c23..281ef5b12ee 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -205,7 +205,7 @@ stubPath = "typings/stubs" [tool.pytest.ini_options] minversion = "6.0" junit_family = "legacy" - +testpaths = "tests" addopts = "-n auto --dist=loadfile" markers = "serial" From 5e8fb55bf80401f685cdb4d36a982b74d8de23a8 Mon Sep 17 00:00:00 2001 From: "Jens H. Nielsen" Date: Thu, 9 Nov 2023 08:09:16 +0100 Subject: [PATCH 7/9] replace qcodes.test with a deprecated mock that does nothing --- src/qcodes/__init__.py | 23 ++++++++++------------- 1 file changed, 10 insertions(+), 13 deletions(-) diff --git a/src/qcodes/__init__.py b/src/qcodes/__init__.py index 692dcc960cc..60ee901ff77 100644 --- a/src/qcodes/__init__.py +++ b/src/qcodes/__init__.py @@ -75,6 +75,7 @@ combine, ) from qcodes.station import Station +from qcodes.utils import deprecate # ensure to close all instruments when interpreter is closed atexit.register(Instrument.close_all) @@ -89,22 +90,18 @@ QCoDeSDeprecationWarning, ) + +@deprecate( + reason="tests are no longer shipped as part of QCoDeS", + alternative="Clone git repo to matching tag and run `pytest tests` from the root of the repo.", +) def test(**kwargs: Any) -> int: """ - Run QCoDeS tests. This requires the test requirements given - in test_requirements.txt to be installed. 
- All arguments are forwarded to pytest.main + Deprecated """ - try: - import pytest - from hypothesis import settings - settings(deadline=1000) - except ImportError: - print("Need pytest and hypothesis to run tests") - return 1 - args = ['--pyargs', 'qcodes.tests'] - retcode = pytest.main(args, **kwargs) - return retcode + return 0 + +del deprecate test.__test__ = False # type: ignore[attr-defined] # Don't try to run this method as a test From 36f58c798c567c085c8ac35a5bb8803a091cdfec Mon Sep 17 00:00:00 2001 From: "Jens H. Nielsen" Date: Thu, 9 Nov 2023 08:26:59 +0100 Subject: [PATCH 8/9] add newsfragment for 5452 --- docs/changes/newsfragments/5452.breaking | 5 +++++ 1 file changed, 5 insertions(+) create mode 100644 docs/changes/newsfragments/5452.breaking diff --git a/docs/changes/newsfragments/5452.breaking b/docs/changes/newsfragments/5452.breaking new file mode 100644 index 00000000000..8222f5a2e24 --- /dev/null +++ b/docs/changes/newsfragments/5452.breaking @@ -0,0 +1,5 @@ +Tests are no longer shipped as part of the qcodes package. The qcodes.tests +namespace still exists but will be deprecated in QCoDeS 0.43.0. +`qcodes.test` is deprecated and will be removed in a future release. +To run the tests against an installed version clone git repo to matching tag and +run `pytest tests` from the root of the repo. From fbcd666c6a5e9cd5459e502094df97bf659d9641 Mon Sep 17 00:00:00 2001 From: "Jens H. Nielsen" Date: Thu, 9 Nov 2023 08:34:19 +0100 Subject: [PATCH 9/9] fix typecheck --- src/qcodes/__init__.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/qcodes/__init__.py b/src/qcodes/__init__.py index 60ee901ff77..bfe1e6786e0 100644 --- a/src/qcodes/__init__.py +++ b/src/qcodes/__init__.py @@ -104,4 +104,4 @@ def test(**kwargs: Any) -> int: del deprecate -test.__test__ = False # type: ignore[attr-defined] # Don't try to run this method as a test +test.__test__ = False # Don't try to run this method as a test
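
For reference, a minimal sketch of the workflow the 5452.breaking newsfragment describes: running the relocated test suite against an installed QCoDeS version from a checkout of the repository. The repository URL is inferred from the issue links in the patches above, and the tag name is a placeholder to be replaced by the release tag matching the installed version; the `pytest` invocations mirror the `testpaths = "tests"` setting and the CI commands introduced in these patches.

    # clone the sources and check out the tag matching the installed qcodes version
    git clone https://github.com/QCoDeS/Qcodes.git
    cd Qcodes
    git checkout vX.Y.Z   # placeholder: use the release tag of the installed version

    # run the whole relocated suite from the repo root
    pytest tests

    # or mirror the CI split: parallel non-serial tests first, then serial tests
    pytest -m "not serial" tests
    pytest -m "serial" -n 0 --dist no tests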