-
Notifications
You must be signed in to change notification settings - Fork 302
[ADD] Search statistics #332
New issue
Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.
By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.
Already on GitHub? Sign in to your account
Conversation
Codecov Report
@@ Coverage Diff @@
## development #332 +/- ##
===============================================
- Coverage 82.01% 76.65% -5.37%
===============================================
Files 152 154 +2
Lines 8674 9376 +702
Branches 1325 1441 +116
===============================================
+ Hits 7114 7187 +73
- Misses 1093 1718 +625
- Partials 467 471 +4
Continue to review full report at Codecov.
|
|
All the commits did NOT change your files. |
| @@ -0,0 +1,313 @@ | |||
| import io | |||
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
Basically, the suggestions in this file and suggestion_base_task.py tell you what fixes we should make.
| @@ -0,0 +1,1452 @@ | |||
| import copy | |||
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
Please compare this against the previous version using something like `diff`;
the changes consist mainly of deletions.
| assert 'lr_scheduler' in X | ||
| if isinstance(pipeline.named_steps['lr_scheduler'].choice, NoScheduler): | ||
| pytest.skip("This scheduler does not support `get_scheduler`") | ||
| lr_scheduler = pipeline.named_steps['lr_scheduler'].choice.get_scheduler() | ||
| if isinstance(lr_scheduler, torch.optim.lr_scheduler.ReduceLROnPlateau): | ||
| pytest.skip("This scheduler is not a child of _LRScheduler") | ||
| assert isinstance(lr_scheduler, _LRScheduler) |
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
Is this related to this PR? (just a question, because I am confused)
def make_dict_run_history_data(data):
    """Convert serialized run-history rows into a ``RunKey -> RunValue`` dict.

    Each row is a pair: ``row[0]`` carries the key fields
    ``(config_id, instance_id, seed, budget)`` and ``row[1]`` carries the
    value fields ``(cost, time, serialized-status, starttime, endtime,
    additional_info)`` — presumably the on-disk SMAC runhistory JSON layout;
    verify against the file that produced ``data``.
    """
    run_history_data = {}
    for row in data:
        key_fields, value_fields = row[0], row[1]
        # Only SUCCESS is recognized from the serialized '__enum__' string;
        # every other status collapses to RUNNING (mirrors the original).
        status = (StatusType.SUCCESS
                  if 'SUCCESS' in value_fields[2]['__enum__']
                  else StatusType.RUNNING)
        key = RunKey(
            config_id=key_fields[0],
            instance_id=key_fields[1],
            seed=key_fields[2],
            budget=key_fields[3],
        )
        run_history_data[key] = RunValue(
            cost=value_fields[0],
            time=value_fields[1],
            status=status,
            starttime=value_fields[3],
            endtime=value_fields[4],
            additional_info=value_fields[5],
        )
    return run_history_data
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
Let me make this test case stronger tomorrow.
| @@ -0,0 +1,89 @@ | |||
| from ConfigSpace.configuration_space import ConfigurationSpace | |||
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
This part as well, is this related to the PR? (I am confused)
def test_search_results_sprint_statistics():
    """Check the types and contents of ``BaseTask.search_results_`` and that
    ``sprint_statistics()`` renders to a string.

    The run history is loaded from a fixture JSON file and wired into a
    mocked-out ``run_history`` object so that no actual search is needed.
    """
    api = BaseTask()
    # Use a context manager so the fixture file handle is closed
    # deterministically instead of being left to the garbage collector.
    runhistory_path = os.path.join(os.path.dirname(__file__),
                                   '.tmp_api/runhistory.json')
    with open(runhistory_path, mode='r') as f:
        run_history_data = json.load(f)['data']
    api.run_history = MagicMock()
    api.run_history.empty = MagicMock(return_value=False)
    api.run_history.data = make_dict_run_history_data(run_history_data)
    api._metric = accuracy
    api.dataset_name = 'iris'
    api._scoring_functions = [accuracy, balanced_accuracy]
    api.search_space = MagicMock(spec=ConfigurationSpace)
    search_results = api.search_results_

    # assert that contents of search_results are of expected types.
    # NOTE: np.float / np.int were deprecated and then removed in NumPy 1.24;
    # np.dtype(float) / np.dtype(int) are the equivalent supported spellings.
    assert isinstance(search_results['mean_opt_scores'], np.ndarray)
    assert search_results['mean_opt_scores'].dtype is np.dtype(float)
    assert isinstance(search_results['mean_fit_times'], np.ndarray)
    assert search_results['mean_fit_times'].dtype is np.dtype(float)
    assert isinstance(search_results['metric_accuracy'], list)
    assert search_results['metric_accuracy'][0] > 0
    assert isinstance(search_results['metric_balanced_accuracy'], list)
    assert search_results['metric_balanced_accuracy'][0] > 0
    assert isinstance(search_results['params'], list)
    assert isinstance(search_results['rank_test_scores'], np.ndarray)
    assert search_results['rank_test_scores'].dtype is np.dtype(int)
    assert isinstance(search_results['status'], list)
    assert isinstance(search_results['status'][0], str)
    assert isinstance(search_results['budgets'], list)
    assert isinstance(search_results['budgets'][0], float)

    assert isinstance(api.sprint_statistics(), str)
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
I will make this test stronger tomorrow.
Addresses #326.