") == 2
# check the difference between the public and private leaderboard
- assert leaderboard_private.count('<td>') > leaderboard_public.count('<td>')
- for private_term in ['bag', 'mean', 'std', 'private']:
+ assert leaderboard_private.count("<td>") > leaderboard_public.count("<td>")
+ for private_term in ["bag", "mean", "std", "private"]:
assert private_term not in leaderboard_public
assert private_term in leaderboard_private
# check the column name in each leaderboard
- assert """ submission ID
+ assert (
+ """submission ID
team
submission
bag public acc
@@ -230,8 +235,11 @@ def test_get_leaderboard(session_toy_db):
validation time [s]
test time [s]
max RAM [MB]
- submitted at (UTC) """ in leaderboard_private
- assert """team
+ submitted at (UTC) """
+ in leaderboard_private
+ )
+ assert (
+ """team
submission
acc
error
@@ -240,21 +248,30 @@ def test_get_leaderboard(session_toy_db):
train time [s]
validation time [s]
max RAM [MB]
- submitted at (UTC) """ in leaderboard_public
- assert """team
+ submitted at (UTC) """
+ in leaderboard_public
+ )
+ assert (
+ """team
submission
submitted at (UTC)
- error """ in leaderboard_failed
+ error """
+ in leaderboard_failed
+ )
# check the same for the competition leaderboard
- assert """rank
+ assert (
+ """rank
team
submission
acc
train time [s]
validation time [s]
- submitted at (UTC) """ in competition_public
- assert """rank
+ submitted at (UTC) """
+ in competition_public
+ )
+ assert (
+ """rank
move
team
submission
@@ -262,24 +279,21 @@ def test_get_leaderboard(session_toy_db):
train time [s]
validation time [s]
test time [s]
- submitted at (UTC) """ in competition_private
+ submitted at (UTC) """
+ in competition_private
+ )
@pytest.mark.parametrize(
- 'event_name, expected_size',
- [('iris_test', 4),
- ('iris_aws_test', 0),
- ('boston_housing_test', 0)]
+ "event_name, expected_size",
+ [("iris_test", 4), ("iris_aws_test", 0), ("boston_housing_test", 0)],
)
-def test_export_leaderboard_to_dataframe(session_toy_db,
- event_name, expected_size):
- """ it will run iris_test if it was not run previously, ie
- test test_get_leaderboard already run """
+def test_export_leaderboard_to_dataframe(session_toy_db, event_name, expected_size):
+ """it will run iris_test if it was not run previously, ie
+ test test_get_leaderboard already run"""
config = read_config(database_config_template())
event_config = read_config(ramp_config_template())
- dispatcher = Dispatcher(
- config, event_config, n_workers=-1, hunger_policy='exit'
- )
+ dispatcher = Dispatcher(config, event_config, n_workers=-1, hunger_policy="exit")
dispatcher.launch()
session_toy_db.commit()
@@ -287,9 +301,11 @@ def test_export_leaderboard_to_dataframe(session_toy_db,
# assert only submissions with the event_name
assert leaderboard.shape[0] == expected_size
- submissions = (session_toy_db.query(Submission)
- .filter(Event.name == event_name)
- .filter(Event.id == EventTeam.event_id)
- .filter(EventTeam.id == Submission.event_team_id)
- .filter(Submission.state == 'scored')).all()
+ submissions = (
+ session_toy_db.query(Submission)
+ .filter(Event.name == event_name)
+ .filter(Event.id == EventTeam.event_id)
+ .filter(EventTeam.id == Submission.event_team_id)
+ .filter(Submission.state == "scored")
+ ).all()
assert len(submissions) == leaderboard.shape[0]
diff --git a/ramp-database/ramp_database/tools/tests/test_query.py b/ramp-database/ramp_database/tools/tests/test_query.py
index 1e230b454..981195266 100644
--- a/ramp-database/ramp_database/tools/tests/test_query.py
+++ b/ramp-database/ramp_database/tools/tests/test_query.py
@@ -15,32 +15,29 @@
select_submission_by_name,
select_submission_by_id,
select_user_by_name,
-
)
def _change_state_db(session):
# change the state of one of the submission in the iris event
submission_id = 1
- sub = (session.query(Submission)
- .filter(Submission.id == submission_id)
- .first())
- sub.set_state('trained')
+ sub = session.query(Submission).filter(Submission.id == submission_id).first()
+ sub.set_state("trained")
session.commit()
-@pytest.fixture(scope='module')
+@pytest.fixture(scope="module")
def session_scope_module(database_connection):
database_config = read_config(database_config_template())
ramp_config = ramp_config_template()
try:
deployment_dir = create_toy_db(database_config, ramp_config)
- with session_scope(database_config['sqlalchemy']) as session:
+ with session_scope(database_config["sqlalchemy"]) as session:
_change_state_db(session)
yield session
finally:
shutil.rmtree(deployment_dir, ignore_errors=True)
- db, _ = setup_db(database_config['sqlalchemy'])
+ db, _ = setup_db(database_config["sqlalchemy"])
Model.metadata.drop_all(db)
@@ -60,8 +57,7 @@ def test_select_submissions_by_state(session_scope_module):
def test_select_submissions_by_name(session_scope_module):
session = session_scope_module
- res = select_submission_by_name(session, "iris_test", "test_user_2",
- "starting_kit")
+ res = select_submission_by_name(session, "iris_test", "test_user_2", "starting_kit")
assert isinstance(res, Submission)
res = select_submission_by_name(session, "unknown", "unknown", "unknown")
diff --git a/ramp-database/ramp_database/tools/tests/test_submission.py b/ramp-database/ramp_database/tools/tests/test_submission.py
index 5e472a991..d881d1dc1 100644
--- a/ramp-database/ramp_database/tools/tests/test_submission.py
+++ b/ramp-database/ramp_database/tools/tests/test_submission.py
@@ -76,36 +76,34 @@ def base_db(database_connection):
ramp_config = ramp_config_template()
try:
deployment_dir = create_test_db(database_config, ramp_config)
- with session_scope(database_config['sqlalchemy']) as session:
+ with session_scope(database_config["sqlalchemy"]) as session:
yield session
finally:
shutil.rmtree(deployment_dir, ignore_errors=True)
- db, _ = setup_db(database_config['sqlalchemy'])
+ db, _ = setup_db(database_config["sqlalchemy"])
Model.metadata.drop_all(db)
def _change_state_db(session):
# change the state of one of the submission in the iris event
submission_id = 1
- sub = (session.query(Submission)
- .filter(Submission.id == submission_id)
- .first())
- sub.set_state('trained')
+ sub = session.query(Submission).filter(Submission.id == submission_id).first()
+ sub.set_state("trained")
session.commit()
-@pytest.fixture(scope='module')
+@pytest.fixture(scope="module")
def session_scope_module(database_connection):
database_config = read_config(database_config_template())
ramp_config = ramp_config_template()
try:
deployment_dir = create_toy_db(database_config, ramp_config)
- with session_scope(database_config['sqlalchemy']) as session:
+ with session_scope(database_config["sqlalchemy"]) as session:
_change_state_db(session)
yield session
finally:
shutil.rmtree(deployment_dir, ignore_errors=True)
- db, _ = setup_db(database_config['sqlalchemy'])
+ db, _ = setup_db(database_config["sqlalchemy"])
Model.metadata.drop_all(db)
@@ -115,7 +113,7 @@ def _setup_sign_up(session):
add_problems(session)
add_events(session)
sign_up_teams_to_events(session)
- return 'iris_test', 'test_user'
+ return "iris_test", "test_user"
def test_add_submission_create_new_submission(base_db):
@@ -126,18 +124,17 @@ def test_add_submission_create_new_submission(base_db):
event_name, username = _setup_sign_up(session)
ramp_config = generate_ramp_config(read_config(config))
- submission_name = 'random_forest_10_10'
+ submission_name = "random_forest_10_10"
path_submission = os.path.join(
- os.path.dirname(ramp_config['ramp_sandbox_dir']), submission_name
+ os.path.dirname(ramp_config["ramp_sandbox_dir"]), submission_name
)
- add_submission(session, event_name, username, submission_name,
- path_submission)
+ add_submission(session, event_name, username, submission_name, path_submission)
all_submissions = get_submissions(session, event_name, None)
# check that the submissions have been copied
for sub_id, _, _ in all_submissions:
sub = get_submission_by_id(session, sub_id)
assert os.path.exists(sub.path)
- assert os.path.exists(os.path.join(sub.path, 'estimator.py'))
+ assert os.path.exists(os.path.join(sub.path, "estimator.py"))
# `sign_up_team` make a submission (sandbox) by user. This submission will
# be the third submission.
@@ -145,14 +142,15 @@ def test_add_submission_create_new_submission(base_db):
# check that the number of submissions for an event was updated
event = session.query(Event).filter(Event.name == event_name).one_or_none()
assert event.n_submissions == 1
- submission = get_submission_by_name(session, event_name, username,
- submission_name)
+ submission = get_submission_by_name(session, event_name, username, submission_name)
assert submission.name == submission_name
submission_file = submission.files[0]
- assert submission_file.name == 'estimator'
- assert submission_file.extension == 'py'
- assert (os.path.join('submission_00000000' + str(ID_SUBMISSION),
- 'estimator.py') in submission_file.path)
+ assert submission_file.name == "estimator"
+ assert submission_file.extension == "py"
+ assert (
+ os.path.join("submission_00000000" + str(ID_SUBMISSION), "estimator.py")
+ in submission_file.path
+ )
def test_add_submission_too_early_submission(base_db):
@@ -164,25 +162,30 @@ def test_add_submission_too_early_submission(base_db):
ramp_config = generate_ramp_config(read_config(config))
# check that we have an awaiting time for the event
- event = (session.query(Event)
- .filter(Event.name == event_name)
- .one_or_none())
+ event = session.query(Event).filter(Event.name == event_name).one_or_none()
assert event.min_duration_between_submissions == 900
# make 2 submissions which are too close from each other
- for submission_idx, submission_name in enumerate(['random_forest_10_10',
- 'too_early_submission']):
+ for submission_idx, submission_name in enumerate(
+ ["random_forest_10_10", "too_early_submission"]
+ ):
path_submission = os.path.join(
- os.path.dirname(ramp_config['ramp_sandbox_dir']), submission_name
+ os.path.dirname(ramp_config["ramp_sandbox_dir"]), submission_name
)
if submission_idx == 1:
- err_msg = 'You need to wait'
+ err_msg = "You need to wait"
with pytest.raises(TooEarlySubmissionError, match=err_msg):
- add_submission(session, event_name, username, submission_name,
- path_submission)
+ add_submission(
+ session,
+ event_name,
+ username,
+ submission_name,
+ path_submission,
+ )
else:
- add_submission(session, event_name, username, submission_name,
- path_submission)
+ add_submission(
+ session, event_name, username, submission_name, path_submission
+ )
def test_make_submission_resubmission(base_db):
@@ -195,40 +198,50 @@ def test_make_submission_resubmission(base_db):
# submitting the starting_kit which is used as the default submission for
# the sandbox should raise an error
- err_msg = ('Submission "starting_kit" of team "test_user" at event '
- '"iris_test" exists already')
+ err_msg = (
+ 'Submission "starting_kit" of team "test_user" at event '
+ '"iris_test" exists already'
+ )
with pytest.raises(DuplicateSubmissionError, match=err_msg):
- add_submission(session, event_name, username,
- os.path.basename(ramp_config['ramp_sandbox_dir']),
- ramp_config['ramp_sandbox_dir'])
+ add_submission(
+ session,
+ event_name,
+ username,
+ os.path.basename(ramp_config["ramp_sandbox_dir"]),
+ ramp_config["ramp_sandbox_dir"],
+ )
# submitting twice a normal submission should raise an error as well
- submission_name = 'random_forest_10_10'
+ submission_name = "random_forest_10_10"
path_submission = os.path.join(
- os.path.dirname(ramp_config['ramp_sandbox_dir']), submission_name
+ os.path.dirname(ramp_config["ramp_sandbox_dir"]), submission_name
)
# first submission
- add_submission(session, event_name, username, submission_name,
- path_submission,)
+ add_submission(
+ session,
+ event_name,
+ username,
+ submission_name,
+ path_submission,
+ )
# mock that we scored the submission
- set_submission_state(session, ID_SUBMISSION, 'scored')
+ set_submission_state(session, ID_SUBMISSION, "scored")
# second submission
- err_msg = ('Submission "random_forest_10_10" of team "test_user" at event '
- '"iris_test" exists already')
+ err_msg = (
+ 'Submission "random_forest_10_10" of team "test_user" at event '
+ '"iris_test" exists already'
+ )
with pytest.raises(DuplicateSubmissionError, match=err_msg):
- add_submission(session, event_name, username, submission_name,
- path_submission)
+ add_submission(session, event_name, username, submission_name, path_submission)
# a resubmission can take place if it is tagged as "new" or failed
# mock that the submission failed during the training
- set_submission_state(session, ID_SUBMISSION, 'training_error')
- add_submission(session, event_name, username, submission_name,
- path_submission)
+ set_submission_state(session, ID_SUBMISSION, "training_error")
+ add_submission(session, event_name, username, submission_name, path_submission)
# mock that the submissions are new submissions
- set_submission_state(session, ID_SUBMISSION, 'new')
- add_submission(session, event_name, username, submission_name,
- path_submission)
+ set_submission_state(session, ID_SUBMISSION, "new")
+ add_submission(session, event_name, username, submission_name, path_submission)
def test_add_submission_wrong_submission_files(base_db):
@@ -239,33 +252,30 @@ def test_add_submission_wrong_submission_files(base_db):
event_name, username = _setup_sign_up(session)
ramp_config = generate_ramp_config(read_config(config))
- submission_name = 'corrupted_submission'
+ submission_name = "corrupted_submission"
path_submission = os.path.join(
- os.path.dirname(ramp_config['ramp_sandbox_dir']), submission_name
+ os.path.dirname(ramp_config["ramp_sandbox_dir"]), submission_name
)
os.makedirs(path_submission)
# case that there is not files in the submission
- err_msg = 'No file corresponding to the workflow element'
+ err_msg = "No file corresponding to the workflow element"
with pytest.raises(MissingSubmissionFileError, match=err_msg):
- add_submission(session, event_name, username, submission_name,
- path_submission)
+ add_submission(session, event_name, username, submission_name, path_submission)
# case that there is not file corresponding to the workflow component
- filename = os.path.join(path_submission, 'unknown_file.xxx')
+ filename = os.path.join(path_submission, "unknown_file.xxx")
open(filename, "w+").close()
- err_msg = 'No file corresponding to the workflow element'
+ err_msg = "No file corresponding to the workflow element"
with pytest.raises(MissingSubmissionFileError, match=err_msg):
- add_submission(session, event_name, username, submission_name,
- path_submission)
+ add_submission(session, event_name, username, submission_name, path_submission)
# case that we have the correct filename but not the right extension
- filename = os.path.join(path_submission, 'estimator.xxx')
+ filename = os.path.join(path_submission, "estimator.xxx")
open(filename, "w+").close()
err_msg = 'All extensions "xxx" are unknown for the submission'
with pytest.raises(MissingExtensionError, match=err_msg):
- add_submission(session, event_name, username, submission_name,
- path_submission)
+ add_submission(session, event_name, username, submission_name, path_submission)
def test_submit_starting_kits(base_db):
@@ -274,111 +284,145 @@ def test_submit_starting_kits(base_db):
event_name, username = _setup_sign_up(session)
ramp_config = generate_ramp_config(read_config(config))
- submit_starting_kits(session, event_name, username,
- ramp_config['ramp_kit_submissions_dir'])
+ submit_starting_kits(
+ session, event_name, username, ramp_config["ramp_kit_submissions_dir"]
+ )
submissions = get_submissions(session, event_name, None)
submissions_id = [sub[0] for sub in submissions]
assert len(submissions) == 5
- expected_submission_name = {'starting_kit', 'starting_kit_test',
- 'random_forest_10_10', 'error'}
- submission_name = {get_submission_by_id(session, sub_id).name
- for sub_id in submissions_id}
+ expected_submission_name = {
+ "starting_kit",
+ "starting_kit_test",
+ "random_forest_10_10",
+ "error",
+ }
+ submission_name = {
+ get_submission_by_id(session, sub_id).name for sub_id in submissions_id
+ }
assert submission_name == expected_submission_name
@pytest.mark.parametrize(
"state, expected_id",
- [('new', [2, 7, 8, 9, 10, 11, 12]),
- ('trained', [1]),
- ('tested', []),
- (None, [1, 2, 7, 8, 9, 10, 11, 12])]
+ [
+ ("new", [2, 7, 8, 9, 10, 11, 12]),
+ ("trained", [1]),
+ ("tested", []),
+ (None, [1, 2, 7, 8, 9, 10, 11, 12]),
+ ],
)
def test_get_submissions(session_scope_module, state, expected_id):
- submissions = get_submissions(session_scope_module, 'iris_test',
- state=state)
+ submissions = get_submissions(session_scope_module, "iris_test", state=state)
assert len(submissions) == len(expected_id)
for submission_id, sub_name, sub_path in submissions:
assert submission_id in expected_id
- assert 'submission_{:09d}'.format(submission_id) == sub_name
- path_file = os.path.join('submission_{:09d}'.format(submission_id),
- 'estimator.py')
+ assert "submission_{:09d}".format(submission_id) == sub_name
+ path_file = os.path.join(
+ "submission_{:09d}".format(submission_id), "estimator.py"
+ )
assert path_file in sub_path[0]
def test_get_submission_unknown_state(session_scope_module):
- with pytest.raises(UnknownStateError, match='Unrecognized state'):
- get_submissions(session_scope_module, 'iris_test', state='whatever')
+ with pytest.raises(UnknownStateError, match="Unrecognized state"):
+ get_submissions(session_scope_module, "iris_test", state="whatever")
def test_get_submission_by_id(session_scope_module):
submission = get_submission_by_id(session_scope_module, 1)
assert isinstance(submission, Submission)
- assert submission.basename == 'submission_000000001'
- assert os.path.exists(os.path.join(submission.path, 'estimator.py'))
- assert submission.state == 'trained'
+ assert submission.basename == "submission_000000001"
+ assert os.path.exists(os.path.join(submission.path, "estimator.py"))
+ assert submission.state == "trained"
def test_get_submission_by_name(session_scope_module):
- submission = get_submission_by_name(session_scope_module, 'iris_test',
- 'test_user', 'starting_kit')
+ submission = get_submission_by_name(
+ session_scope_module, "iris_test", "test_user", "starting_kit"
+ )
assert isinstance(submission, Submission)
- assert submission.basename == 'submission_000000001'
- assert os.path.exists(os.path.join(submission.path, 'estimator.py'))
- assert submission.state == 'trained'
+ assert submission.basename == "submission_000000001"
+ assert os.path.exists(os.path.join(submission.path, "estimator.py"))
+ assert submission.state == "trained"
def test_get_event_nb_folds(session_scope_module):
- assert get_event_nb_folds(session_scope_module, 'iris_test') == 2
+ assert get_event_nb_folds(session_scope_module, "iris_test") == 2
-@pytest.mark.parametrize("submission_id, state", [(1, 'trained'), (2, 'new')])
+@pytest.mark.parametrize("submission_id, state", [(1, "trained"), (2, "new")])
def test_get_submission_state(session_scope_module, submission_id, state):
assert get_submission_state(session_scope_module, submission_id) == state
def test_set_submission_state(session_scope_module):
submission_id = 2
- set_submission_state(session_scope_module, submission_id, 'trained')
+ set_submission_state(session_scope_module, submission_id, "trained")
state = get_submission_state(session_scope_module, submission_id)
- assert state == 'trained'
+ assert state == "trained"
def test_set_submission_state_unknown_state(session_scope_module):
- with pytest.raises(UnknownStateError, match='Unrecognized state'):
- set_submission_state(session_scope_module, 2, 'unknown')
+ with pytest.raises(UnknownStateError, match="Unrecognized state"):
+ set_submission_state(session_scope_module, 2, "unknown")
def test_check_time(session_scope_module):
# check both set_time and get_time function
submission_id = 1
- path_results = os.path.join(HERE, 'data', 'iris_predictions')
+ path_results = os.path.join(HERE, "data", "iris_predictions")
set_time(session_scope_module, submission_id, path_results)
submission_time = get_time(session_scope_module, submission_id)
expected_df = pd.DataFrame(
- {'fold': [0, 1],
- 'train': [0.032130, 0.002414],
- 'valid': [0.000583648681640625, 0.000548362731933594],
- 'test': [0.000515460968017578, 0.000481128692626953]}
- ).set_index('fold')
+ {
+ "fold": [0, 1],
+ "train": [0.032130, 0.002414],
+ "valid": [0.000583648681640625, 0.000548362731933594],
+ "test": [0.000515460968017578, 0.000481128692626953],
+ }
+ ).set_index("fold")
assert_frame_equal(submission_time, expected_df, check_less_precise=True)
def test_check_scores(session_scope_module):
# check both set_scores and get_scores
submission_id = 1
- path_results = os.path.join(HERE, 'data', 'iris_predictions')
+ path_results = os.path.join(HERE, "data", "iris_predictions")
set_scores(session_scope_module, submission_id, path_results)
scores = get_scores(session_scope_module, submission_id)
multi_index = pd.MultiIndex.from_product(
- [[0, 1], ['train', 'valid', 'test']], names=['fold', 'step']
+ [[0, 1], ["train", "valid", "test"]], names=["fold", "step"]
)
expected_df = pd.DataFrame(
- {'acc': [0.604167, 0.583333, 0.733333, 0.604167, 0.583333, 0.733333],
- 'error': [0.395833, 0.416667, 0.266667, 0.395833, 0.416667, 0.266667],
- 'nll': [0.732763, 2.194549, 0.693464, 0.746132, 2.030762, 0.693992],
- 'f1_70': [0.333333, 0.33333, 0.666667, 0.33333, 0.33333, 0.666667]},
- index=multi_index
+ {
+ "acc": [
+ 0.604167,
+ 0.583333,
+ 0.733333,
+ 0.604167,
+ 0.583333,
+ 0.733333,
+ ],
+ "error": [
+ 0.395833,
+ 0.416667,
+ 0.266667,
+ 0.395833,
+ 0.416667,
+ 0.266667,
+ ],
+ "nll": [
+ 0.732763,
+ 2.194549,
+ 0.693464,
+ 0.746132,
+ 2.030762,
+ 0.693992,
+ ],
+ "f1_70": [0.333333, 0.33333, 0.666667, 0.33333, 0.33333, 0.666667],
+ },
+ index=multi_index,
)
assert_frame_equal(scores, expected_df, check_less_precise=True)
@@ -386,21 +430,34 @@ def test_check_scores(session_scope_module):
def test_check_bagged_scores(session_scope_module):
# check both set_bagged_scores and get_bagged_scores
submission_id = 1
- path_results = os.path.join(HERE, 'data', 'iris_predictions')
+ path_results = os.path.join(HERE, "data", "iris_predictions")
set_bagged_scores(session_scope_module, submission_id, path_results)
scores = get_bagged_scores(session_scope_module, submission_id)
- multi_index = pd.MultiIndex(levels=[['test', 'valid'], [0, 1]],
- codes=[[0, 0, 1, 1], [0, 1, 0, 1]],
- names=['step', 'n_bag'])
+ multi_index = pd.MultiIndex(
+ levels=[["test", "valid"], [0, 1]],
+ codes=[[0, 0, 1, 1], [0, 1, 0, 1]],
+ names=["step", "n_bag"],
+ )
expected_df = pd.DataFrame(
- {'acc': [0.70833333333, 0.70833333333, 0.65, 0.6486486486486],
- 'error': [0.29166666667, 0.29166666667, 0.35, 0.35135135135],
- 'nll': [0.80029268745, 0.66183018275, 0.52166532641, 0.58510855181],
- 'f1_70': [0.66666666667, 0.33333333333, 0.33333333333,
- 0.33333333333]},
- index=multi_index
+ {
+ "acc": [0.70833333333, 0.70833333333, 0.65, 0.6486486486486],
+ "error": [0.29166666667, 0.29166666667, 0.35, 0.35135135135],
+ "nll": [
+ 0.80029268745,
+ 0.66183018275,
+ 0.52166532641,
+ 0.58510855181,
+ ],
+ "f1_70": [
+ 0.66666666667,
+ 0.33333333333,
+ 0.33333333333,
+ 0.33333333333,
+ ],
+ },
+ index=multi_index,
)
- expected_df.columns = expected_df.columns.rename('scores')
+ expected_df.columns = expected_df.columns.rename("scores")
assert_frame_equal(scores, expected_df, check_less_precise=True)
@@ -416,9 +473,8 @@ def test_check_submission_max_ram(session_scope_module):
def test_check_submission_error_msg(session_scope_module):
# check both get_submission_error_msg and set_submission_error_msg
submission_id = 1
- expected_err_msg = 'tag submission as failed'
- set_submission_error_msg(session_scope_module, submission_id,
- expected_err_msg)
+ expected_err_msg = "tag submission as failed"
+ set_submission_error_msg(session_scope_module, submission_id, expected_err_msg)
err_msg = get_submission_error_msg(session_scope_module, submission_id)
assert err_msg == expected_err_msg
@@ -436,8 +492,11 @@ def test_get_source_submission(session_scope_module):
event = submission.event_team.event
user = submission.event_team.team.admin
add_user_interaction(
- session_scope_module, user=user, interaction='looking at submission',
- event=event, submission=get_submission_by_id(session_scope_module, 2)
+ session_scope_module,
+ user=user,
+ interaction="looking at submission",
+ event=event,
+ submission=get_submission_by_id(session_scope_module, 2),
)
submissions = get_source_submissions(session_scope_module, submission_id)
assert not submissions
@@ -446,21 +505,26 @@ def test_get_source_submission(session_scope_module):
submission.submission_timestamp += datetime.timedelta(days=1)
submissions = get_source_submissions(session_scope_module, submission_id)
assert submissions
- assert all([sub.event_team.event.name == event.name
- for sub in submissions])
+ assert all([sub.event_team.event.name == event.name for sub in submissions])
def test_add_submission_similarity(session_scope_module):
- user = get_user_by_name(session_scope_module, 'test_user')
+ user = get_user_by_name(session_scope_module, "test_user")
source_submission = get_submission_by_id(session_scope_module, 1)
target_submission = get_submission_by_id(session_scope_module, 2)
- add_submission_similarity(session_scope_module, 'target_credit', user,
- source_submission, target_submission, 0.5,
- datetime.datetime.utcnow())
+ add_submission_similarity(
+ session_scope_module,
+ "target_credit",
+ user,
+ source_submission,
+ target_submission,
+ 0.5,
+ datetime.datetime.utcnow(),
+ )
similarity = session_scope_module.query(SubmissionSimilarity).all()
assert len(similarity) == 1
similarity = similarity[0]
- assert similarity.type == 'target_credit'
+ assert similarity.type == "target_credit"
assert similarity.user == user
assert similarity.source_submission == source_submission
assert similarity.target_submission == target_submission
@@ -473,13 +537,14 @@ def test_compute_contributivity(session_scope_module):
config = ramp_config_template()
ramp_config = generate_ramp_config(read_config(config))
- ramp_kit_dir = ramp_config['ramp_kit_dir']
- ramp_data_dir = ramp_config['ramp_data_dir']
- ramp_submission_dir = ramp_config['ramp_submissions_dir']
- ramp_predictions_dir = ramp_config['ramp_predictions_dir']
+ ramp_kit_dir = ramp_config["ramp_kit_dir"]
+ ramp_data_dir = ramp_config["ramp_data_dir"]
+ ramp_submission_dir = ramp_config["ramp_submissions_dir"]
+ ramp_predictions_dir = ramp_config["ramp_predictions_dir"]
- submission = get_submission_by_name(session_scope_module, 'iris_test',
- 'test_user', 'starting_kit')
+ submission = get_submission_by_name(
+ session_scope_module, "iris_test", "test_user", "starting_kit"
+ )
# for testing blending, we need to train a submission
# ouputting predictions into the submission directory
@@ -488,22 +553,21 @@ def test_compute_contributivity(session_scope_module):
ramp_data_dir=ramp_data_dir,
ramp_submission_dir=ramp_submission_dir,
submission=submission.basename,
- save_output=True)
+ save_output=True,
+ )
# Mark the submission as scored in the DB
- sub = (session.query(Submission)
- .filter(Submission.id == submission.id)
- .first())
- sub.set_state('scored')
+ sub = session.query(Submission).filter(Submission.id == submission.id).first()
+ sub.set_state("scored")
session.commit()
compute_contributivity(
- session, 'iris_test',
- ramp_kit_dir, ramp_data_dir, ramp_predictions_dir)
- submissions = get_submissions(session, 'iris_test', 'scored')
+ session, "iris_test", ramp_kit_dir, ramp_data_dir, ramp_predictions_dir
+ )
+ submissions = get_submissions(session, "iris_test", "scored")
assert len(submissions)
s = get_submission_by_id(session, submissions[0][0])
assert s.contributivity == pytest.approx(1.0)
for s_on_cv_fold in s.on_cv_folds:
s_on_cv_fold.contributivity == pytest.approx(1.0)
- compute_historical_contributivity(session, 'iris_test')
+ compute_historical_contributivity(session, "iris_test")
diff --git a/ramp-database/ramp_database/tools/tests/test_team.py b/ramp-database/ramp_database/tools/tests/test_team.py
index 070c71bd2..4626a628b 100644
--- a/ramp-database/ramp_database/tools/tests/test_team.py
+++ b/ramp-database/ramp_database/tools/tests/test_team.py
@@ -34,19 +34,19 @@ def session_scope_function(database_connection):
ramp_config = ramp_config_template()
try:
deployment_dir = create_test_db(database_config, ramp_config)
- with session_scope(database_config['sqlalchemy']) as session:
+ with session_scope(database_config["sqlalchemy"]) as session:
add_users(session)
add_problems(session)
add_events(session)
yield session
finally:
shutil.rmtree(deployment_dir, ignore_errors=True)
- db, _ = setup_db(database_config['sqlalchemy'])
+ db, _ = setup_db(database_config["sqlalchemy"])
Model.metadata.drop_all(db)
def test_ask_sign_up_team(session_scope_function):
- event_name, username = 'iris_test', 'test_user'
+ event_name, username = "iris_test", "test_user"
ask_sign_up_team(session_scope_function, event_name, username)
event_team = session_scope_function.query(EventTeam).all()
@@ -64,7 +64,7 @@ def test_ask_sign_up_team(session_scope_function):
def test_sign_up_team(session_scope_function):
- event_name, username = 'iris_test', 'test_user'
+ event_name, username = "iris_test", "test_user"
sign_up_team(session_scope_function, event_name, username)
event_team = session_scope_function.query(EventTeam).all()
@@ -73,30 +73,28 @@ def test_sign_up_team(session_scope_function):
# when signing up a team, the team is approved and the sandbox is setup:
# the starting kit is submitted without training it.
- assert event_team.last_submission_name == 'starting_kit'
+ assert event_team.last_submission_name == "starting_kit"
assert event_team.approved is True
# check the status of the sandbox submission
submission = session_scope_function.query(Submission).all()
assert len(submission) == 1
submission = submission[0]
- assert submission.name == 'starting_kit'
+ assert submission.name == "starting_kit"
assert submission.event_team == event_team
submission_file = submission.files[0]
- assert submission_file.name == 'estimator'
- assert submission_file.extension == 'py'
- assert (os.path.join('submission_000000001',
- 'estimator.py') in submission_file.path)
+ assert submission_file.name == "estimator"
+ assert submission_file.extension == "py"
+ assert os.path.join("submission_000000001", "estimator.py") in submission_file.path
# check the submission on cv fold
- cv_folds = (session_scope_function.query(SubmissionOnCVFold)
- .all())
+ cv_folds = session_scope_function.query(SubmissionOnCVFold).all()
for fold in cv_folds:
- assert fold.state == 'new'
+ assert fold.state == "new"
assert fold.best is False
assert fold.contributivity == pytest.approx(0)
def test_delete_event_team(session_scope_function):
- event_name, username = 'iris_test', 'test_user'
+ event_name, username = "iris_test", "test_user"
sign_up_team(session_scope_function, event_name, username)
event_team = session_scope_function.query(EventTeam).all()
@@ -107,11 +105,7 @@ def test_delete_event_team(session_scope_function):
assert len(event_team) == 0
# check that the user still exist
- user = (session_scope_function.query(User)
- .filter(User.name == username)
- .all())
+ user = session_scope_function.query(User).filter(User.name == username).all()
assert len(user) == 1
- event = (session_scope_function.query(Event)
- .filter(Event.name == event_name)
- .all())
+ event = session_scope_function.query(Event).filter(Event.name == event_name).all()
assert len(event) == 1
diff --git a/ramp-database/ramp_database/tools/tests/test_user.py b/ramp-database/ramp_database/tools/tests/test_user.py
index 1e28e2acb..5cc572670 100644
--- a/ramp-database/ramp_database/tools/tests/test_user.py
+++ b/ramp-database/ramp_database/tools/tests/test_user.py
@@ -36,24 +36,30 @@ def session_scope_function(database_connection):
ramp_config = ramp_config_template()
try:
deployment_dir = create_test_db(database_config, ramp_config)
- with session_scope(database_config['sqlalchemy']) as session:
+ with session_scope(database_config["sqlalchemy"]) as session:
yield session
finally:
shutil.rmtree(deployment_dir, ignore_errors=True)
- db, _ = setup_db(database_config['sqlalchemy'])
+ db, _ = setup_db(database_config["sqlalchemy"])
Model.metadata.drop_all(db)
def test_add_user(session_scope_function):
- name = 'test_user'
- password = 'test'
- lastname = 'Test'
- firstname = 'User'
- email = 'test.user@gmail.com'
- access_level = 'asked'
- add_user(session_scope_function, name=name, password=password,
- lastname=lastname, firstname=firstname, email=email,
- access_level=access_level)
+ name = "test_user"
+ password = "test"
+ lastname = "Test"
+ firstname = "User"
+ email = "test.user@gmail.com"
+ access_level = "asked"
+ add_user(
+ session_scope_function,
+ name=name,
+ password=password,
+ lastname=lastname,
+ firstname=firstname,
+ email=email,
+ access_level=access_level,
+ )
user = get_user_by_name(session_scope_function, name)
assert user.name == name
assert check_password(password, user.hashed_password)
@@ -66,69 +72,98 @@ def test_add_user(session_scope_function):
assert team.name == name
assert team.admin_id == user.id
# check that we get an error if we try to add the same user
- with pytest.raises(NameClashError, match='email is already in use'):
- add_user(session_scope_function, name=name, password=password,
- lastname=lastname, firstname=firstname, email=email,
- access_level=access_level)
+ with pytest.raises(NameClashError, match="email is already in use"):
+ add_user(
+ session_scope_function,
+ name=name,
+ password=password,
+ lastname=lastname,
+ firstname=firstname,
+ email=email,
+ access_level=access_level,
+ )
# check that the checking is case insensitive
- with pytest.raises(NameClashError, match='email is already in use'):
- add_user(session_scope_function, name=name, password=password,
- lastname=lastname, firstname=firstname,
- email=email.capitalize(), access_level=access_level)
+ with pytest.raises(NameClashError, match="email is already in use"):
+ add_user(
+ session_scope_function,
+ name=name,
+ password=password,
+ lastname=lastname,
+ firstname=firstname,
+ email=email.capitalize(),
+ access_level=access_level,
+ )
# add a user email with some capital letters and check that only lower case
# are stored in the database
- name = 'new_user_name'
- email = 'MixCase@mail.com'
- add_user(session_scope_function, name=name, password=password,
- lastname=lastname, firstname=firstname, email=email,
- access_level=access_level)
+ name = "new_user_name"
+ email = "MixCase@mail.com"
+ add_user(
+ session_scope_function,
+ name=name,
+ password=password,
+ lastname=lastname,
+ firstname=firstname,
+ email=email,
+ access_level=access_level,
+ )
user = get_user_by_name(session_scope_function, name)
- assert user.email == 'mixcase@mail.com'
+ assert user.email == "mixcase@mail.com"
def test_delete_user(session_scope_function):
- username = 'test_user'
+ username = "test_user"
add_user(
- session_scope_function, name=username, password='password',
- lastname='lastname', firstname='firstname',
- email='test_user@email.com', access_level='asked')
- user = (session_scope_function.query(User)
- .filter(User.name == username)
- .all())
+ session_scope_function,
+ name=username,
+ password="password",
+ lastname="lastname",
+ firstname="firstname",
+ email="test_user@email.com",
+ access_level="asked",
+ )
+ user = session_scope_function.query(User).filter(User.name == username).all()
assert len(user) == 1
delete_user(session_scope_function, username)
- user = (session_scope_function.query(User)
- .filter(User.name == username)
- .one_or_none())
+ user = (
+ session_scope_function.query(User).filter(User.name == username).one_or_none()
+ )
assert user is None
- team = (session_scope_function.query(Team)
- .filter(Team.name == username)
- .all())
+ team = session_scope_function.query(Team).filter(Team.name == username).all()
assert len(team) == 0
def test_make_user_admin(session_scope_function):
- username = 'test_user'
+ username = "test_user"
user = add_user(
- session_scope_function, name=username, password='password',
- lastname='lastname', firstname='firstname',
- email='test_user@email.com', access_level='asked')
- assert user.access_level == 'asked'
+ session_scope_function,
+ name=username,
+ password="password",
+ lastname="lastname",
+ firstname="firstname",
+ email="test_user@email.com",
+ access_level="asked",
+ )
+ assert user.access_level == "asked"
assert user.is_authenticated is False
make_user_admin(session_scope_function, username)
user = get_user_by_name(session_scope_function, username)
- assert user.access_level == 'admin'
+ assert user.access_level == "admin"
assert user.is_authenticated is True
@pytest.mark.parametrize("access_level", ["asked", "user", "admin"])
def test_set_user_access_level(session_scope_function, access_level):
- username = 'test_user'
+ username = "test_user"
user = add_user(
- session_scope_function, name=username, password='password',
- lastname='lastname', firstname='firstname',
- email='test_user@email.com', access_level='asked')
- assert user.access_level == 'asked'
+ session_scope_function,
+ name=username,
+ password="password",
+ lastname="lastname",
+ firstname="firstname",
+ email="test_user@email.com",
+ access_level="asked",
+ )
+ assert user.access_level == "asked"
assert user.is_authenticated is False
set_user_access_level(session_scope_function, username, access_level)
user = get_user_by_name(session_scope_function, username)
@@ -136,98 +171,153 @@ def test_set_user_access_level(session_scope_function, access_level):
assert user.is_authenticated is True
-@pytest.mark.parametrize(
- "name, query_type", [(None, list), ('test_user', User)]
-)
+@pytest.mark.parametrize("name, query_type", [(None, list), ("test_user", User)])
def test_get_user_by_name(session_scope_function, name, query_type):
- add_user(session_scope_function, name='test_user', password='password',
- lastname='lastname', firstname='firstname',
- email='test_user@email.com', access_level='asked')
- add_user(session_scope_function, name='test_user_2',
- password='password', lastname='lastname',
- firstname='firstname', email='test_user_2@email.com',
- access_level='asked')
+ add_user(
+ session_scope_function,
+ name="test_user",
+ password="password",
+ lastname="lastname",
+ firstname="firstname",
+ email="test_user@email.com",
+ access_level="asked",
+ )
+ add_user(
+ session_scope_function,
+ name="test_user_2",
+ password="password",
+ lastname="lastname",
+ firstname="firstname",
+ email="test_user_2@email.com",
+ access_level="asked",
+ )
user = get_user_by_name(session_scope_function, name)
assert isinstance(user, query_type)
def test_set_user_by_instance(session_scope_function):
- add_user(session_scope_function, name='test_user', password='password',
- lastname='lastname', firstname='firstname',
- email='test_user@email.com', access_level='asked')
- add_user(session_scope_function, name='test_user_2',
- password='password', lastname='lastname',
- firstname='firstname', email='test_user_2@email.com',
- access_level='asked')
- user = get_user_by_name(session_scope_function, 'test_user')
- set_user_by_instance(session_scope_function, user, lastname='a',
- firstname='b', email='c', linkedin_url='d',
- twitter_url='e', facebook_url='f', google_url='g',
- github_url='h', website_url='i', bio='j',
- is_want_news=False)
- user = get_user_by_name(session_scope_function, 'test_user')
- assert user.lastname == 'a'
- assert user.firstname == 'b'
- assert user.email == 'c'
- assert user.linkedin_url == 'd'
- assert user.twitter_url == 'e'
- assert user.facebook_url == 'f'
- assert user.google_url == 'g'
- assert user.github_url == 'h'
- assert user.website_url == 'i'
- assert user.bio == 'j'
+ add_user(
+ session_scope_function,
+ name="test_user",
+ password="password",
+ lastname="lastname",
+ firstname="firstname",
+ email="test_user@email.com",
+ access_level="asked",
+ )
+ add_user(
+ session_scope_function,
+ name="test_user_2",
+ password="password",
+ lastname="lastname",
+ firstname="firstname",
+ email="test_user_2@email.com",
+ access_level="asked",
+ )
+ user = get_user_by_name(session_scope_function, "test_user")
+ set_user_by_instance(
+ session_scope_function,
+ user,
+ lastname="a",
+ firstname="b",
+ email="c",
+ linkedin_url="d",
+ twitter_url="e",
+ facebook_url="f",
+ google_url="g",
+ github_url="h",
+ website_url="i",
+ bio="j",
+ is_want_news=False,
+ )
+ user = get_user_by_name(session_scope_function, "test_user")
+ assert user.lastname == "a"
+ assert user.firstname == "b"
+ assert user.email == "c"
+ assert user.linkedin_url == "d"
+ assert user.twitter_url == "e"
+ assert user.facebook_url == "f"
+ assert user.google_url == "g"
+ assert user.github_url == "h"
+ assert user.website_url == "i"
+ assert user.bio == "j"
assert user.is_want_news is False
-@pytest.mark.parametrize(
- "name, query_type", [(None, list), ('test_user', Team)]
-)
+@pytest.mark.parametrize("name, query_type", [(None, list), ("test_user", Team)])
def test_get_team_by_name(session_scope_function, name, query_type):
- add_user(session_scope_function, name='test_user', password='password',
- lastname='lastname', firstname='firstname',
- email='test_user@email.com', access_level='asked')
- add_user(session_scope_function, name='test_user_2',
- password='password', lastname='lastname',
- firstname='firstname', email='test_user_2@email.com',
- access_level='asked')
+ add_user(
+ session_scope_function,
+ name="test_user",
+ password="password",
+ lastname="lastname",
+ firstname="firstname",
+ email="test_user@email.com",
+ access_level="asked",
+ )
+ add_user(
+ session_scope_function,
+ name="test_user_2",
+ password="password",
+ lastname="lastname",
+ firstname="firstname",
+ email="test_user_2@email.com",
+ access_level="asked",
+ )
team = get_team_by_name(session_scope_function, name)
assert isinstance(team, query_type)
def test_approve_user(session_scope_function):
- add_user(session_scope_function, name='test_user', password='test',
- lastname='Test', firstname='User', email='test.user@gmail.com',
- access_level='asked')
- user = get_user_by_name(session_scope_function, 'test_user')
- assert user.access_level == 'asked'
+ add_user(
+ session_scope_function,
+ name="test_user",
+ password="test",
+ lastname="Test",
+ firstname="User",
+ email="test.user@gmail.com",
+ access_level="asked",
+ )
+ user = get_user_by_name(session_scope_function, "test_user")
+ assert user.access_level == "asked"
assert user.is_authenticated is False
- approve_user(session_scope_function, 'test_user')
- user = get_user_by_name(session_scope_function, 'test_user')
- assert user.access_level == 'user'
+ approve_user(session_scope_function, "test_user")
+ user = get_user_by_name(session_scope_function, "test_user")
+ assert user.access_level == "user"
assert user.is_authenticated is True
@pytest.mark.parametrize(
"output_format, expected_format",
- [('dataframe', pd.DataFrame),
- ('html', str)]
+ [("dataframe", pd.DataFrame), ("html", str)],
)
-def test_check_user_interactions(session_scope_function, output_format,
- expected_format):
- add_user(session_scope_function, name='test_user', password='password',
- lastname='lastname', firstname='firstname',
- email='test_user@email.com', access_level='asked')
- params = {'interaction': 'landing'}
+def test_check_user_interactions(
+ session_scope_function, output_format, expected_format
+):
+ add_user(
+ session_scope_function,
+ name="test_user",
+ password="password",
+ lastname="lastname",
+ firstname="firstname",
+ email="test_user@email.com",
+ access_level="asked",
+ )
+ params = {"interaction": "landing"}
add_user_interaction(session_scope_function, **params)
- params = {'interaction': 'landing',
- 'user': get_user_by_name(session_scope_function, 'test_user')}
+ params = {
+ "interaction": "landing",
+ "user": get_user_by_name(session_scope_function, "test_user"),
+ }
add_user_interaction(session_scope_function, **params)
user_interaction = get_user_interactions_by_name(
- session_scope_function, output_format=output_format)
+ session_scope_function, output_format=output_format
+ )
if isinstance(user_interaction, pd.DataFrame):
assert user_interaction.shape[0] == 2
assert isinstance(user_interaction, expected_format)
user_interaction = get_user_interactions_by_name(
- session_scope_function, name='test_user', output_format=output_format)
+ session_scope_function, name="test_user", output_format=output_format
+ )
if isinstance(user_interaction, pd.DataFrame):
assert user_interaction.shape[0] == 1
diff --git a/ramp-database/ramp_database/tools/user.py b/ramp-database/ramp_database/tools/user.py
index faccfbe84..9ba87fa5b 100644
--- a/ramp-database/ramp_database/tools/user.py
+++ b/ramp-database/ramp_database/tools/user.py
@@ -15,13 +15,27 @@
from ._query import select_user_by_email
from ._query import select_user_by_name
-logger = logging.getLogger('RAMP-DATABASE')
-
-
-def add_user(session, name, password, lastname, firstname, email,
- access_level='user', hidden_notes='', linkedin_url='',
- twitter_url='', facebook_url='', google_url='', github_url='',
- website_url='', bio='', is_want_news=True):
+logger = logging.getLogger("RAMP-DATABASE")
+
+
+def add_user(
+ session,
+ name,
+ password,
+ lastname,
+ firstname,
+ email,
+ access_level="user",
+ hidden_notes="",
+ linkedin_url="",
+ twitter_url="",
+ facebook_url="",
+ google_url="",
+ github_url="",
+ website_url="",
+ bio="",
+ is_want_news=True,
+):
"""Add a new user in the database.
Parameters
@@ -68,13 +82,23 @@ def add_user(session, name, password, lastname, firstname, email,
# String
hashed_password = hash_password(password).decode()
lower_case_email = email.lower()
- user = User(name=name, hashed_password=hashed_password,
- lastname=lastname, firstname=firstname, email=lower_case_email,
- access_level=access_level, hidden_notes=hidden_notes,
- linkedin_url=linkedin_url, twitter_url=twitter_url,
- facebook_url=facebook_url, google_url=google_url,
- github_url=github_url, website_url=website_url, bio=bio,
- is_want_news=is_want_news)
+ user = User(
+ name=name,
+ hashed_password=hashed_password,
+ lastname=lastname,
+ firstname=firstname,
+ email=lower_case_email,
+ access_level=access_level,
+ hidden_notes=hidden_notes,
+ linkedin_url=linkedin_url,
+ twitter_url=twitter_url,
+ facebook_url=facebook_url,
+ google_url=google_url,
+ github_url=github_url,
+ website_url=website_url,
+ bio=bio,
+ is_want_news=is_want_news,
+ )
# Creating default team with the same name as the user
# user is admin of his/her own team
@@ -85,22 +109,22 @@ def add_user(session, name, password, lastname, firstname, email,
session.commit()
except IntegrityError as e:
session.rollback()
- message = ''
+ message = ""
if select_user_by_name(session, name) is not None:
- message += 'username is already in use'
+ message += "username is already in use"
elif select_team_by_name(session, name) is not None:
# We only check for team names if username is not in db
- message += 'username is already in use as a team name'
+ message += "username is already in use as a team name"
if select_user_by_email(session, lower_case_email) is not None:
if message:
- message += ' and '
- message += 'email is already in use'
+ message += " and "
+ message += "email is already in use"
if message:
raise NameClashError(message)
else:
raise e
- logger.info('Creating {}'.format(user))
- logger.info('Creating {}'.format(team))
+ logger.info("Creating {}".format(user))
+ logger.info("Creating {}".format(team))
return user
@@ -130,7 +154,7 @@ def make_user_admin(session, name):
The name of the user.
"""
user = select_user_by_name(session, name)
- user.access_level = 'admin'
+ user.access_level = "admin"
user.is_authenticated = True
session.commit()
@@ -155,9 +179,19 @@ def set_user_access_level(session, name, access_level="user"):
session.commit()
-def add_user_interaction(session, interaction=None, user=None, problem=None,
- event=None, ip=None, note=None, submission=None,
- submission_file=None, diff=None, similarity=None):
+def add_user_interaction(
+ session,
+ interaction=None,
+ user=None,
+ problem=None,
+ event=None,
+ ip=None,
+ note=None,
+ submission=None,
+ submission_file=None,
+ diff=None,
+ similarity=None,
+):
"""Add a user interaction in the database.
Parameters
@@ -188,9 +222,17 @@ def add_user_interaction(session, interaction=None, user=None, problem=None,
The similarity of the submission.
"""
user_interaction = UserInteraction(
- session=session, interaction=interaction, user=user, problem=problem,
- ip=ip, note=note, submission=submission, event=event,
- submission_file=submission_file, diff=diff, similarity=similarity
+ session=session,
+ interaction=interaction,
+ user=user,
+ problem=problem,
+ ip=ip,
+ note=note,
+ submission=submission,
+ event=event,
+ submission_file=submission_file,
+ diff=diff,
+ similarity=similarity,
)
session.add(user_interaction)
session.commit()
@@ -207,8 +249,8 @@ def approve_user(session, name):
The name of the user.
"""
user = select_user_by_name(session, name)
- if user.access_level == 'asked':
- user.access_level = 'user'
+ if user.access_level == "asked":
+ user.access_level = "user"
user.is_authenticated = True
session.commit()
@@ -249,8 +291,7 @@ def get_user_by_name_or_email(session, name):
:class:`ramp_database.model.User`
The queried user.
"""
- return (select_user_by_email(session, name) or
- select_user_by_name(session, name))
+ return select_user_by_email(session, name) or select_user_by_name(session, name)
def get_team_by_name(session, name):
@@ -272,8 +313,7 @@ def get_team_by_name(session, name):
return select_team_by_name(session, name)
-def get_user_interactions_by_name(session, name=None,
- output_format='dataframe'):
+def get_user_interactions_by_name(session, name=None, output_format="dataframe"):
"""Get the user interactions.
Parameters
@@ -296,45 +336,68 @@ def get_user_interactions_by_name(session, name=None,
if name is None:
user_interactions = user_interactions.all()
else:
- user_interactions = \
- (user_interactions.filter(UserInteraction.user_id == User.id)
- .filter(User.name == name)
- .all())
+ user_interactions = (
+ user_interactions.filter(UserInteraction.user_id == User.id)
+ .filter(User.name == name)
+ .all()
+ )
map_columns_attributes = defaultdict(list)
for ui in user_interactions:
- map_columns_attributes['timestamp (UTC)'].append(ui.timestamp)
- map_columns_attributes['IP'].append(ui.ip)
- map_columns_attributes['interaction'].append(ui.interaction)
- map_columns_attributes['user'].append(getattr(ui.user, 'name', None))
- map_columns_attributes['event'].append(getattr(
- getattr(ui.event_team, 'event', None), 'name', None))
- map_columns_attributes['team'].append(getattr(
- getattr(ui.event_team, 'team', None), 'name', None))
- map_columns_attributes['submission_id'].append(ui.submission_id)
- map_columns_attributes['submission'].append(
- getattr(ui.submission, 'name_with_link', None))
- map_columns_attributes['file'].append(
- getattr(ui.submission_file, 'name_with_link', None))
- map_columns_attributes['code similarity'].append(
- ui.submission_file_similarity)
- map_columns_attributes['diff'].append(
- None if ui.submission_file_diff is None
- else '<a href="{}">diff</a>'.format(
- ui.submission_file_diff))
- df = (pd.DataFrame(map_columns_attributes)
- .sort_values('timestamp (UTC)', ascending=False)
- .set_index('timestamp (UTC)'))
- if output_format == 'html':
- return df.to_html(escape=False, index=False, max_cols=None,
- max_rows=None, justify='left')
+ map_columns_attributes["timestamp (UTC)"].append(ui.timestamp)
+ map_columns_attributes["IP"].append(ui.ip)
+ map_columns_attributes["interaction"].append(ui.interaction)
+ map_columns_attributes["user"].append(getattr(ui.user, "name", None))
+ map_columns_attributes["event"].append(
+ getattr(getattr(ui.event_team, "event", None), "name", None)
+ )
+ map_columns_attributes["team"].append(
+ getattr(getattr(ui.event_team, "team", None), "name", None)
+ )
+ map_columns_attributes["submission_id"].append(ui.submission_id)
+ map_columns_attributes["submission"].append(
+ getattr(ui.submission, "name_with_link", None)
+ )
+ map_columns_attributes["file"].append(
+ getattr(ui.submission_file, "name_with_link", None)
+ )
+ map_columns_attributes["code similarity"].append(ui.submission_file_similarity)
+ map_columns_attributes["diff"].append(
+ None
+ if ui.submission_file_diff is None
+ else '<a href="{}">diff</a>'.format(ui.submission_file_diff)
+ )
+ df = (
+ pd.DataFrame(map_columns_attributes)
+ .sort_values("timestamp (UTC)", ascending=False)
+ .set_index("timestamp (UTC)")
+ )
+ if output_format == "html":
+ return df.to_html(
+ escape=False,
+ index=False,
+ max_cols=None,
+ max_rows=None,
+ justify="left",
+ )
return df
-def set_user_by_instance(session, user, lastname, firstname, email,
- linkedin_url='', twitter_url='', facebook_url='',
- google_url='', github_url='', website_url='', bio='',
- is_want_news=True):
+def set_user_by_instance(
+ session,
+ user,
+ lastname,
+ firstname,
+ email,
+ linkedin_url="",
+ twitter_url="",
+ facebook_url="",
+ google_url="",
+ github_url="",
+ website_url="",
+ bio="",
+ is_want_news=True,
+):
"""Set the information of a user.
Parameters
@@ -368,22 +431,35 @@ def set_user_by_instance(session, user, lastname, firstname, email,
"""
logger.info('Update the profile of "{}"'.format(user))
- for field in ('lastname', 'firstname', 'linkedin_url', 'twitter_url',
- 'facebook_url', 'google_url', 'github_url', 'website_url',
- 'bio', 'email', 'is_want_news'):
+ for field in (
+ "lastname",
+ "firstname",
+ "linkedin_url",
+ "twitter_url",
+ "facebook_url",
+ "google_url",
+ "github_url",
+ "website_url",
+ "bio",
+ "email",
+ "is_want_news",
+ ):
local_attr = locals()[field]
- if field == 'email':
+ if field == "email":
local_attr = local_attr.lower()
if getattr(user, field) != local_attr:
- logger.info('Update the "{}" field from {} to {}'
- .format(field, getattr(user, field), local_attr))
+ logger.info(
+ 'Update the "{}" field from {} to {}'.format(
+ field, getattr(user, field), local_attr
+ )
+ )
setattr(user, field, local_attr)
try:
session.commit()
except IntegrityError as e:
session.rollback()
if select_user_by_email(session, user.email) is not None:
- message = 'email is already in use'
+ message = "email is already in use"
logger.error(message)
raise NameClashError(message)
diff --git a/ramp-database/ramp_database/utils.py b/ramp-database/ramp_database/utils.py
index b3c377fa3..7ac3f7863 100644
--- a/ramp-database/ramp_database/utils.py
+++ b/ramp-database/ramp_database/utils.py
@@ -72,7 +72,7 @@ def session_scope(config):
def _encode_string(text):
- return bytes(text, 'utf-8') if isinstance(text, str) else text
+ return bytes(text, "utf-8") if isinstance(text, str) else text
def hash_password(password):
@@ -106,6 +106,4 @@ def check_password(password, hashed_password):
is_same_password : bool
Return True if the two passwords are identical.
"""
- return bcrypt.checkpw(
- _encode_string(password), _encode_string(hashed_password)
- )
+ return bcrypt.checkpw(_encode_string(password), _encode_string(hashed_password))
diff --git a/ramp-database/setup.py b/ramp-database/setup.py
index 377a01f57..e6c07ac7e 100755
--- a/ramp-database/setup.py
+++ b/ramp-database/setup.py
@@ -5,43 +5,53 @@
from setuptools import find_packages, setup
# get __version__ from _version.py
-ver_file = os.path.join('ramp_database', '_version.py')
+ver_file = os.path.join("ramp_database", "_version.py")
with open(ver_file) as f:
exec(f.read())
-DISTNAME = 'ramp-database'
+DISTNAME = "ramp-database"
DESCRIPTION = "Database model used in the RAMP bundle"
-with codecs.open('README.rst', encoding='utf-8-sig') as f:
+with codecs.open("README.rst", encoding="utf-8-sig") as f:
LONG_DESCRIPTION = f.read()
-MAINTAINER = 'A. Boucaud, B. Kegl, G. Lemaitre, J. Van den Bossche'
-MAINTAINER_EMAIL = 'boucaud.alexandre@gmail.com, guillaume.lemaitre@inria.fr'
-URL = 'https://github.com/paris-saclay-cds/ramp-board'
-LICENSE = 'BSD (3-clause)'
-DOWNLOAD_URL = 'https://github.com/paris-saclay-cds/ramp-board'
+MAINTAINER = "A. Boucaud, B. Kegl, G. Lemaitre, J. Van den Bossche"
+MAINTAINER_EMAIL = "boucaud.alexandre@gmail.com, guillaume.lemaitre@inria.fr"
+URL = "https://github.com/paris-saclay-cds/ramp-board"
+LICENSE = "BSD (3-clause)"
+DOWNLOAD_URL = "https://github.com/paris-saclay-cds/ramp-board"
VERSION = __version__ # noqa
-CLASSIFIERS = ['Intended Audience :: Science/Research',
- 'Intended Audience :: Developers',
- 'License :: OSI Approved',
- 'Programming Language :: Python',
- 'Topic :: Software Development',
- 'Topic :: Scientific/Engineering',
- 'Operating System :: Microsoft :: Windows',
- 'Operating System :: POSIX',
- 'Operating System :: Unix',
- 'Operating System :: MacOS',
- 'Programming Language :: Python :: 3.6',
- 'Programming Language :: Python :: 3.7',
- 'Programming Language :: Python :: 3.8']
-INSTALL_REQUIRES = ['bcrypt', 'click', 'gitpython', 'nbconvert', 'numpy',
- 'pandas', 'psycopg2-binary', 'sqlalchemy']
+CLASSIFIERS = [
+ "Intended Audience :: Science/Research",
+ "Intended Audience :: Developers",
+ "License :: OSI Approved",
+ "Programming Language :: Python",
+ "Topic :: Software Development",
+ "Topic :: Scientific/Engineering",
+ "Operating System :: Microsoft :: Windows",
+ "Operating System :: POSIX",
+ "Operating System :: Unix",
+ "Operating System :: MacOS",
+ "Programming Language :: Python :: 3.6",
+ "Programming Language :: Python :: 3.7",
+ "Programming Language :: Python :: 3.8",
+]
+INSTALL_REQUIRES = [
+ "bcrypt",
+ "click",
+ "gitpython",
+ "nbconvert",
+ "numpy",
+ "pandas",
+ "psycopg2-binary",
+ "sqlalchemy",
+]
EXTRAS_REQUIRE = {
- 'tests': ['pytest', 'pytest-cov'],
- 'docs': ['sphinx', 'sphinx_rtd_theme', 'numpydoc']
+ "tests": ["pytest", "pytest-cov"],
+ "docs": ["sphinx", "sphinx_rtd_theme", "numpydoc"],
}
PACKAGE_DATA = {
- 'ramp_database': [
- os.path.join('tests', 'data', 'ramp_config_iris.yml'),
- os.path.join('tests', 'data', 'ramp_config_boston_housing.yml')
+ "ramp_database": [
+ os.path.join("tests", "data", "ramp_config_iris.yml"),
+ os.path.join("tests", "data", "ramp_config_boston_housing.yml"),
]
}
@@ -62,7 +72,5 @@
install_requires=INSTALL_REQUIRES,
extras_require=EXTRAS_REQUIRE,
python_requires=">=3.7",
- entry_points={
- 'console_scripts': ['ramp-database = ramp_database.cli:start']
- }
+ entry_points={"console_scripts": ["ramp-database = ramp_database.cli:start"]},
)
diff --git a/ramp-engine/ramp_engine/__init__.py b/ramp-engine/ramp_engine/__init__.py
index d1ae3c100..c9895e78e 100644
--- a/ramp-engine/ramp_engine/__init__.py
+++ b/ramp-engine/ramp_engine/__init__.py
@@ -5,15 +5,17 @@
from ._version import __version__
-available_workers = {'conda': CondaEnvWorker,
- 'aws': AWSWorker,
- 'dask': DaskWorker}
+available_workers = {
+ "conda": CondaEnvWorker,
+ "aws": AWSWorker,
+ "dask": DaskWorker,
+}
__all__ = [
- 'AWSWorker',
- 'CondaEnvWorker',
- 'DaskWorker',
- 'Dispatcher',
- 'available_workers',
- '__version__'
+ "AWSWorker",
+ "CondaEnvWorker",
+ "DaskWorker",
+ "Dispatcher",
+ "available_workers",
+ "__version__",
]
diff --git a/ramp-engine/ramp_engine/_version.py b/ramp-engine/ramp_engine/_version.py
index 531186263..4fc44501a 100644
--- a/ramp-engine/ramp_engine/_version.py
+++ b/ramp-engine/ramp_engine/_version.py
@@ -21,4 +21,4 @@
# 'X.Y.dev0' is the canonical version of 'X.Y.dev'
#
-__version__ = '0.9.0.dev0'
+__version__ = "0.9.0.dev0"
diff --git a/ramp-engine/ramp_engine/aws/api.py b/ramp-engine/ramp_engine/aws/api.py
index 82aec2242..4bcc86077 100644
--- a/ramp-engine/ramp_engine/aws/api.py
+++ b/ramp-engine/ramp_engine/aws/api.py
@@ -13,88 +13,89 @@
__all__ = [
- 'launch_ec2_instances',
- 'terminate_ec2_instance',
- 'list_ec2_instance_ids',
- 'status_of_ec2_instance',
- 'upload_submission',
- 'download_log',
- 'download_predictions',
- 'launch_train',
- 'abort_training',
+ "launch_ec2_instances",
+ "terminate_ec2_instance",
+ "list_ec2_instance_ids",
+ "status_of_ec2_instance",
+ "upload_submission",
+ "download_log",
+ "download_predictions",
+ "launch_train",
+ "abort_training",
]
# we disable the boto3 loggers because they are too verbose
-for k in (logging.Logger.manager.loggerDict.keys()):
- if 'boto' in k:
+for k in logging.Logger.manager.loggerDict.keys():
+ if "boto" in k:
logging.getLogger(k).disabled = True
-logger = logging.getLogger('RAMP-AWS')
+logger = logging.getLogger("RAMP-AWS")
# configuration fields
-AWS_CONFIG_SECTION = 'aws'
-PROFILE_NAME_FIELD = 'profile_name'
-ACCESS_KEY_ID_FIELD = 'access_key_id'
-SECRET_ACCESS_KEY_FIELD = 'secret_access_key'
-REGION_NAME_FIELD = 'region_name'
-AMI_IMAGE_ID_FIELD = 'ami_image_id'
-AMI_IMAGE_NAME_FIELD = 'ami_image_name'
-AMI_USER_NAME_FIELD = 'ami_user_name'
-INSTANCE_TYPE_FIELD = 'instance_type'
-USE_SPOT_INSTANCE_FIELD = 'use_spot_instance'
-KEY_PATH_FIELD = 'key_path'
-KEY_NAME_FIELD = 'key_name'
-SECURITY_GROUP_FIELD = 'security_group'
-REMOTE_RAMP_KIT_FOLDER_FIELD = 'remote_ramp_kit_folder'
-LOCAL_PREDICTIONS_FOLDER_FIELD = 'predictions_dir'
-CHECK_STATUS_INTERVAL_SECS_FIELD = 'check_status_interval_secs'
-CHECK_FINISHED_TRAINING_INTERVAL_SECS_FIELD = (
- 'check_finished_training_interval_secs')
-LOCAL_LOG_FOLDER_FIELD = 'logs_dir'
-TRAIN_LOOP_INTERVAL_SECS_FIELD = 'train_loop_interval_secs'
-MEMORY_PROFILING_FIELD = 'memory_profiling'
+AWS_CONFIG_SECTION = "aws"
+PROFILE_NAME_FIELD = "profile_name"
+ACCESS_KEY_ID_FIELD = "access_key_id"
+SECRET_ACCESS_KEY_FIELD = "secret_access_key"
+REGION_NAME_FIELD = "region_name"
+AMI_IMAGE_ID_FIELD = "ami_image_id"
+AMI_IMAGE_NAME_FIELD = "ami_image_name"
+AMI_USER_NAME_FIELD = "ami_user_name"
+INSTANCE_TYPE_FIELD = "instance_type"
+USE_SPOT_INSTANCE_FIELD = "use_spot_instance"
+KEY_PATH_FIELD = "key_path"
+KEY_NAME_FIELD = "key_name"
+SECURITY_GROUP_FIELD = "security_group"
+REMOTE_RAMP_KIT_FOLDER_FIELD = "remote_ramp_kit_folder"
+LOCAL_PREDICTIONS_FOLDER_FIELD = "predictions_dir"
+CHECK_STATUS_INTERVAL_SECS_FIELD = "check_status_interval_secs"
+CHECK_FINISHED_TRAINING_INTERVAL_SECS_FIELD = "check_finished_training_interval_secs"
+LOCAL_LOG_FOLDER_FIELD = "logs_dir"
+TRAIN_LOOP_INTERVAL_SECS_FIELD = "train_loop_interval_secs"
+MEMORY_PROFILING_FIELD = "memory_profiling"
# how long to wait for connections
WAIT_MINUTES = 2
MAX_TRIES_TO_CONNECT = 1
-HOOKS_SECTION = 'hooks'
-HOOK_START_TRAINING = 'start_training'
-HOOK_SUCCESSFUL_TRAINING = 'successful_training'
-HOOK_FAILED_TRAINING = 'failed_training'
+HOOKS_SECTION = "hooks"
+HOOK_START_TRAINING = "start_training"
+HOOK_SUCCESSFUL_TRAINING = "successful_training"
+HOOK_FAILED_TRAINING = "failed_training"
HOOKS = [
HOOK_START_TRAINING,
HOOK_SUCCESSFUL_TRAINING,
HOOK_FAILED_TRAINING,
]
-ALL_FIELDS = set([
- PROFILE_NAME_FIELD,
- ACCESS_KEY_ID_FIELD,
- SECRET_ACCESS_KEY_FIELD,
- REGION_NAME_FIELD,
- AMI_IMAGE_ID_FIELD,
- AMI_IMAGE_NAME_FIELD,
- AMI_USER_NAME_FIELD,
- INSTANCE_TYPE_FIELD,
- USE_SPOT_INSTANCE_FIELD,
- KEY_PATH_FIELD,
- KEY_NAME_FIELD,
- SECURITY_GROUP_FIELD,
- REMOTE_RAMP_KIT_FOLDER_FIELD,
- LOCAL_PREDICTIONS_FOLDER_FIELD,
- CHECK_STATUS_INTERVAL_SECS_FIELD,
- CHECK_FINISHED_TRAINING_INTERVAL_SECS_FIELD,
- LOCAL_LOG_FOLDER_FIELD,
- TRAIN_LOOP_INTERVAL_SECS_FIELD,
- MEMORY_PROFILING_FIELD,
- HOOKS_SECTION,
-])
+ALL_FIELDS = set(
+ [
+ PROFILE_NAME_FIELD,
+ ACCESS_KEY_ID_FIELD,
+ SECRET_ACCESS_KEY_FIELD,
+ REGION_NAME_FIELD,
+ AMI_IMAGE_ID_FIELD,
+ AMI_IMAGE_NAME_FIELD,
+ AMI_USER_NAME_FIELD,
+ INSTANCE_TYPE_FIELD,
+ USE_SPOT_INSTANCE_FIELD,
+ KEY_PATH_FIELD,
+ KEY_NAME_FIELD,
+ SECURITY_GROUP_FIELD,
+ REMOTE_RAMP_KIT_FOLDER_FIELD,
+ LOCAL_PREDICTIONS_FOLDER_FIELD,
+ CHECK_STATUS_INTERVAL_SECS_FIELD,
+ CHECK_FINISHED_TRAINING_INTERVAL_SECS_FIELD,
+ LOCAL_LOG_FOLDER_FIELD,
+ TRAIN_LOOP_INTERVAL_SECS_FIELD,
+ MEMORY_PROFILING_FIELD,
+ HOOKS_SECTION,
+ ]
+)
REQUIRED_FIELDS = ALL_FIELDS - {HOOKS_SECTION}
# constants
-RAMP_AWS_BACKEND_TAG = 'ramp_aws_backend_instance'
-SUBMISSIONS_FOLDER = 'submissions'
+RAMP_AWS_BACKEND_TAG = "ramp_aws_backend_instance"
+SUBMISSIONS_FOLDER = "submissions"
def _wait_until_train_finished(config, instance_id, submission_name):
@@ -104,15 +105,17 @@ def _wait_until_train_finished(config, instance_id, submission_name):
the screen is still active. If the screen is not active anymore,
then we consider that the training has either finished or failed.
"""
- logger.info('Wait until training of submission "{}" is '
- 'finished on instance "{}"...'.format(submission_name,
- instance_id))
+ logger.info(
+ 'Wait until training of submission "{}" is '
+ 'finished on instance "{}"...'.format(submission_name, instance_id)
+ )
secs = int(config[CHECK_FINISHED_TRAINING_INTERVAL_SECS_FIELD])
while not _training_finished(config, instance_id, submission_name):
time.sleep(secs)
- logger.info('Training of submission "{}" is '
- 'finished on instance "{}".'.format(submission_name,
- instance_id))
+ logger.info(
+ 'Training of submission "{}" is '
+ 'finished on instance "{}".'.format(submission_name, instance_id)
+ )
def launch_ec2_instances(config, nb=1):
@@ -124,9 +127,10 @@ def launch_ec2_instances(config, nb=1):
ami_name = config.get(AMI_IMAGE_NAME_FIELD)
if ami_image_id and ami_name:
raise ValueError(
- 'The fields ami_image_id and ami_image_name cannot be both'
- 'specified at the same time. Please specify either ami_image_id'
- 'or ami_image_name')
+ "The fields ami_image_id and ami_image_name cannot be both"
+ "specified at the same time. Please specify either ami_image_id"
+ "or ami_image_name"
+ )
if ami_name:
try:
ami_image_id = _get_image_id(config, ami_name)
@@ -136,40 +140,42 @@ def launch_ec2_instances(config, nb=1):
key_name = config[KEY_NAME_FIELD]
security_group = config[SECURITY_GROUP_FIELD]
- logger.info('Launching {} new ec2 instance(s)...'.format(nb))
+ logger.info("Launching {} new ec2 instance(s)...".format(nb))
# tag all instances using RAMP_AWS_BACKEND_TAG to be able
# to list all instances later
- tags = [{
- 'ResourceType': 'instance',
- 'Tags': [
- {'Key': RAMP_AWS_BACKEND_TAG, 'Value': '1'},
- ]
- }]
+ tags = [
+ {
+ "ResourceType": "instance",
+ "Tags": [
+ {"Key": RAMP_AWS_BACKEND_TAG, "Value": "1"},
+ ],
+ }
+ ]
sess = _get_boto_session(config)
- client = sess.client('ec2')
- resource = sess.resource('ec2')
+ client = sess.client("ec2")
+ resource = sess.resource("ec2")
switch_to_on_demand = False
if use_spot_instance:
- logger.info('Attempting to use spot instance.')
+ logger.info("Attempting to use spot instance.")
now = datetime.utcnow() + timedelta(seconds=3)
wait_minutes = WAIT_MINUTES
max_tries_to_connect = MAX_TRIES_TO_CONNECT
request_wait = timedelta(minutes=wait_minutes)
n_try = 0
response = None
- while not(response) and (n_try < max_tries_to_connect):
+ while not (response) and (n_try < max_tries_to_connect):
try:
response = client.request_spot_instances(
InstanceCount=nb,
LaunchSpecification={
- 'SecurityGroups': [security_group],
- 'ImageId': ami_image_id,
- 'InstanceType': instance_type,
- 'KeyName': key_name,
+ "SecurityGroups": [security_group],
+ "ImageId": ami_image_id,
+ "InstanceType": instance_type,
+ "KeyName": key_name,
},
- Type='one-time',
+ Type="one-time",
ValidFrom=now,
ValidUntil=(now + request_wait),
)
@@ -178,57 +184,69 @@ def launch_ec2_instances(config, nb=1):
n_try += 1
if n_try < max_tries_to_connect:
# wait before you try again
- logger.warning('Not enough instances available: I am going'
- f' to wait for {wait_minutes} minutes'
- ' before trying again (this was'
- f' {n_try} out of {max_tries_to_connect}'
- ' tries to connect)')
- time.sleep(wait_minutes*60)
+ logger.warning(
+ "Not enough instances available: I am going"
+ f" to wait for {wait_minutes} minutes"
+ " before trying again (this was"
+ f" {n_try} out of {max_tries_to_connect}"
+ " tries to connect)"
+ )
+ time.sleep(wait_minutes * 60)
else:
- logger.error(f'Not enough instances available: {e}')
- return None, 'retry'
+ logger.error(f"Not enough instances available: {e}")
+ return None, "retry"
except Exception as e:
# unknown error
- logger.error(f'AWS worker error: {e}')
+ logger.error(f"AWS worker error: {e}")
return None, e
# Wait until request fulfilled
- waiter = client.get_waiter('spot_instance_request_fulfilled')
- request_id = \
- response['SpotInstanceRequests'][0]['SpotInstanceRequestId']
+ waiter = client.get_waiter("spot_instance_request_fulfilled")
+ request_id = response["SpotInstanceRequests"][0]["SpotInstanceRequestId"]
try:
- waiter.wait(SpotInstanceRequestIds=[request_id, ])
+ waiter.wait(
+ SpotInstanceRequestIds=[
+ request_id,
+ ]
+ )
except botocore.exceptions.WaiterError:
- logger.info('Spot instance request failed due to time out. Using '
- 'on-demand instance instead')
+ logger.info(
+ "Spot instance request failed due to time out. Using "
+ "on-demand instance instead"
+ )
switch_to_on_demand = True
client.cancel_spot_instance_requests(
- SpotInstanceRequestIds=[request_id, ]
+ SpotInstanceRequestIds=[
+ request_id,
+ ]
)
else:
- logger.info('Spot instance request fulfilled.')
+ logger.info("Spot instance request fulfilled.")
# Small wait before getting instance ID
time.sleep(1)
# Get instance ID
response_updated = client.describe_spot_instance_requests(
SpotInstanceRequestIds=[request_id]
)
- instance_id = \
- response_updated['SpotInstanceRequests'][0]['InstanceId']
+ instance_id = response_updated["SpotInstanceRequests"][0]["InstanceId"]
# Create EC2.Instance class
instance = resource.Instance(instance_id)
instance.create_tags(
- Resources=[instance_id, ],
+ Resources=[
+ instance_id,
+ ],
Tags=[
- {
- 'Key': RAMP_AWS_BACKEND_TAG,
- 'Value': '1'
- },
- ])
- instances = [instance, ]
- instance_ids = [instance_id, ]
+ {"Key": RAMP_AWS_BACKEND_TAG, "Value": "1"},
+ ],
+ )
+ instances = [
+ instance,
+ ]
+ instance_ids = [
+ instance_id,
+ ]
if switch_to_on_demand or not use_spot_instance:
- logger.info('Using on-demand instance.')
+ logger.info("Using on-demand instance.")
instances = resource.create_instances(
ImageId=ami_image_id,
MinCount=nb,
@@ -240,7 +258,7 @@ def launch_ec2_instances(config, nb=1):
)
instance_ids = [instance.id for instance in instances]
# Wait until instance is okay
- waiter = client.get_waiter('instance_status_ok')
+ waiter = client.get_waiter("instance_status_ok")
try:
waiter.wait(InstanceIds=instance_ids)
except botocore.exceptions.WaiterError as e:
@@ -251,26 +269,25 @@ def launch_ec2_instances(config, nb=1):
def _get_image_id(config, image_name):
sess = _get_boto_session(config)
- client = sess.client('ec2')
+ client = sess.client("ec2")
# get all the images with the given image_name in the name
- result = client.describe_images(Filters=[
- {
- 'Name': 'name',
- 'Values': [f'{image_name}*'
- ],
- }
- ])
+ result = client.describe_images(
+ Filters=[
+ {
+ "Name": "name",
+ "Values": [f"{image_name}*"],
+ }
+ ]
+ )
- images = result['Images']
+ images = result["Images"]
if len(images) == 0:
- raise ValueError(
- 'No image corresponding to the name "{}"'.format(image_name))
+ raise ValueError('No image corresponding to the name "{}"'.format(image_name))
# get only the newest image if there are more than one
- image = sorted(images, key=lambda x: x['CreationDate'],
- reverse=True)[0]
- return image['ImageId']
+ image = sorted(images, key=lambda x: x["CreationDate"], reverse=True)[0]
+ return image["ImageId"]
def terminate_ec2_instance(config, instance_id):
@@ -287,8 +304,8 @@ def terminate_ec2_instance(config, instance_id):
instance id
"""
sess = _get_boto_session(config)
- resource = sess.resource('ec2')
- logger.info('Killing the instance {}...'.format(instance_id))
+ resource = sess.resource("ec2")
+ logger.info("Killing the instance {}...".format(instance_id))
return resource.instances.filter(InstanceIds=[instance_id]).terminate()
@@ -308,16 +325,15 @@ def list_ec2_instance_ids(config):
list of str
"""
sess = _get_boto_session(config)
- client = sess.client('ec2')
+ client = sess.client("ec2")
instances = client.describe_instances(
Filters=[
- {'Name': 'tag:' + RAMP_AWS_BACKEND_TAG, 'Values': ['1']},
- {'Name': 'instance-state-name', 'Values': ['running']},
+ {"Name": "tag:" + RAMP_AWS_BACKEND_TAG, "Values": ["1"]},
+ {"Name": "instance-state-name", "Values": ["running"]},
]
)
instance_ids = [
- inst['Instances'][0]['InstanceId']
- for inst in instances['Reservations']
+ inst["Instances"][0]["InstanceId"] for inst in instances["Reservations"]
]
return instance_ids
@@ -343,17 +359,17 @@ def status_of_ec2_instance(config, instance_id):
not even ready to give the status.
"""
sess = _get_boto_session(config)
- client = sess.client('ec2')
- responses = client.describe_instance_status(
- InstanceIds=[instance_id])['InstanceStatuses']
+ client = sess.client("ec2")
+ responses = client.describe_instance_status(InstanceIds=[instance_id])[
+ "InstanceStatuses"
+ ]
if len(responses) == 1:
return responses[0]
else:
return None
-def upload_submission(config, instance_id, submission_name,
- submissions_dir):
+def upload_submission(config, instance_id, submission_name, submissions_dir):
"""
Upload a submission on an ec2 instance
@@ -378,9 +394,9 @@ def upload_submission(config, instance_id, submission_name,
out = _upload(config, instance_id, submission_path, dest_folder)
return out
except subprocess.CalledProcessError as e:
- logger.error(f'Unable to connect during log download: {e}')
+ logger.error(f"Unable to connect during log download: {e}")
except Exception as e:
- logger.error(f'Unknown error occured during log download: {e}')
+ logger.error(f"Unknown error occured during log download: {e}")
return 1
@@ -408,10 +424,10 @@ def download_log(config, instance_id, submission_name, folder=None):
"""
ramp_kit_folder = config[REMOTE_RAMP_KIT_FOLDER_FIELD]
source_path = os.path.join(
- ramp_kit_folder, SUBMISSIONS_FOLDER, submission_name, 'log')
+ ramp_kit_folder, SUBMISSIONS_FOLDER, submission_name, "log"
+ )
if folder is None:
- dest_path = os.path.join(
- config[LOCAL_LOG_FOLDER_FIELD], submission_name, 'log')
+ dest_path = os.path.join(config[LOCAL_LOG_FOLDER_FIELD], submission_name, "log")
else:
dest_path = folder
try:
@@ -426,11 +442,11 @@ def download_log(config, instance_id, submission_name, folder=None):
out = _download(config, instance_id, source_path, dest_path)
return out
except Exception as e:
- logger.error(f'Unknown error occured during log download: {e}')
- if n_try == n_tries-1:
- raise(e)
+ logger.error(f"Unknown error occured during log download: {e}")
+ if n_try == n_tries - 1:
+ raise (e)
else:
- logger.error('Trying to download the log once again')
+ logger.error("Trying to download the log once again")
def _get_log_content(config, submission_name):
@@ -444,24 +460,23 @@ def _get_log_content(config, submission_name):
a str with the content of the log file
"""
- path = os.path.join(
- config[LOCAL_LOG_FOLDER_FIELD],
- submission_name,
- 'log')
+ path = os.path.join(config[LOCAL_LOG_FOLDER_FIELD], submission_name, "log")
try:
- content = codecs.open(path, encoding='utf-8').read()
+ content = codecs.open(path, encoding="utf-8").read()
content = _filter_colors(content)
return content
except IOError:
- logger.error('Could not open log file of "{}" when trying to get '
- 'log content'.format(submission_name))
- return ''
+ logger.error(
+ 'Could not open log file of "{}" when trying to get '
+ "log content".format(submission_name)
+ )
+ return ""
def _filter_colors(content):
# filter linux colors from a string
# check (https://pypi.org/project/colored/)
- return re.sub(r'(\x1b\[)([\d]+;[\d]+;)?[\d]+m', '', content)
+ return re.sub(r"(\x1b\[)([\d]+;[\d]+;)?[\d]+m", "", content)
def download_mprof_data(config, instance_id, submission_name, folder=None):
@@ -491,24 +506,22 @@ def download_mprof_data(config, instance_id, submission_name, folder=None):
"""
ramp_kit_folder = config[REMOTE_RAMP_KIT_FOLDER_FIELD]
source_path = os.path.join(
- ramp_kit_folder,
- SUBMISSIONS_FOLDER,
- submission_name,
- 'mprof.dat')
+ ramp_kit_folder, SUBMISSIONS_FOLDER, submission_name, "mprof.dat"
+ )
if folder is None:
- dest_path = os.path.join(
- config[LOCAL_LOG_FOLDER_FIELD], submission_name) + os.sep
+ dest_path = (
+ os.path.join(config[LOCAL_LOG_FOLDER_FIELD], submission_name) + os.sep
+ )
else:
dest_path = folder
return _download(config, instance_id, source_path, dest_path)
def _get_submission_max_ram(config, submission_name):
- dest_path = os.path.join(
- config[LOCAL_LOG_FOLDER_FIELD], submission_name)
- filename = os.path.join(dest_path, 'mprof.dat')
- max_mem = 0.
- for line in codecs.open(filename, encoding='utf-8').readlines()[1:]:
+ dest_path = os.path.join(config[LOCAL_LOG_FOLDER_FIELD], submission_name)
+ filename = os.path.join(dest_path, "mprof.dat")
+ max_mem = 0.0
+ for line in codecs.open(filename, encoding="utf-8").readlines()[1:]:
_, mem, _ = line.split()
max_mem = max(max_mem, float(mem))
return max_mem
@@ -541,11 +554,13 @@ def download_predictions(config, instance_id, submission_name, folder=None):
path of the folder of `training_output` containing the predictions
"""
- source_path = _get_remote_training_output_folder(
- config, instance_id, submission_name) + '/'
+ source_path = (
+ _get_remote_training_output_folder(config, instance_id, submission_name) + "/"
+ )
if folder is None:
dest_path = os.path.join(
- config[LOCAL_PREDICTIONS_FOLDER_FIELD], submission_name)
+ config[LOCAL_PREDICTIONS_FOLDER_FIELD], submission_name
+ )
else:
dest_path = folder
try:
@@ -558,12 +573,13 @@ def download_predictions(config, instance_id, submission_name, folder=None):
_download(config, instance_id, source_path, dest_path)
return dest_path
except Exception as e:
- logger.error('Unknown error occured when downloading prediction'
- f' e: {str(e)}')
- if n_try == n_tries-1:
- raise(e)
+ logger.error(
+ "Unknown error occured when downloading prediction" f" e: {str(e)}"
+ )
+ if n_try == n_tries - 1:
+ raise (e)
else:
- logger.error('Trying to download the prediction once again')
+ logger.error("Trying to download the prediction once again")
def _get_remote_training_output_folder(config, instance_id, submission_name):
@@ -573,8 +589,9 @@ def _get_remote_training_output_folder(config, instance_id, submission_name):
~/ramp-kits/iris/submissions/submission_000001/training_output.
"""
ramp_kit_folder = config[REMOTE_RAMP_KIT_FOLDER_FIELD]
- path = os.path.join(ramp_kit_folder, SUBMISSIONS_FOLDER,
- submission_name, 'training_output')
+ path = os.path.join(
+ ramp_kit_folder, SUBMISSIONS_FOLDER, submission_name, "training_output"
+ )
return path
@@ -596,36 +613,40 @@ def launch_train(config, instance_id, submission_name):
"""
ramp_kit_folder = config[REMOTE_RAMP_KIT_FOLDER_FIELD]
values = {
- 'ramp_kit_folder': ramp_kit_folder,
- 'submission': submission_name,
- 'submission_folder': os.path.join(ramp_kit_folder, SUBMISSIONS_FOLDER,
- submission_name),
- 'log': os.path.join(ramp_kit_folder, SUBMISSIONS_FOLDER,
- submission_name, 'log')
+ "ramp_kit_folder": ramp_kit_folder,
+ "submission": submission_name,
+ "submission_folder": os.path.join(
+ ramp_kit_folder, SUBMISSIONS_FOLDER, submission_name
+ ),
+ "log": os.path.join(
+ ramp_kit_folder, SUBMISSIONS_FOLDER, submission_name, "log"
+ ),
}
# we use python -u so that standard input/output are flushed
# and thus we can retrieve the log file live during training
# without waiting for the process to finish.
# We use an espace character around "$" because it is interpreted
# before being run remotely and leads to an empty string
- run_cmd = (r"python -u \$(which ramp_test_submission) "
- r"--submission {submission} --save-y-preds ")
+ run_cmd = (
+ r"python -u \$(which ramp_test_submission) "
+ r"--submission {submission} --save-y-preds "
+ )
if config.get(MEMORY_PROFILING_FIELD):
run_cmd = (
"mprof run --output={submission_folder}/mprof.dat "
- "--include-children " + run_cmd)
+ "--include-children " + run_cmd
+ )
cmd = (
"screen -dm -S {submission} sh -c '. ~/.profile;"
"cd {ramp_kit_folder};"
"rm -fr {submission_folder}/training_output;"
"rm -f {submission_folder}/log;"
- "rm -f {submission_folder}/mprof.dat;"
- + run_cmd + ">{log} 2>&1'"
+ "rm -f {submission_folder}/mprof.dat;" + run_cmd + ">{log} 2>&1'"
)
cmd = cmd.format(**values)
# tag the ec2 instance with info about submission
_tag_instance_by_submission(config, instance_id, submission_name)
- logger.info('Launch training of {}..'.format(submission_name))
+ logger.info("Launch training of {}..".format(submission_name))
return _run(config, instance_id, cmd)
@@ -644,7 +665,7 @@ def abort_training(config, instance_id, submission_name):
submission_id : int
submission id
"""
- cmd = 'screen -S {} -X quit'.format(submission_name)
+ cmd = "screen -S {} -X quit".format(submission_name)
return _run(config, instance_id, cmd)
@@ -664,7 +685,7 @@ def _upload(config, instance_id, source, dest):
dest : str
remote file or folder
"""
- dest = '{user}@{ip}:' + dest
+ dest = "{user}@{ip}:" + dest
return _rsync(config, instance_id, source, dest)
@@ -685,7 +706,7 @@ def _download(config, instance_id, source, dest):
local file or folder
"""
- source = '{user}@{ip}:' + source
+ source = "{user}@{ip}:" + source
return _rsync(config, instance_id, source, dest)
@@ -713,18 +734,18 @@ def _rsync(config, instance_id, source, dest):
ami_username = config[AMI_USER_NAME_FIELD]
sess = _get_boto_session(config)
- resource = sess.resource('ec2')
+ resource = sess.resource("ec2")
inst = resource.Instance(instance_id)
ip = inst.public_ip_address
- fmt = {'user': ami_username, 'ip': ip}
+ fmt = {"user": ami_username, "ip": ip}
values = {
- 'user': ami_username,
- 'ip': ip,
- 'cmd': "ssh -o 'StrictHostKeyChecking no' -i " + key_path,
- 'source': source.format(**fmt),
- 'dest': dest.format(**fmt),
+ "user": ami_username,
+ "ip": ip,
+ "cmd": "ssh -o 'StrictHostKeyChecking no' -i " + key_path,
+ "source": source.format(**fmt),
+ "dest": dest.format(**fmt),
}
- cmd = "rsync -e \"{cmd}\" -avzP {source} {dest}".format(**values)
+ cmd = 'rsync -e "{cmd}" -avzP {source} {dest}'.format(**values)
logger.debug(cmd)
return subprocess.call(cmd, shell=True)
@@ -762,16 +783,16 @@ def _run(config, instance_id, cmd, return_output=False):
ami_username = config[AMI_USER_NAME_FIELD]
sess = _get_boto_session(config)
- resource = sess.resource('ec2')
+ resource = sess.resource("ec2")
inst = resource.Instance(instance_id)
ip = inst.public_ip_address
values = {
- 'user': ami_username,
- 'ip': ip,
- 'ssh': "ssh -o 'StrictHostKeyChecking no' -i " + key_path,
- 'cmd': cmd,
+ "user": ami_username,
+ "ip": ip,
+ "ssh": "ssh -o 'StrictHostKeyChecking no' -i " + key_path,
+ "cmd": cmd,
}
- cmd = "{ssh} {user}@{ip} \"{cmd}\"".format(**values)
+ cmd = '{ssh} {user}@{ip} "{cmd}"'.format(**values)
logger.debug(cmd)
if return_output:
return subprocess.check_output(cmd, shell=True)
@@ -785,8 +806,8 @@ def _is_ready(config, instance_id):
"""
st = status_of_ec2_instance(config, instance_id)
if st:
- check = st['InstanceStatus']['Details'][0]['Status']
- return check == 'passed'
+ check = st["InstanceStatus"]["Details"][0]["Status"]
+ return check == "passed"
else:
return False
@@ -798,15 +819,13 @@ def _training_finished(config, instance_id, submission_name):
return not _has_screen(config, instance_id, submission_name)
-def _training_successful(config, instance_id, submission_name,
- actual_nb_folds=None):
+def _training_successful(config, instance_id, submission_name, actual_nb_folds=None):
"""
Return True if a finished submission have been trained successfully.
If the folder training_output exists and each fold directory contains
.npz prediction files we consider that the training was successful.
"""
- folder = _get_remote_training_output_folder(
- config, instance_id, submission_name)
+ folder = _get_remote_training_output_folder(config, instance_id, submission_name)
cmd = "ls -l {}|grep fold_|wc -l".format(folder)
nb_folds = int(_run(config, instance_id, cmd, return_output=True))
@@ -857,34 +876,32 @@ def _tag_instance_by_submission(config, instance_id, submission_name):
# config, instance_id, 'team_name', submission.team.name)
# name = _get_submission_label(submission)
# _add_or_update_tag(config, instance_id, 'Name', name)
- _add_or_update_tag(config, instance_id, 'Name', submission_name)
+ _add_or_update_tag(config, instance_id, "Name", submission_name)
def _add_or_update_tag(config, instance_id, key, value):
sess = _get_boto_session(config)
- client = sess.client('ec2')
+ client = sess.client("ec2")
tags = [
- {'Key': key, 'Value': value},
+ {"Key": key, "Value": value},
]
return client.create_tags(Resources=[instance_id], Tags=tags)
def _get_tags(config, instance_id):
sess = _get_boto_session(config)
- client = sess.client('ec2')
- filters = [
- {'Name': 'resource-id', 'Values': [instance_id]}
- ]
+ client = sess.client("ec2")
+ filters = [{"Name": "resource-id", "Values": [instance_id]}]
response = client.describe_tags(Filters=filters)
- for t in response['Tags']:
- t['Key'], t['Value']
- return {t['Key']: t['Value'] for t in response['Tags']}
+ for t in response["Tags"]:
+ t["Key"], t["Value"]
+ return {t["Key"]: t["Value"] for t in response["Tags"]}
def _delete_tag(config, instance_id, key):
sess = _get_boto_session(config)
- client = sess.client('ec2')
- tags = [{'Key': key}]
+ client = sess.client("ec2")
+ tags = [{"Key": key}]
return client.delete_tags(Resources=[instance_id], Tags=tags)
@@ -905,8 +922,11 @@ def _get_boto_session(config):
else:
raise ValueError(
'Please specify either "{}" or both of "{}" and "{}"'.format(
- PROFILE_NAME_FIELD, ACCESS_KEY_ID_FIELD,
- SECRET_ACCESS_KEY_FIELD))
+ PROFILE_NAME_FIELD,
+ ACCESS_KEY_ID_FIELD,
+ SECRET_ACCESS_KEY_FIELD,
+ )
+ )
def validate_config(config):
@@ -915,8 +935,7 @@ def validate_config(config):
raises ValueError if it is not correct.
"""
if AWS_CONFIG_SECTION not in config:
- raise ValueError(
- 'Expects "{}" section in config'.format(AWS_CONFIG_SECTION))
+ raise ValueError('Expects "{}" section in config'.format(AWS_CONFIG_SECTION))
conf = config[AWS_CONFIG_SECTION]
for k in conf.keys():
if k not in ALL_FIELDS:
@@ -930,38 +949,47 @@ def validate_config(config):
}
for k in required_fields_:
if k not in conf:
- raise ValueError(
- 'Required field "{}" missing from config'.format(k))
+ raise ValueError('Required field "{}" missing from config'.format(k))
if AMI_IMAGE_NAME_FIELD in conf and AMI_IMAGE_ID_FIELD in conf:
raise ValueError(
'The fields "{}" and "{}" cannot be both '
- 'specified at the same time. Please specify only '
- 'one of them'.format(AMI_IMAGE_NAME_FIELD, AMI_IMAGE_ID_FIELD))
+ "specified at the same time. Please specify only "
+ "one of them".format(AMI_IMAGE_NAME_FIELD, AMI_IMAGE_ID_FIELD)
+ )
if AMI_IMAGE_NAME_FIELD not in conf and AMI_IMAGE_ID_FIELD not in conf:
raise ValueError(
'Please specify either "{}" or "{}" in config.'.format(
- AMI_IMAGE_NAME_FIELD, AMI_IMAGE_ID_FIELD))
- if (PROFILE_NAME_FIELD in conf
- and (ACCESS_KEY_ID_FIELD in conf
- or SECRET_ACCESS_KEY_FIELD in conf)):
+ AMI_IMAGE_NAME_FIELD, AMI_IMAGE_ID_FIELD
+ )
+ )
+ if PROFILE_NAME_FIELD in conf and (
+ ACCESS_KEY_ID_FIELD in conf or SECRET_ACCESS_KEY_FIELD in conf
+ ):
raise ValueError(
'Please specify either "{}" or both of "{}" and "{}"'.format(
- PROFILE_NAME_FIELD, ACCESS_KEY_ID_FIELD,
- SECRET_ACCESS_KEY_FIELD))
- if (PROFILE_NAME_FIELD not in conf
- and not (ACCESS_KEY_ID_FIELD in conf
- and SECRET_ACCESS_KEY_FIELD in conf)):
- raise ValueError('Please specify both "{}" and "{}"'.format(
- ACCESS_KEY_ID_FIELD, SECRET_ACCESS_KEY_FIELD,
- ))
+ PROFILE_NAME_FIELD,
+ ACCESS_KEY_ID_FIELD,
+ SECRET_ACCESS_KEY_FIELD,
+ )
+ )
+ if PROFILE_NAME_FIELD not in conf and not (
+ ACCESS_KEY_ID_FIELD in conf and SECRET_ACCESS_KEY_FIELD in conf
+ ):
+ raise ValueError(
+ 'Please specify both "{}" and "{}"'.format(
+ ACCESS_KEY_ID_FIELD,
+ SECRET_ACCESS_KEY_FIELD,
+ )
+ )
hooks = conf.get(HOOKS_SECTION)
if hooks:
for hook_name in hooks.keys():
if hook_name not in HOOKS:
- hook_names = ','.join(HOOKS)
+ hook_names = ",".join(HOOKS)
raise ValueError(
- 'Invalid hook name : {}, hooks should be one of '
- 'these : {}'.format(hook_name, hook_names))
+ "Invalid hook name : {}, hooks should be one of "
+ "these : {}".format(hook_name, hook_names)
+ )
def is_spot_terminated(config, instance_id):
@@ -970,26 +998,31 @@ def is_spot_terminated(config, instance_id):
'instance-action' will be present."""
cmd_timeout = 1
n_retry = 9
- cmd = ("curl http://169.254.169.254/latest/meta-data/instance-action"
- f" -m {cmd_timeout} --retry {n_retry}")
+ cmd = (
+ "curl http://169.254.169.254/latest/meta-data/instance-action"
+ f" -m {cmd_timeout} --retry {n_retry}"
+ )
try:
out = _run(config, instance_id, cmd, return_output=True)
- out = out.decode('utf-8')
+ out = out.decode("utf-8")
except subprocess.CalledProcessError:
- logger.error('Unable to run curl: {e}')
+ logger.error("Unable to run curl: {e}")
return False
except Exception as e:
- logger.error('Unhandled exception occurred when checking for'
- f' instance action: {e}')
+ logger.error(
+ "Unhandled exception occurred when checking for" f" instance action: {e}"
+ )
return False
- if out == 'none':
+ if out == "none":
terminated = False
else:
- logger.info(f'An instance-action is present on {instance_id}, '
- 'indicating that this spot instance is marked for '
- 'termination.')
+ logger.info(
+ f"An instance-action is present on {instance_id}, "
+ "indicating that this spot instance is marked for "
+ "termination."
+ )
terminated = True
return terminated
@@ -997,6 +1030,10 @@ def is_spot_terminated(config, instance_id):
def check_instance_status(config, instance_id):
"""Return the status of an instance."""
sess = _get_boto_session(config)
- client = sess.client('ec2')
- response = client.describe_instance_status(InstanceIds=[instance_id, ])
- return response['InstanceStatuses'][0]['InstanceState']['Name']
+ client = sess.client("ec2")
+ response = client.describe_instance_status(
+ InstanceIds=[
+ instance_id,
+ ]
+ )
+ return response["InstanceStatuses"][0]["InstanceState"]["Name"]
diff --git a/ramp-engine/ramp_engine/aws/worker.py b/ramp-engine/ramp_engine/aws/worker.py
index 5c5a9b1e5..8e7af9d07 100644
--- a/ramp-engine/ramp_engine/aws/worker.py
+++ b/ramp-engine/ramp_engine/aws/worker.py
@@ -5,11 +5,11 @@
from . import api as aws
-logger = logging.getLogger('RAMP-AWS')
+logger = logging.getLogger("RAMP-AWS")
-log_file = 'aws_worker.log'
-formatter = logging.Formatter('%(asctime)s %(name)s %(levelname)s %(message)s') # noqa
-fileHandler = logging.FileHandler(log_file, mode='a')
+log_file = "aws_worker.log"
+formatter = logging.Formatter("%(asctime)s %(name)s %(levelname)s %(message)s") # noqa
+fileHandler = logging.FileHandler(log_file, mode="a")
fileHandler.setFormatter(formatter)
streamHandler = logging.StreamHandler()
streamHandler.setFormatter(formatter)
@@ -47,7 +47,7 @@ class AWSWorker(BaseWorker):
def __init__(self, config, submission):
super().__init__(config, submission)
- self.submissions_path = self.config['submissions_dir']
+ self.submissions_path = self.config["submissions_dir"]
self.instance = None
def setup(self):
@@ -57,36 +57,41 @@ def setup(self):
to the instance.
"""
# sanity check for the configuration variable
- for required_param in ('instance_type', 'access_key_id'):
+ for required_param in ("instance_type", "access_key_id"):
self._check_config_name(self.config, required_param)
- logger.info("Setting up AWSWorker for submission '{}'".format(
- self.submission))
+ logger.info("Setting up AWSWorker for submission '{}'".format(self.submission))
_instances, status = aws.launch_ec2_instances(self.config)
if not _instances:
- if status == 'retry':
+ if status == "retry":
# there was a timeout error, put this submission back in the
# queue and try again later
- logger.warning("Unable to launch instance for submission "
- f"{self.submission}. Adding it back to the "
- "queue and will try again later")
- self.status = 'retry'
+ logger.warning(
+ "Unable to launch instance for submission "
+ f"{self.submission}. Adding it back to the "
+ "queue and will try again later"
+ )
+ self.status = "retry"
else:
- logger.error("Unable to launch instance for submission "
- f"{self.submission}. An error occured: {status}")
- self.status = 'error'
+ logger.error(
+ "Unable to launch instance for submission "
+ f"{self.submission}. An error occured: {status}"
+ )
+ self.status = "error"
return
else:
- logger.info("Instance launched for submission '{}'".format(
- self.submission))
- self.instance, = _instances
+ logger.info("Instance launched for submission '{}'".format(self.submission))
+ (self.instance,) = _instances
for _ in range(5):
# try uploading the submission a few times, as this regularly fails
exit_status = aws.upload_submission(
- self.config, self.instance.id, self.submission,
- self.submissions_path)
+ self.config,
+ self.instance.id,
+ self.submission,
+ self.submissions_path,
+ )
if exit_status == 0:
break
else:
@@ -94,11 +99,12 @@ def setup(self):
if exit_status != 0:
logger.error(
'Cannot upload submission "{}"'
- ', an error occured'.format(self.submission))
- self.status = 'error'
+ ", an error occured".format(self.submission)
+ )
+ self.status = "error"
else:
logger.info("Uploaded submission '{}'".format(self.submission))
- self.status = 'setup'
+ self.status = "setup"
def launch_submission(self):
"""Launch the submission.
@@ -106,37 +112,41 @@ def launch_submission(self):
Basically, this runs ``ramp_test_submission`` inside the
Amazon instance.
"""
- if self.status == 'running':
- raise RuntimeError("Cannot launch submission: one is already "
- "started")
- if self.status == 'error':
+ if self.status == "running":
+ raise RuntimeError("Cannot launch submission: one is already " "started")
+ if self.status == "error":
raise RuntimeError("Cannot launch submission: the setup failed")
try:
exit_status = aws.launch_train(
- self.config, self.instance.id, self.submission)
+ self.config, self.instance.id, self.submission
+ )
except Exception as e:
- logger.error(f'Unknown error occurred: {e}')
+ logger.error(f"Unknown error occurred: {e}")
exit_status = 1
if exit_status != 0:
logger.error(
'Cannot start training of submission "{}"'
- ', an error occured.'.format(self.submission))
- self.status = 'error'
+ ", an error occured.".format(self.submission)
+ )
+ self.status = "error"
else:
- self.status = 'running'
+ self.status = "running"
return exit_status
def _is_submission_finished(self):
try:
return aws._training_finished(
- self.config, self.instance.id, self.submission)
+ self.config, self.instance.id, self.submission
+ )
except subprocess.CalledProcessError as e:
# it is no longer possible to connect to the instance
# possibly it was terminated from outside. restart the submission
- logger.warning("Unable to connect to the instance for submission "
- f"{self.submission}. Adding the submission back to"
- " the queue and will try again later")
+ logger.warning(
+ "Unable to connect to the instance for submission "
+ f"{self.submission}. Adding the submission back to"
+ " the queue and will try again later"
+ )
raise e
def _is_submission_interrupted(self):
@@ -150,45 +160,46 @@ def collect_results(self):
# with dispatcher).
# The event config: 'check_finished_training_interval_secs'
# is used here, but again only when worker used alone.
- if self.status == 'running':
+ if self.status == "running":
aws._wait_until_train_finished(
- self.config, self.instance.id, self.submission)
- self.status = 'finished'
- if self.status != 'finished':
- raise ValueError("Cannot collect results if worker is not"
- "'running' or 'finished'")
+ self.config, self.instance.id, self.submission
+ )
+ self.status = "finished"
+ if self.status != "finished":
+ raise ValueError(
+ "Cannot collect results if worker is not" "'running' or 'finished'"
+ )
logger.info("Collecting submission '{}'".format(self.submission))
exit_status = 0
try:
- _ = aws.download_log(self.config,
- self.instance.id, self.submission)
+ _ = aws.download_log(self.config, self.instance.id, self.submission)
except Exception as e:
- logger.error("Error occurred when downloading the logs"
- f" from the submission: {e}")
+ logger.error(
+ "Error occurred when downloading the logs" f" from the submission: {e}"
+ )
exit_status = 2
error_msg = str(e)
- self.status = 'error'
+ self.status = "error"
if exit_status == 0:
- if aws._training_successful(
- self.config, self.instance.id, self.submission):
+ if aws._training_successful(self.config, self.instance.id, self.submission):
try:
- _ = aws.download_predictions(self.config,
- self.instance.id,
- self.submission)
+ _ = aws.download_predictions(
+ self.config, self.instance.id, self.submission
+ )
except Exception as e:
- logger.error("Downloading the prediction failed with"
- f"error {e}")
- self.status = 'error'
+ logger.error("Downloading the prediction failed with" f"error {e}")
+ self.status = "error"
exit_status, error_msg = 1, str(e)
else:
- self.status = 'collected'
- exit_status, error_msg = 0, ''
+ self.status = "collected"
+ exit_status, error_msg = 0, ""
else:
error_msg = _get_traceback(
- aws._get_log_content(self.config, self.submission))
- self.status = 'collected'
+ aws._get_log_content(self.config, self.submission)
+ )
+ self.status = "collected"
exit_status = 1
logger.info(repr(self))
return exit_status, error_msg
@@ -197,9 +208,7 @@ def teardown(self):
"""Terminate the Amazon instance"""
# Only terminate if instance is running
if self.instance:
- instance_status = aws.check_instance_status(
- self.config, self.instance.id
- )
- if instance_status == 'running':
+ instance_status = aws.check_instance_status(self.config, self.instance.id)
+ if instance_status == "running":
aws.terminate_ec2_instance(self.config, self.instance.id)
super().teardown()
diff --git a/ramp-engine/ramp_engine/base.py b/ramp-engine/ramp_engine/base.py
index 6a6d0efd0..d9ba59ebe 100644
--- a/ramp-engine/ramp_engine/base.py
+++ b/ramp-engine/ramp_engine/base.py
@@ -3,11 +3,11 @@
from datetime import datetime
import subprocess
-logger = logging.getLogger('RAMP-WORKER')
+logger = logging.getLogger("RAMP-WORKER")
log_file = "worker.log"
-formatter = logging.Formatter('%(asctime)s %(name)s %(levelname)s %(message)s') # noqa
-fileHandler = logging.FileHandler(log_file, mode='a')
+formatter = logging.Formatter("%(asctime)s %(name)s %(levelname)s %(message)s") # noqa
+fileHandler = logging.FileHandler(log_file, mode="a")
fileHandler.setFormatter(formatter)
streamHandler = logging.StreamHandler()
streamHandler.setFormatter(formatter)
@@ -41,27 +41,29 @@ class BaseWorker(metaclass=ABCMeta):
* 'retry': the worker has been interrupted (and will be retried).
* 'killed'
"""
+
def __init__(self, config, submission):
self.config = config
self.submission = submission
- self.status = 'initialized'
+ self.status = "initialized"
def setup(self):
"""Setup the worker with some given setting required before launching
a submission."""
- self.status = 'setup'
+ self.status = "setup"
@staticmethod
def _check_config_name(config, param):
if param not in config.keys():
- raise ValueError("The worker required the parameter '{}' in the "
- "configuration given at instantiation. Only {}"
- "parameters were given."
- .format(param, config.keys()))
+ raise ValueError(
+ "The worker required the parameter '{}' in the "
+ "configuration given at instantiation. Only {}"
+ "parameters were given.".format(param, config.keys())
+ )
def teardown(self):
"""Clean up (i.e., removing path, etc.) before killing the worker."""
- self.status = 'killed'
+ self.status = "killed"
@abstractmethod
def _is_submission_interrupted(self):
@@ -77,17 +79,17 @@ def _is_submission_finished(self):
def status(self):
status = self._status
try:
- if status == 'running':
+ if status == "running":
self._status_running_check_time = datetime.utcnow()
if self._is_submission_interrupted():
- self._status = 'retry'
+ self._status = "retry"
elif self._is_submission_finished():
- self._status = 'finished'
+ self._status = "finished"
except subprocess.CalledProcessError:
# there was a problem while connecting to the worker
# if you are using AWS it might be that an instance was terminated
# from outside. retry the submission
- self._status = 'retry'
+ self._status = "retry"
return self._status
@status.setter
@@ -112,27 +114,32 @@ def time_since_last_status_check(self):
"""
if not hasattr(self, "_status_running_check_time"):
return None
- elapsed_time = ((datetime.utcnow() -
- self._status_running_check_time).total_seconds())
+ elapsed_time = (
+ datetime.utcnow() - self._status_running_check_time
+ ).total_seconds()
return elapsed_time
@abstractmethod
def launch_submission(self):
"""Launch a submission to be trained."""
- self.status = 'running'
+ self.status = "running"
@abstractmethod
def collect_results(self):
"""Collect the results after submission training."""
- if self.status == 'initialized':
- raise ValueError('The worker has not been setup and no submission '
- 'was launched. Call the method setup() and '
- 'launch_submission() before to collect the '
- 'results.')
- elif self.status == 'setup':
- raise ValueError('No submission was launched. Call the method '
- 'launch_submission() and then try again to '
- 'collect the results.')
+ if self.status == "initialized":
+ raise ValueError(
+ "The worker has not been setup and no submission "
+ "was launched. Call the method setup() and "
+ "launch_submission() before to collect the "
+ "results."
+ )
+ elif self.status == "setup":
+ raise ValueError(
+ "No submission was launched. Call the method "
+ "launch_submission() and then try again to "
+ "collect the results."
+ )
def launch(self):
"""Launch a standalone RAMP worker.
@@ -148,10 +155,11 @@ def launch(self):
self.teardown()
def __str__(self):
- msg = ('{worker_name}({submission_name}): status="{status}"'
- .format(worker_name=self.__class__.__name__,
- submission_name=self.submission,
- status=self.status))
+ msg = '{worker_name}({submission_name}): status="{status}"'.format(
+ worker_name=self.__class__.__name__,
+ submission_name=self.submission,
+ status=self.status,
+ )
return msg
def __repr__(self):
@@ -174,13 +182,13 @@ def _get_traceback(content):
"""
if not content:
- return ''
+ return ""
# cut_exception_text = content.rfind('--->')
# was like commented line above in ramp-board
# but there is no ---> in logs when we use
# ramp_test_submission, so we just search for the
# first occurence of 'Traceback'.
- cut_exception_text = content.find('Traceback')
+ cut_exception_text = content.find("Traceback")
if cut_exception_text > 0:
content = content[cut_exception_text:]
return content
diff --git a/ramp-engine/ramp_engine/cli.py b/ramp-engine/ramp_engine/cli.py
index 322790fe1..9c116423b 100644
--- a/ramp-engine/ramp_engine/cli.py
+++ b/ramp-engine/ramp_engine/cli.py
@@ -10,7 +10,7 @@
from ramp_engine import available_workers
-CONTEXT_SETTINGS = dict(help_option_names=['-h', '--help'])
+CONTEXT_SETTINGS = dict(help_option_names=["-h", "--help"])
@click.group(context_settings=CONTEXT_SETTINGS)
@@ -20,12 +20,18 @@ def main():
@main.command()
-@click.option("--config", default='config.yml', show_default=True,
- help='Configuration file in YAML format containing the database '
- 'information.')
-@click.option("--events-dir", show_default=True,
- help='Directory where the event config files are located.')
-@click.option('-v', '--verbose', count=True)
+@click.option(
+ "--config",
+ default="config.yml",
+ show_default=True,
+ help="Configuration file in YAML format containing the database " "information.",
+)
+@click.option(
+ "--events-dir",
+ show_default=True,
+ help="Directory where the event config files are located.",
+)
+@click.option("-v", "--verbose", count=True)
def daemon(config, events_dir, verbose):
"""Launch the RAMP dispatcher.
@@ -38,8 +44,9 @@ def daemon(config, events_dir, verbose):
else:
level = logging.DEBUG
logging.basicConfig(
- format='%(asctime)s - %(levelname)s - %(name)s - %(message)s',
- level=level, datefmt='%Y:%m:%d %H:%M:%S'
+ format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
+ level=level,
+ datefmt="%Y:%m:%d %H:%M:%S",
)
daemon = Daemon(config=config, events_dir=events_dir)
@@ -47,13 +54,18 @@ def daemon(config, events_dir, verbose):
@main.command()
-@click.option("--config", default='config.yml', show_default=True,
- help='Configuration file in YAML format containing the database '
- 'information.')
-@click.option("--event-config", show_default=True,
- help='Configuration file in YAML format containing the RAMP '
- 'event information.')
-@click.option('-v', '--verbose', count=True)
+@click.option(
+ "--config",
+ default="config.yml",
+ show_default=True,
+ help="Configuration file in YAML format containing the database " "information.",
+)
+@click.option(
+ "--event-config",
+ show_default=True,
+ help="Configuration file in YAML format containing the RAMP " "event information.",
+)
+@click.option("-v", "--verbose", count=True)
def dispatcher(config, event_config, verbose):
"""Launch the RAMP dispatcher.
@@ -66,36 +78,44 @@ def dispatcher(config, event_config, verbose):
else:
level = logging.DEBUG
logging.basicConfig(
- format='%(asctime)s - %(levelname)s - %(name)s - %(message)s',
- level=level, datefmt='%Y:%m:%d %H:%M:%S'
+ format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
+ level=level,
+ datefmt="%Y:%m:%d %H:%M:%S",
)
internal_event_config = read_config(event_config)
- worker_type = available_workers[
- internal_event_config['worker']['worker_type']
- ]
-
- dispatcher_config = (internal_event_config['dispatcher']
- if 'dispatcher' in internal_event_config else {})
- n_workers = dispatcher_config.get('n_workers', -1)
- n_threads = dispatcher_config.get('n_threads', None)
- hunger_policy = dispatcher_config.get('hunger_policy', 'sleep')
- time_between_collection = dispatcher_config.get(
- 'time_between_collection', 1)
+ worker_type = available_workers[internal_event_config["worker"]["worker_type"]]
+
+ dispatcher_config = (
+ internal_event_config["dispatcher"]
+ if "dispatcher" in internal_event_config
+ else {}
+ )
+ n_workers = dispatcher_config.get("n_workers", -1)
+ n_threads = dispatcher_config.get("n_threads", None)
+ hunger_policy = dispatcher_config.get("hunger_policy", "sleep")
+ time_between_collection = dispatcher_config.get("time_between_collection", 1)
disp = Dispatcher(
- config=config, event_config=event_config, worker=worker_type,
- n_workers=n_workers, n_threads=n_threads, hunger_policy=hunger_policy,
- time_between_collection=time_between_collection
+ config=config,
+ event_config=event_config,
+ worker=worker_type,
+ n_workers=n_workers,
+ n_threads=n_threads,
+ hunger_policy=hunger_policy,
+ time_between_collection=time_between_collection,
)
disp.launch()
@main.command()
-@click.option("--event-config", default='config.yml', show_default=True,
- help='Configuration file in YAML format containing the RAMP '
- 'event information.')
-@click.option('--submission', help='The submission name')
-@click.option('-v', '--verbose', is_flag=True)
+@click.option(
+ "--event-config",
+ default="config.yml",
+ show_default=True,
+ help="Configuration file in YAML format containing the RAMP " "event information.",
+)
+@click.option("--submission", help="The submission name")
+@click.option("-v", "--verbose", is_flag=True)
def worker(event_config, submission, verbose):
"""Launch a standalone RAMP worker.
@@ -108,12 +128,13 @@ def worker(event_config, submission, verbose):
else:
level = logging.DEBUG
logging.basicConfig(
- format='%(asctime)s - %(levelname)s - %(name)s - %(message)s',
- level=level, datefmt='%Y:%m:%d %H:%M:%S'
+ format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
+ level=level,
+ datefmt="%Y:%m:%d %H:%M:%S",
)
config = read_config(event_config)
worker_params = generate_worker_config(config)
- worker_type = available_workers[worker_params['worker_type']]
+ worker_type = available_workers[worker_params["worker_type"]]
worker = worker_type(worker_params, submission)
worker.launch()
@@ -122,5 +143,5 @@ def start():
main()
-if __name__ == '__main__':
+if __name__ == "__main__":
start()
diff --git a/ramp-engine/ramp_engine/conda.py b/ramp-engine/ramp_engine/conda.py
index 4c103a4b5..904e5d986 100644
--- a/ramp-engine/ramp_engine/conda.py
+++ b/ramp-engine/ramp_engine/conda.py
@@ -10,7 +10,7 @@ def _conda_info_envs() -> Dict:
proc = subprocess.Popen(
["conda", "info", "--envs", "--json"],
stdout=subprocess.PIPE,
- stderr=subprocess.PIPE
+ stderr=subprocess.PIPE,
)
stdout, _ = proc.communicate()
conda_info = json.loads(stdout)
@@ -18,43 +18,57 @@ def _conda_info_envs() -> Dict:
def _get_conda_env_path(conda_info: Dict, env_name: str, worker=None) -> str:
- """Get path for a python executable of a conda env
- """
+ """Get path for a python executable of a conda env"""
import os
- if env_name == 'base':
- return os.path.join(conda_info['envs'][0], 'bin')
+ if env_name == "base":
+ return os.path.join(conda_info["envs"][0], "bin")
else:
- envs_path = conda_info['envs'][1:]
+ envs_path = conda_info["envs"][1:]
if not envs_path:
- worker.status = 'error'
- raise ValueError('Only the conda base environment exist. You '
- 'need to create the "{}" conda environment '
- 'to use it.'.format(env_name))
+ worker.status = "error"
+ raise ValueError(
+ "Only the conda base environment exist. You "
+ 'need to create the "{}" conda environment '
+ "to use it.".format(env_name)
+ )
for env in envs_path:
if env_name == os.path.split(env)[-1]:
- return os.path.join(env, 'bin')
- worker.status = 'error'
- raise ValueError(f'The specified conda environment {env_name} '
- f'does not exist. You need to create it.')
+ return os.path.join(env, "bin")
+ worker.status = "error"
+ raise ValueError(
+ f"The specified conda environment {env_name} "
+ f"does not exist. You need to create it."
+ )
-def _conda_ramp_test_submission(config: Dict, submission: str, cmd_ramp: str,
- log_dir: str, wait: bool = False):
+def _conda_ramp_test_submission(
+ config: Dict,
+ submission: str,
+ cmd_ramp: str,
+ log_dir: str,
+ wait: bool = False,
+):
import os
import subprocess
if not os.path.exists(log_dir):
os.makedirs(log_dir)
- log_file = open(os.path.join(log_dir, 'log'), 'wb+')
+ log_file = open(os.path.join(log_dir, "log"), "wb+")
proc = subprocess.Popen(
- [cmd_ramp,
- '--submission', submission,
- '--ramp-kit-dir', config['kit_dir'],
- '--ramp-data-dir', config['data_dir'],
- '--ramp-submission-dir', config['submissions_dir'],
- '--save-output',
- '--ignore-warning'],
+ [
+ cmd_ramp,
+ "--submission",
+ submission,
+ "--ramp-kit-dir",
+ config["kit_dir"],
+ "--ramp-data-dir",
+ config["data_dir"],
+ "--ramp-submission-dir",
+ config["submissions_dir"],
+ "--save-output",
+ "--ignore-warning",
+ ],
stdout=log_file,
stderr=log_file,
)
diff --git a/ramp-engine/ramp_engine/daemon.py b/ramp-engine/ramp_engine/daemon.py
index 62706fad4..ba90217f7 100644
--- a/ramp-engine/ramp_engine/daemon.py
+++ b/ramp-engine/ramp_engine/daemon.py
@@ -29,14 +29,10 @@ class Daemon:
def __init__(self, config, events_dir):
self.config = config
- self._database_config = read_config(
- config, filter_section="sqlalchemy"
- )
+ self._database_config = read_config(config, filter_section="sqlalchemy")
self.events_dir = os.path.abspath(events_dir)
if not os.path.isdir(self.events_dir):
- raise ValueError(
- "The path {} is not existing.".format(events_dir)
- )
+ raise ValueError("The path {} is not existing.".format(events_dir))
self._proc = deque()
signal.signal(signal.SIGINT, self.kill_dispatcher)
signal.signal(signal.SIGTERM, self.kill_dispatcher)
@@ -45,14 +41,15 @@ def __init__(self, config, events_dir):
def launch_dispatchers(self, session):
events = [e for e in session.query(Event).all() if e.is_open]
for e in events:
- event_config = os.path.join(
- self.events_dir, e.name, "config.yml"
- )
+ event_config = os.path.join(self.events_dir, e.name, "config.yml")
cmd_dispatcher = [
- "ramp-launch", "dispatcher",
- "--config", self.config,
- "--event-config", event_config,
- "--verbose"
+ "ramp-launch",
+ "dispatcher",
+ "--config",
+ self.config,
+ "--event-config",
+ event_config,
+ "--verbose",
]
proc = subprocess.Popen(
cmd_dispatcher,
@@ -60,17 +57,13 @@ def launch_dispatchers(self, session):
stderr=subprocess.PIPE,
)
self._proc.append((e.name, proc))
- logger.info(
- "Launch dispatcher for the event {}".format(e.name)
- )
+ logger.info("Launch dispatcher for the event {}".format(e.name))
def kill_dispatcher(self, signum, frame):
while len(self._proc) != 0:
event, proc = self._proc.pop()
proc.kill()
- logger.info(
- "Kill dispatcher for the event {}".format(event)
- )
+ logger.info("Kill dispatcher for the event {}".format(event))
self._poison_pill = True
def launch(self):
diff --git a/ramp-engine/ramp_engine/dispatcher.py b/ramp-engine/ramp_engine/dispatcher.py
index d78a42ef6..339444a5d 100644
--- a/ramp-engine/ramp_engine/dispatcher.py
+++ b/ramp-engine/ramp_engine/dispatcher.py
@@ -29,11 +29,11 @@
from .local import CondaEnvWorker
-logger = logging.getLogger('RAMP-DISPATCHER')
+logger = logging.getLogger("RAMP-DISPATCHER")
-log_file = 'dispatcher.log'
-formatter = logging.Formatter('%(asctime)s %(name)s %(levelname)s %(message)s') # noqa
-fileHandler = logging.FileHandler(log_file, mode='a')
+log_file = "dispatcher.log"
+formatter = logging.Formatter("%(asctime)s %(name)s %(levelname)s %(message)s") # noqa
+fileHandler = logging.FileHandler(log_file, mode="a")
fileHandler.setFormatter(formatter)
streamHandler = logging.StreamHandler()
streamHandler.setFormatter(formatter)
@@ -86,12 +86,23 @@ class Dispatcher:
Thus, if the time between checks is too small, the repetitive
SSH requests may be potentially blocked by the cloud provider.
"""
- def __init__(self, config, event_config, worker=None, n_workers=1,
- n_threads=None, hunger_policy=None,
- time_between_collection=1):
+
+ def __init__(
+ self,
+ config,
+ event_config,
+ worker=None,
+ n_workers=1,
+ n_threads=None,
+ hunger_policy=None,
+ time_between_collection=1,
+ ):
self.worker = CondaEnvWorker if worker is None else worker
- self.n_workers = (max(multiprocessing.cpu_count() + 1 + n_workers, 1)
- if n_workers < 0 else n_workers)
+ self.n_workers = (
+ max(multiprocessing.cpu_count() + 1 + n_workers, 1)
+ if n_workers < 0
+ else n_workers
+ )
self.hunger_policy = hunger_policy
self.time_between_collection = time_between_collection
# init the poison pill to kill the dispatcher
@@ -101,14 +112,12 @@ def __init__(self, config, event_config, worker=None, n_workers=1,
self._processing_worker_queue = LifoQueue(maxsize=self.n_workers)
self._processed_submission_queue = Queue()
# split the different configuration required
- if (isinstance(config, str) and
- isinstance(event_config, str)):
- self._database_config = read_config(config,
- filter_section='sqlalchemy')
+ if isinstance(config, str) and isinstance(event_config, str):
+ self._database_config = read_config(config, filter_section="sqlalchemy")
self._ramp_config = generate_ramp_config(event_config, config)
else:
- self._database_config = config['sqlalchemy']
- self._ramp_config = event_config['ramp']
+ self._database_config = config["sqlalchemy"]
+ self._ramp_config = event_config["ramp"]
self._worker_config = generate_worker_config(event_config, config)
# set the number of threads for openmp, openblas, and mkl
self.n_threads = n_threads
@@ -117,16 +126,16 @@ def __init__(self, config, event_config, worker=None, n_workers=1,
raise TypeError(
"The parameter 'n_threads' should be a positive integer. "
"Got {} instead.".format(repr(self.n_threads))
- )
- for lib in ('OMP', 'MKL', 'OPENBLAS'):
- os.environ[lib + '_NUM_THREADS'] = str(self.n_threads)
- self._logger = logger.getChild(self._ramp_config['event_name'])
+ )
+ for lib in ("OMP", "MKL", "OPENBLAS"):
+ os.environ[lib + "_NUM_THREADS"] = str(self.n_threads)
+ self._logger = logger.getChild(self._ramp_config["event_name"])
def fetch_from_db(self, session):
"""Fetch the submission from the database and create the workers."""
- submissions = get_submissions(session,
- self._ramp_config['event_name'],
- state='new')
+ submissions = get_submissions(
+ session, self._ramp_config["event_name"], state="new"
+ )
if not submissions:
return
for submission_id, submission_name, _ in submissions:
@@ -136,124 +145,131 @@ def fetch_from_db(self, session):
continue
# create the worker
worker = self.worker(self._worker_config, submission_name)
- set_submission_state(session, submission_id, 'sent_to_training')
+ set_submission_state(session, submission_id, "sent_to_training")
update_user_leaderboards(
- session, self._ramp_config['event_name'],
- submission .team.name, new_only=True,
+ session,
+ self._ramp_config["event_name"],
+ submission.team.name,
+ new_only=True,
+ )
+ self._awaiting_worker_queue.put_nowait(
+ (worker, (submission_id, submission_name))
)
- self._awaiting_worker_queue.put_nowait((worker, (submission_id,
- submission_name)))
self._logger.info(
- f'Submission {submission_name} added to the queue of '
- 'submission to be processed'
+ f"Submission {submission_name} added to the queue of "
+ "submission to be processed"
)
def launch_workers(self, session):
"""Launch the awaiting workers if possible."""
- while (not self._processing_worker_queue.full() and
- not self._awaiting_worker_queue.empty()):
- worker, (submission_id, submission_name) = \
- self._awaiting_worker_queue.get()
- self._logger.info(f'Starting worker: {worker}')
+ while (
+ not self._processing_worker_queue.full()
+ and not self._awaiting_worker_queue.empty()
+ ):
+ worker, (
+ submission_id,
+ submission_name,
+ ) = self._awaiting_worker_queue.get()
+ self._logger.info(f"Starting worker: {worker}")
try:
worker.setup()
if worker.status != "error":
worker.launch_submission()
except Exception as e:
- self._logger.error(
- f'Worker finished with unhandled exception:\n {e}'
- )
- worker.status = 'error'
- if worker.status == 'error':
- set_submission_state(session, submission_id, 'checking_error')
+ self._logger.error(f"Worker finished with unhandled exception:\n {e}")
+ worker.status = "error"
+ if worker.status == "error":
+ set_submission_state(session, submission_id, "checking_error")
worker.teardown() # kill the worker
self._logger.info(
- f'Worker {worker} killed due to an error '
- f'while connecting to AWS worker'
+ f"Worker {worker} killed due to an error "
+ f"while connecting to AWS worker"
+ )
+ stderr = (
+ "There was a problem with sending your submission"
+ " for training. This problem is on RAMP side"
+ " and most likely it is not related to your"
+ " code. If this happened for the first time"
+ " to this submission you might"
+ " consider submitting the same code once again."
+ " Else, please contact the event organizers."
)
- stderr = ("There was a problem with sending your submission"
- " for training. This problem is on RAMP side"
- " and most likely it is not related to your"
- " code. If this happened for the first time"
- " to this submission you might"
- " consider submitting the same code once again."
- " Else, please contact the event organizers."
- )
set_submission_error_msg(session, submission_id, stderr)
continue
- set_submission_state(session, submission_id, 'training')
+ set_submission_state(session, submission_id, "training")
submission = get_submission_by_id(session, submission_id)
update_user_leaderboards(
- session, self._ramp_config['event_name'],
- submission.team.name, new_only=True,
+ session,
+ self._ramp_config["event_name"],
+ submission.team.name,
+ new_only=True,
)
self._processing_worker_queue.put_nowait(
- (worker, (submission_id, submission_name)))
- self._logger.info(
- f'Store the worker {worker} into the processing queue'
+ (worker, (submission_id, submission_name))
)
+ self._logger.info(f"Store the worker {worker} into the processing queue")
def collect_result(self, session):
"""Collect result from processed workers."""
try:
workers, submissions = zip(
- *[self._processing_worker_queue.get()
- for _ in range(self._processing_worker_queue.qsize())]
+ *[
+ self._processing_worker_queue.get()
+ for _ in range(self._processing_worker_queue.qsize())
+ ]
)
except ValueError:
- if self.hunger_policy == 'sleep':
+ if self.hunger_policy == "sleep":
time.sleep(5)
- elif self.hunger_policy == 'exit':
+ elif self.hunger_policy == "exit":
self._poison_pill = True
return
- for worker, (submission_id, submission_name) in zip(workers,
- submissions):
+ for worker, (submission_id, submission_name) in zip(workers, submissions):
dt = worker.time_since_last_status_check()
if (dt is not None) and (dt < self.time_between_collection):
self._processing_worker_queue.put_nowait(
- (worker, (submission_id, submission_name)))
+ (worker, (submission_id, submission_name))
+ )
time.sleep(0)
continue
- elif worker.status == 'running':
+ elif worker.status == "running":
self._processing_worker_queue.put_nowait(
- (worker, (submission_id, submission_name)))
+ (worker, (submission_id, submission_name))
+ )
time.sleep(0)
- elif worker.status == 'retry':
- set_submission_state(session, submission_id, 'new')
+ elif worker.status == "retry":
+ set_submission_state(session, submission_id, "new")
self._logger.info(
- f'Submission: {submission_id} has been interrupted. '
- 'It will be added to queue again and retried.'
+ f"Submission: {submission_id} has been interrupted. "
+ "It will be added to queue again and retried."
)
worker.teardown()
else:
- self._logger.info(f'Collecting results from worker {worker}')
+ self._logger.info(f"Collecting results from worker {worker}")
returncode, stderr = worker.collect_results()
if returncode:
if returncode == 124:
- self._logger.info(
- f'Worker {worker} killed due to timeout.'
- )
- submission_status = 'training_error'
+ self._logger.info(f"Worker {worker} killed due to timeout.")
+ submission_status = "training_error"
elif returncode == 2:
# Error occurred when downloading the logs
- submission_status = 'checking_error'
+ submission_status = "checking_error"
else:
self._logger.info(
- f'Worker {worker} killed due to an error '
- f'during training: {stderr}'
+ f"Worker {worker} killed due to an error "
+ f"during training: {stderr}"
)
- submission_status = 'training_error'
+ submission_status = "training_error"
else:
- submission_status = 'tested'
- set_submission_state(
- session, submission_id, submission_status
- )
+ submission_status = "tested"
+ set_submission_state(session, submission_id, submission_status)
set_submission_error_msg(session, submission_id, stderr)
self._processed_submission_queue.put_nowait(
- (submission_id, submission_name))
+ (submission_id, submission_name)
+ )
worker.teardown()
def update_database_results(self, session):
@@ -261,46 +277,47 @@ def update_database_results(self, session):
make_update_leaderboard = False
while not self._processed_submission_queue.empty():
make_update_leaderboard = True
- submission_id, submission_name = \
- self._processed_submission_queue.get_nowait()
- if 'error' in get_submission_state(session, submission_id):
+ (
+ submission_id,
+ submission_name,
+ ) = self._processed_submission_queue.get_nowait()
+ if "error" in get_submission_state(session, submission_id):
continue
self._logger.info(
- f'Write info in database for submission {submission_name}'
+ f"Write info in database for submission {submission_name}"
)
path_predictions = os.path.join(
- self._worker_config['predictions_dir'], submission_name
+ self._worker_config["predictions_dir"], submission_name
)
set_time(session, submission_id, path_predictions)
set_scores(session, submission_id, path_predictions)
set_bagged_scores(session, submission_id, path_predictions)
- set_submission_state(session, submission_id, 'scored')
+ set_submission_state(session, submission_id, "scored")
if make_update_leaderboard:
- self._logger.info('Update all leaderboards')
- update_leaderboards(session, self._ramp_config['event_name'])
- update_all_user_leaderboards(session,
- self._ramp_config['event_name'])
- self._logger.info('Leaderboards updated')
+ self._logger.info("Update all leaderboards")
+ update_leaderboards(session, self._ramp_config["event_name"])
+ update_all_user_leaderboards(session, self._ramp_config["event_name"])
+ self._logger.info("Leaderboards updated")
@staticmethod
def _reset_submission_after_failure(session, even_name):
submissions = get_submissions(session, even_name, state=None)
for submission_id, _, _ in submissions:
submission_state = get_submission_state(session, submission_id)
- if submission_state in ('training', 'sent_to_training'):
- set_submission_state(session, submission_id, 'new')
+ if submission_state in ("training", "sent_to_training"):
+ set_submission_state(session, submission_id, "new")
def launch(self):
"""Launch the dispatcher."""
- self._logger.info('Starting the RAMP dispatcher')
+ self._logger.info("Starting the RAMP dispatcher")
with session_scope(self._database_config) as session:
- self._logger.info('Open a session to the database')
+ self._logger.info("Open a session to the database")
self._logger.info(
- 'Reset unfinished trained submission from previous session'
+ "Reset unfinished trained submission from previous session"
)
self._reset_submission_after_failure(
- session, self._ramp_config['event_name']
+ session, self._ramp_config["event_name"]
)
try:
while not self._poison_pill:
@@ -312,6 +329,6 @@ def launch(self):
# reset the submissions to 'new' in case of error or unfinished
# training
self._reset_submission_after_failure(
- session, self._ramp_config['event_name']
+ session, self._ramp_config["event_name"]
)
- self._logger.info('Dispatcher killed by the poison pill')
+ self._logger.info("Dispatcher killed by the poison pill")
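For reference, a hedged sketch of running the dispatcher directly, using only the pieces visible in this diff; the two YAML paths are placeholders:

    from ramp_engine.dispatcher import Dispatcher

    dispatcher = Dispatcher(
        config="config.yml",                          # its "sqlalchemy" section configures the database
        event_config="events/iris_test/config.yml",   # its "ramp" section describes the event
        n_workers=2,           # a negative value means cpu_count() + 1 + n_workers
        hunger_policy="exit",  # "exit" sets the poison pill once no submission is left;
                               # "sleep" makes the dispatcher wait 5 s instead
    )
    # launch() opens a database session, resets unfinished submissions to "new",
    # and loops until the poison pill is set.
    dispatcher.launch()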
diff --git a/ramp-engine/ramp_engine/local.py b/ramp-engine/ramp_engine/local.py
index be92d265a..31cf5667f 100644
--- a/ramp-engine/ramp_engine/local.py
+++ b/ramp-engine/ramp_engine/local.py
@@ -7,7 +7,7 @@
from .conda import _conda_info_envs, _get_conda_env_path
from .conda import _conda_ramp_test_submission
-logger = logging.getLogger('RAMP-WORKER')
+logger = logging.getLogger("RAMP-WORKER")
class CondaEnvWorker(BaseWorker):
@@ -47,6 +47,7 @@ class CondaEnvWorker(BaseWorker):
* 'finished': the worker finished to train the submission.
* 'collected': the results of the training have been collected.
"""
+
def __init__(self, config, submission):
super().__init__(config=config, submission=submission)
@@ -57,11 +58,16 @@ def setup(self):
the configuration passed when instantiating the worker.
"""
# sanity check for the configuration variable
- for required_param in ('kit_dir', 'data_dir', 'submissions_dir',
- 'logs_dir', 'predictions_dir'):
+ for required_param in (
+ "kit_dir",
+ "data_dir",
+ "submissions_dir",
+ "logs_dir",
+ "predictions_dir",
+ ):
self._check_config_name(self.config, required_param)
# find the path to the conda environment
- env_name = self.config.get('conda_env', 'base')
+ env_name = self.config.get("conda_env", "base")
conda_info = _conda_info_envs()
self._python_bin_path = _get_conda_env_path(conda_info, env_name, self)
@@ -70,11 +76,14 @@ def setup(self):
def teardown(self):
"""Remove the predictions stores within the submission."""
- if self.status not in ('collected', 'retry'):
+ if self.status not in ("collected", "retry"):
raise ValueError("Collect the results before to kill the worker.")
- output_training_dir = os.path.join(self.config['kit_dir'],
- 'submissions', self.submission,
- 'training_output')
+ output_training_dir = os.path.join(
+ self.config["kit_dir"],
+ "submissions",
+ self.submission,
+ "training_output",
+ )
if os.path.exists(output_training_dir):
shutil.rmtree(output_training_dir)
super().teardown()
@@ -105,7 +114,7 @@ def check_timeout(self):
@property
def timeout(self):
- return self.config.get('timeout', 7200)
+ return self.config.get("timeout", 7200)
def launch_submission(self):
"""Launch the submission.
@@ -114,11 +123,12 @@ def launch_submission(self):
environment given in the configuration. The submission is launched in
a subprocess to free to not lock the Python main process.
"""
- cmd_ramp = os.path.join(self._python_bin_path, 'ramp-test')
- if self.status == 'running':
- raise ValueError('Wait that the submission is processed before to '
- 'launch a new one.')
- self._log_dir = os.path.join(self.config['logs_dir'], self.submission)
+ cmd_ramp = os.path.join(self._python_bin_path, "ramp-test")
+ if self.status == "running":
+ raise ValueError(
+ "Wait that the submission is processed before to " "launch a new one."
+ )
+ self._log_dir = os.path.join(self.config["logs_dir"], self.submission)
self._proc, self._log_file = _conda_ramp_test_submission(
self.config,
self.submission,
@@ -138,35 +148,36 @@ def collect_results(self):
beforehand.
"""
super().collect_results()
- if self.status in ['finished', 'running', 'timeout']:
+ if self.status in ["finished", "running", "timeout"]:
# communicate() will wait for the process to be completed
self._proc.communicate()
self._log_file.close()
- with open(os.path.join(self._log_dir, 'log'), 'rb') as f:
+ with open(os.path.join(self._log_dir, "log"), "rb") as f:
log_output = f.read()
- error_msg = _get_traceback(log_output.decode('utf-8'))
- if self.status == 'timeout':
- error_msg += ('\nWorker killed due to timeout after {}s.'
- .format(self.timeout))
- if self.status == 'timeout':
+ error_msg = _get_traceback(log_output.decode("utf-8"))
+ if self.status == "timeout":
+ error_msg += "\nWorker killed due to timeout after {}s.".format(
+ self.timeout
+ )
+ if self.status == "timeout":
returncode = 124
else:
returncode = self._proc.returncode
- pred_dir = os.path.join(
- self.config['predictions_dir'], self.submission
- )
+ pred_dir = os.path.join(self.config["predictions_dir"], self.submission)
output_training_dir = os.path.join(
- self.config['submissions_dir'], self.submission,
- 'training_output')
+ self.config["submissions_dir"],
+ self.submission,
+ "training_output",
+ )
if os.path.exists(pred_dir):
shutil.rmtree(pred_dir)
if returncode:
if os.path.exists(output_training_dir):
shutil.rmtree(output_training_dir)
- self.status = 'collected'
+ self.status = "collected"
return (returncode, error_msg)
# copy the predictions into the disk
# no need to create the directory, it will be handle by copytree
shutil.copytree(output_training_dir, pred_dir)
- self.status = 'collected'
+ self.status = "collected"
return (returncode, error_msg)
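A hedged sketch of a stand-alone CondaEnvWorker run, with the configuration keys required by the setup() sanity check above; every directory value and the "ramp-iris" environment name are placeholders:

    from ramp_engine.local import CondaEnvWorker

    config = {
        "kit_dir": "kits/iris",
        "data_dir": "kits/iris",
        "submissions_dir": "kits/iris/submissions",
        "logs_dir": "kits/iris/log",
        "predictions_dir": "kits/iris/predictions",
        "conda_env": "ramp-iris",  # optional; setup() falls back to the "base" environment
        "timeout": 7200,           # optional; 7200 s is the default enforced by the worker
    }

    worker = CondaEnvWorker(config=config, submission="starting_kit")
    worker.setup()                                    # resolves the conda environment's bin path
    worker.launch_submission()                        # runs ramp-test in a subprocess, logging to logs_dir/<submission>/log
    returncode, error_msg = worker.collect_results()  # waits for the subprocess and copies the predictions
    worker.teardown()                                 # removes the submission's training_output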
diff --git a/ramp-engine/ramp_engine/remote.py b/ramp-engine/ramp_engine/remote.py
index 3d2b04389..6b067b60c 100644
--- a/ramp-engine/ramp_engine/remote.py
+++ b/ramp-engine/ramp_engine/remote.py
@@ -12,7 +12,7 @@
from .conda import _conda_info_envs, _get_conda_env_path
from .conda import _conda_ramp_test_submission
-logger = logging.getLogger('RAMP-WORKER')
+logger = logging.getLogger("RAMP-WORKER")
def _check_dask_workers_single_machine(worker_urls: List[str]) -> bool:
@@ -34,21 +34,23 @@ def _check_dask_workers_single_machine(worker_urls: List[str]) -> bool:
if len(worker_hosts) == 1 and worker_hosts != {None}:
return True
else:
- raise ValueError(f'All dask workers should be on 1 machine, '
- f'found {len(worker_hosts)}: {worker_hosts}')
+ raise ValueError(
+ f"All dask workers should be on 1 machine, "
+ f"found {len(worker_hosts)}: {worker_hosts}"
+ )
def _read_file(path: Union[str, Path]) -> bytes:
"""Open and read a file"""
- with open(path, 'rb') as fh:
+ with open(path, "rb") as fh:
return fh.read()
def _serialize_folder(path: Union[str, Path]) -> bytes:
"""Serialize a folder as a bytes object of its .tar.gz"""
with BytesIO() as fh:
- with tarfile.open(fileobj=fh, mode='w:gz') as fh_tar:
- fh_tar.add(path, arcname='.')
+ with tarfile.open(fileobj=fh, mode="w:gz") as fh_tar:
+ fh_tar.add(path, arcname=".")
fh.seek(0)
return fh.read()
@@ -61,7 +63,7 @@ def _deserialize_folder(stream: bytes, out_dir: Union[str, Path]):
shutil.rmtree(out_dir, ignore_errors=True)
with BytesIO(stream) as fh:
fh.seek(0)
- with tarfile.open(fileobj=fh, mode='r:gz') as fh_tar:
+ with tarfile.open(fileobj=fh, mode="r:gz") as fh_tar:
fh_tar.extractall(out_dir)
@@ -118,6 +120,7 @@ class DaskWorker(BaseWorker):
* 'finished': the worker finished to train the submission.
* 'collected': the results of the training have been collected.
"""
+
def __init__(self, config, submission):
super().__init__(config=config, submission=submission)
@@ -131,29 +134,32 @@ def setup(self):
from dask.distributed import Client
# sanity check for the configuration variable
- for required_param in ('kit_dir', 'data_dir', 'submissions_dir',
- 'logs_dir', 'predictions_dir',
- 'dask_scheduler'):
+ for required_param in (
+ "kit_dir",
+ "data_dir",
+ "submissions_dir",
+ "logs_dir",
+ "predictions_dir",
+ "dask_scheduler",
+ ):
self._check_config_name(self.config, required_param)
# find the path to the conda environment
- env_name = self.config.get('conda_env', 'base')
- self._client = Client(self.config['dask_scheduler'])
+ env_name = self.config.get("conda_env", "base")
+ self._client = Client(self.config["dask_scheduler"])
dask_worker_urls = list(self._client.nthreads())
_check_dask_workers_single_machine(dask_worker_urls)
# Check if Dask workers are on the same host as the scheduler
self._is_local_cluster = (
- urlparse(self.config['dask_scheduler']).hostname
+ urlparse(self.config["dask_scheduler"]).hostname
== urlparse(dask_worker_urls[0]).hostname
)
# Fail early if the Dask worker is not working properly
- self._client.submit(lambda: 1+1).result()
+ self._client.submit(lambda: 1 + 1).result()
- conda_info = self._client.submit(
- _conda_info_envs, pure=False
- ).result()
+ conda_info = self._client.submit(_conda_info_envs, pure=False).result()
self._python_bin_path = _get_conda_env_path(conda_info, env_name, self)
@@ -161,11 +167,14 @@ def setup(self):
def teardown(self):
"""Remove the predictions stored within the submission."""
- if self.status != 'collected':
+ if self.status != "collected":
raise ValueError("Collect the results before to kill the worker.")
- output_training_dir = os.path.join(self.config['kit_dir'],
- 'submissions', self.submission,
- 'training_output')
+ output_training_dir = os.path.join(
+ self.config["kit_dir"],
+ "submissions",
+ self.submission,
+ "training_output",
+ )
self._client.submit(
shutil.rmtree, output_training_dir, ignore_errors=True, pure=False
).result()
@@ -198,7 +207,7 @@ def check_timeout(self):
@property
def timeout(self):
- return self.config.get('timeout', 7200)
+ return self.config.get("timeout", 7200)
def launch_submission(self):
"""Launch the submission.
@@ -207,23 +216,21 @@ def launch_submission(self):
environment given in the configuration. The submission is launched in
a subprocess to free to not lock the Python main process.
"""
- cmd_ramp = os.path.join(self._python_bin_path, 'ramp-test')
- if self.status == 'running':
- raise ValueError('Wait that the submission is processed before to '
- 'launch a new one.')
+ cmd_ramp = os.path.join(self._python_bin_path, "ramp-test")
+ if self.status == "running":
+ raise ValueError(
+ "Wait that the submission is processed before to " "launch a new one."
+ )
- self._log_dir = os.path.join(self.config['logs_dir'], self.submission)
+ self._log_dir = os.path.join(self.config["logs_dir"], self.submission)
# TODO: need to copy submission to the remote folder
if not self._is_local_cluster:
submission_dir = os.path.join(
- self.config['submissions_dir'], self.submission
+ self.config["submissions_dir"], self.submission
)
# remove remote submission and local prediction dir if it exists
self._client.submit(
- shutil.rmtree,
- submission_dir,
- ignore_errors=True,
- pure=False
+ shutil.rmtree, submission_dir, ignore_errors=True, pure=False
).result()
# Upload the submission dir to the remote machine
stream = _serialize_folder(submission_dir)
@@ -233,8 +240,8 @@ def launch_submission(self):
# If there is a training_output in the submission, remove it
self._client.submit(
_remove_link_or_dir,
- os.path.join(submission_dir, 'training_output'),
- pure=False
+ os.path.join(submission_dir, "training_output"),
+ pure=False,
).result()
self._proc = self._client.submit(
@@ -261,7 +268,7 @@ def collect_results(self):
from distributed.utils import CancelledError
super().collect_results()
- if self.status in ['finished', 'running', 'timeout']:
+ if self.status in ["finished", "running", "timeout"]:
returncode = 1
try:
# Wait for the computation to run.
@@ -269,19 +276,20 @@ def collect_results(self):
except CancelledError:
pass
log_output = self._client.submit(
- _read_file, os.path.join(self._log_dir, 'log'), pure=False
+ _read_file, os.path.join(self._log_dir, "log"), pure=False
).result()
- error_msg = _get_traceback(log_output.decode('utf-8'))
- if self.status == 'timeout':
- error_msg += ('\nWorker killed due to timeout after {}s.'
- .format(self.timeout))
+ error_msg = _get_traceback(log_output.decode("utf-8"))
+ if self.status == "timeout":
+ error_msg += "\nWorker killed due to timeout after {}s.".format(
+ self.timeout
+ )
returncode = 124
- pred_dir = os.path.join(
- self.config['predictions_dir'], self.submission
- )
+ pred_dir = os.path.join(self.config["predictions_dir"], self.submission)
output_training_dir = os.path.join(
- self.config['submissions_dir'], self.submission,
- 'training_output')
+ self.config["submissions_dir"],
+ self.submission,
+ "training_output",
+ )
self._client.submit(
shutil.rmtree, pred_dir, ignore_errors=True, pure=False
).result()
@@ -290,9 +298,9 @@ def collect_results(self):
shutil.rmtree,
output_training_dir,
ignore_errors=True,
- pure=False
+ pure=False,
).result()
- self.status = 'collected'
+ self.status = "collected"
return (returncode, error_msg)
if self._is_local_cluster:
shutil.copytree(output_training_dir, pred_dir)
@@ -305,5 +313,5 @@ def collect_results(self):
# remove the local predictions dir if it exists
shutil.rmtree(pred_dir, ignore_errors=True)
_deserialize_folder(stream, pred_dir)
- self.status = 'collected'
+ self.status = "collected"
return (returncode, error_msg)
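The Dask-based worker follows the same lifecycle but additionally requires a scheduler address, per the setup() check above. A hedged sketch (scheduler URL and directories are placeholders), together with the folder-serialization helpers this file uses to ship a submission to a remote machine:

    from ramp_engine.remote import DaskWorker, _serialize_folder, _deserialize_folder

    config = {
        "kit_dir": "kits/iris",
        "data_dir": "kits/iris",
        "submissions_dir": "kits/iris/submissions",
        "logs_dir": "kits/iris/log",
        "predictions_dir": "kits/iris/predictions",
        "conda_env": "ramp-iris",                  # placeholder environment name
        "dask_scheduler": "tcp://127.0.0.1:8786",  # required on top of the CondaEnvWorker keys
    }
    worker = DaskWorker(config=config, submission="starting_kit")

    # When the Dask workers do not share the scheduler's host, the submission folder
    # travels as an in-memory .tar.gz byte stream:
    stream = _serialize_folder("kits/iris/submissions/starting_kit")
    _deserialize_folder(stream, "/tmp/starting_kit_copy")  # wipes and recreates the target directory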
diff --git a/ramp-engine/ramp_engine/tests/kits/iris/problem.py b/ramp-engine/ramp_engine/tests/kits/iris/problem.py
index 0dadae60c..aff46c180 100644
--- a/ramp-engine/ramp_engine/tests/kits/iris/problem.py
+++ b/ramp-engine/ramp_engine/tests/kits/iris/problem.py
@@ -3,20 +3,19 @@
import rampwf as rw
from sklearn.model_selection import StratifiedShuffleSplit
-problem_title = 'Iris classification'
-_target_column_name = 'species'
-_prediction_label_names = ['setosa', 'versicolor', 'virginica']
+problem_title = "Iris classification"
+_target_column_name = "species"
+_prediction_label_names = ["setosa", "versicolor", "virginica"]
# A type (class) which will be used to create wrapper objects for y_pred
-Predictions = rw.prediction_types.make_multiclass(
- label_names=_prediction_label_names)
+Predictions = rw.prediction_types.make_multiclass(label_names=_prediction_label_names)
# An object implementing the workflow
workflow = rw.workflows.Estimator()
score_types = [
- rw.score_types.Accuracy(name='acc'),
- rw.score_types.ClassificationError(name='error'),
- rw.score_types.NegativeLogLikelihood(name='nll'),
- rw.score_types.F1Above(name='f1_70', threshold=0.7),
+ rw.score_types.Accuracy(name="acc"),
+ rw.score_types.ClassificationError(name="error"),
+ rw.score_types.NegativeLogLikelihood(name="nll"),
+ rw.score_types.F1Above(name="f1_70", threshold=0.7),
]
@@ -26,17 +25,17 @@ def get_cv(X, y):
def _read_data(path, f_name):
- data = pd.read_csv(os.path.join(path, 'data', f_name))
+ data = pd.read_csv(os.path.join(path, "data", f_name))
y_array = data[_target_column_name].values
X_array = data.drop([_target_column_name], axis=1)
return X_array, y_array
-def get_train_data(path='.'):
- f_name = 'train.csv'
+def get_train_data(path="."):
+ f_name = "train.csv"
return _read_data(path, f_name)
-def get_test_data(path='.'):
- f_name = 'test.csv'
+def get_test_data(path="."):
+ f_name = "test.csv"
return _read_data(path, f_name)
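A hedged sketch of how this toy kit is consumed: assuming the kit directory is importable, problem.py exposes data accessors that read train.csv / test.csv from its data/ sub-directory (the kit path below is a placeholder):

    import problem  # the module shown above; assumes the kit directory is on sys.path

    X_train, y_train = problem.get_train_data(path="kits/iris")
    X_test, y_test = problem.get_test_data(path="kits/iris")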
diff --git a/ramp-engine/ramp_engine/tests/kits/iris/submissions/random_forest_10_10/estimator.py b/ramp-engine/ramp_engine/tests/kits/iris/submissions/random_forest_10_10/estimator.py
index 494d240b8..3860343c3 100755
--- a/ramp-engine/ramp_engine/tests/kits/iris/submissions/random_forest_10_10/estimator.py
+++ b/ramp-engine/ramp_engine/tests/kits/iris/submissions/random_forest_10_10/estimator.py
@@ -2,6 +2,5 @@
def get_estimator():
- clf = RandomForestClassifier(n_estimators=10, max_leaf_nodes=10,
- random_state=61)
+ clf = RandomForestClassifier(n_estimators=10, max_leaf_nodes=10, random_state=61)
return clf
diff --git a/ramp-engine/ramp_engine/tests/kits/iris/submissions/starting_kit/estimator.py b/ramp-engine/ramp_engine/tests/kits/iris/submissions/starting_kit/estimator.py
index 661d88c2b..64220a376 100755
--- a/ramp-engine/ramp_engine/tests/kits/iris/submissions/starting_kit/estimator.py
+++ b/ramp-engine/ramp_engine/tests/kits/iris/submissions/starting_kit/estimator.py
@@ -2,6 +2,5 @@
def get_estimator():
- clf = RandomForestClassifier(n_estimators=1, max_leaf_nodes=2,
- random_state=61)
+ clf = RandomForestClassifier(n_estimators=1, max_leaf_nodes=2, random_state=61)
return clf
diff --git a/ramp-engine/ramp_engine/tests/kits/iris/submissions/starting_kit_local/estimator.py b/ramp-engine/ramp_engine/tests/kits/iris/submissions/starting_kit_local/estimator.py
index 661d88c2b..64220a376 100755
--- a/ramp-engine/ramp_engine/tests/kits/iris/submissions/starting_kit_local/estimator.py
+++ b/ramp-engine/ramp_engine/tests/kits/iris/submissions/starting_kit_local/estimator.py
@@ -2,6 +2,5 @@
def get_estimator():
- clf = RandomForestClassifier(n_estimators=1, max_leaf_nodes=2,
- random_state=61)
+ clf = RandomForestClassifier(n_estimators=1, max_leaf_nodes=2, random_state=61)
return clf
diff --git a/ramp-engine/ramp_engine/tests/test_aws.py b/ramp-engine/ramp_engine/tests/test_aws.py
index f18803bd1..b430361df 100644
--- a/ramp-engine/ramp_engine/tests/test_aws.py
+++ b/ramp-engine/ramp_engine/tests/test_aws.py
@@ -30,8 +30,9 @@
logging.basicConfig(
level=logging.INFO,
- format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',
- datefmt='%Y-%m-%d %H:%M:%S')
+ format="%(asctime)s - %(name)s - %(levelname)s - %(message)s",
+ datefmt="%Y-%m-%d %H:%M:%S",
+)
def add_empty_dir(dir_name):
@@ -39,227 +40,224 @@ def add_empty_dir(dir_name):
os.mkdir(dir_name)
-@mock.patch('ramp_engine.aws.api.launch_ec2_instances')
-def test_launch_ec2_instances_put_back_into_queue(test_launch_ec2_instances,
- caplog):
- ''' checks if the retry status and the correct log is added if the
- api returns None instances and status retry '''
+@mock.patch("ramp_engine.aws.api.launch_ec2_instances")
+def test_launch_ec2_instances_put_back_into_queue(test_launch_ec2_instances, caplog):
+ """checks if the retry status and the correct log is added if the
+ api returns None instances and status retry"""
- test_launch_ec2_instances.return_value = None, 'retry'
+ test_launch_ec2_instances.return_value = None, "retry"
# setup the AWS worker
- event_config = read_config(ramp_aws_config_template())['worker']
+ event_config = read_config(ramp_aws_config_template())["worker"]
- worker = AWSWorker(event_config, submission='starting_kit_local')
+ worker = AWSWorker(event_config, submission="starting_kit_local")
worker.config = event_config
# worker should be put back into the queue
worker.setup()
- assert worker.status == 'retry'
- assert 'Adding it back to the queue and will try again' in caplog.text
+ assert worker.status == "retry"
+ assert "Adding it back to the queue and will try again" in caplog.text
-@mock.patch('ramp_engine.aws.api._rsync')
-@mock.patch('ramp_engine.aws.api.launch_ec2_instances')
-def test_aws_worker_upload_error(test_launch_ec2_instances, test_rsync,
- caplog):
+@mock.patch("ramp_engine.aws.api._rsync")
+@mock.patch("ramp_engine.aws.api.launch_ec2_instances")
+def test_aws_worker_upload_error(test_launch_ec2_instances, test_rsync, caplog):
# mock dummy AWS instance
class DummyInstance:
id = 1
test_launch_ec2_instances.return_value = (DummyInstance(),), 0
# mock the called process error
- test_rsync.side_effect = subprocess.CalledProcessError(255, 'test')
+ test_rsync.side_effect = subprocess.CalledProcessError(255, "test")
# setup the AWS worker
- event_config = read_config(ramp_aws_config_template())['worker']
+ event_config = read_config(ramp_aws_config_template())["worker"]
- worker = AWSWorker(event_config, submission='starting_kit_local')
+ worker = AWSWorker(event_config, submission="starting_kit_local")
worker.config = event_config
# CalledProcessError is thrown inside
worker.setup()
- assert worker.status == 'error'
- assert 'Unable to connect during log download' in caplog.text
+ assert worker.status == "error"
+ assert "Unable to connect during log download" in caplog.text
-@mock.patch('ramp_engine.aws.api._rsync')
+@mock.patch("ramp_engine.aws.api._rsync")
@mock.patch("ramp_engine.base.BaseWorker.collect_results")
-def test_aws_worker_download_log_error(superclass, test_rsync,
- caplog):
+def test_aws_worker_download_log_error(superclass, test_rsync, caplog):
# mock dummy AWS instance
class DummyInstance:
- id = 'test'
+ id = "test"
- test_rsync.side_effect = subprocess.CalledProcessError(255, 'test')
+ test_rsync.side_effect = subprocess.CalledProcessError(255, "test")
# setup the AWS worker
superclass.return_value = True
- event_config = read_config(ramp_aws_config_template())['worker']
+ event_config = read_config(ramp_aws_config_template())["worker"]
- worker = AWSWorker(event_config, submission='starting_kit_local')
+ worker = AWSWorker(event_config, submission="starting_kit_local")
worker.config = event_config
- worker.status = 'finished'
+ worker.status = "finished"
worker.instance = DummyInstance
    # worker will now throw a CalledProcessError
exit_status, error_msg = worker.collect_results()
- assert 'Error occurred when downloading the logs' in caplog.text
- assert 'Trying to download the log once again' in caplog.text
+ assert "Error occurred when downloading the logs" in caplog.text
+ assert "Trying to download the log once again" in caplog.text
assert exit_status == 2
- assert 'test' in error_msg
- assert worker.status == 'error'
+ assert "test" in error_msg
+ assert worker.status == "error"
-@mock.patch('ramp_engine.aws.api._rsync')
-@mock.patch('ramp_engine.aws.api._training_successful')
-@mock.patch('ramp_engine.aws.api.download_log')
+@mock.patch("ramp_engine.aws.api._rsync")
+@mock.patch("ramp_engine.aws.api._training_successful")
+@mock.patch("ramp_engine.aws.api.download_log")
@mock.patch("ramp_engine.base.BaseWorker.collect_results")
-def test_aws_worker_download_prediction_error(superclass, test_download_log,
- test_train, test_rsync, caplog):
+def test_aws_worker_download_prediction_error(
+ superclass, test_download_log, test_train, test_rsync, caplog
+):
# mock dummy AWS instance
class DummyInstance:
- id = 'test'
+ id = "test"
- test_rsync.side_effect = subprocess.CalledProcessError(255, 'test')
+ test_rsync.side_effect = subprocess.CalledProcessError(255, "test")
test_download_log.return_value = (0,)
# setup the AWS worker
superclass.return_value = True
test_train.return_value = True
- event_config = read_config(ramp_aws_config_template())['worker']
+ event_config = read_config(ramp_aws_config_template())["worker"]
- worker = AWSWorker(event_config, submission='starting_kit_local')
+ worker = AWSWorker(event_config, submission="starting_kit_local")
worker.config = event_config
- worker.status = 'finished'
+ worker.status = "finished"
worker.instance = DummyInstance
    # worker will now throw a CalledProcessError
exit_status, error_msg = worker.collect_results()
- assert 'Downloading the prediction failed with' in caplog.text
- assert 'Trying to download the prediction once again' in caplog.text
+ assert "Downloading the prediction failed with" in caplog.text
+ assert "Trying to download the prediction once again" in caplog.text
assert exit_status == 1
- assert 'test' in error_msg
+ assert "test" in error_msg
-@mock.patch('ramp_engine.aws.api._rsync')
+@mock.patch("ramp_engine.aws.api._rsync")
def test_rsync_download_predictions(test_rsync, caplog):
- error = subprocess.CalledProcessError(255, 'test')
- event_config = read_config(ramp_aws_config_template())['worker']
+ error = subprocess.CalledProcessError(255, "test")
+ event_config = read_config(ramp_aws_config_template())["worker"]
instance_id = 0
- submission_name = 'test_submission'
+ submission_name = "test_submission"
# test for 2 errors by rsync followed by a log output
- test_rsync.side_effect = [error, error, 'test_log']
- out = download_predictions(event_config, instance_id,
- submission_name, folder=None)
- assert 'Trying to download the prediction' in caplog.text
- assert 'test_submission' in out
+ test_rsync.side_effect = [error, error, "test_log"]
+ out = download_predictions(event_config, instance_id, submission_name, folder=None)
+ assert "Trying to download the prediction" in caplog.text
+ assert "test_submission" in out
# test for 3 errors by rsync followed by a log output
test_rsync.side_effect = [error, error, error]
with pytest.raises(subprocess.CalledProcessError):
- out = download_predictions(event_config, instance_id, submission_name,
- folder=None)
- assert 'Trying to download the prediction' in caplog.text
- assert 'error occured when downloading prediction' in caplog.text
+ out = download_predictions(
+ event_config, instance_id, submission_name, folder=None
+ )
+ assert "Trying to download the prediction" in caplog.text
+ assert "error occured when downloading prediction" in caplog.text
-@mock.patch('ramp_engine.aws.api._rsync')
+@mock.patch("ramp_engine.aws.api._rsync")
def test_rsync_download_log(test_rsync, caplog):
- error = subprocess.CalledProcessError(255, 'test')
- event_config = read_config(ramp_aws_config_template())['worker']
+ error = subprocess.CalledProcessError(255, "test")
+ event_config = read_config(ramp_aws_config_template())["worker"]
instance_id = 0
- submission_name = 'test_submission'
+ submission_name = "test_submission"
# test for 2 errors by rsync followed by a log output
- test_rsync.side_effect = [error, error, 'test_log']
+ test_rsync.side_effect = [error, error, "test_log"]
out = download_log(event_config, instance_id, submission_name)
- assert 'Trying to download the log' in caplog.text
- assert out == 'test_log'
+ assert "Trying to download the log" in caplog.text
+ assert out == "test_log"
# test for 3 errors by rsync followed by a log output
test_rsync.side_effect = [error, error, error]
with pytest.raises(subprocess.CalledProcessError):
out = download_log(event_config, instance_id, submission_name)
- assert 'Trying to download the log' in caplog.text
+ assert "Trying to download the log" in caplog.text
-@mock.patch('ramp_engine.aws.api._rsync')
+@mock.patch("ramp_engine.aws.api._rsync")
def test_rsync_upload_fails(test_rsync):
- test_rsync.side_effect = subprocess.CalledProcessError(255, 'test')
- event_config = read_config(ramp_aws_config_template())['worker']
+ test_rsync.side_effect = subprocess.CalledProcessError(255, "test")
+ event_config = read_config(ramp_aws_config_template())["worker"]
instance_id = 0
- submission_name = 'test_submission'
- submissions_dir = 'temp'
- out = upload_submission(event_config, instance_id, submission_name,
- submissions_dir)
+ submission_name = "test_submission"
+ submissions_dir = "temp"
+ out = upload_submission(event_config, instance_id, submission_name, submissions_dir)
    assert out == 1 # error occurred and it was caught
-@mock.patch('ramp_engine.aws.api._run')
+@mock.patch("ramp_engine.aws.api._run")
def test_is_spot_terminated_with_CalledProcessError(test_run, caplog):
- test_run.side_effect = subprocess.CalledProcessError(28, 'test')
- event_config = read_config(ramp_aws_config_template())['worker']
+ test_run.side_effect = subprocess.CalledProcessError(28, "test")
+ event_config = read_config(ramp_aws_config_template())["worker"]
instance_id = 0
is_spot_terminated(event_config, instance_id)
- assert 'Unable to run curl' in caplog.text
+ assert "Unable to run curl" in caplog.text
-@pytest.mark.parametrize(
- "use_spot_instance",
- [None, True, False]
- )
+@pytest.mark.parametrize("use_spot_instance", [None, True, False])
@mock.patch("boto3.session.Session")
def test_launch_ec2_instances(boto_session_cls, use_spot_instance):
- ''' Check 'use_spot_instance' config with None, True and False'''
+ """Check 'use_spot_instance' config with None, True and False"""
# dummy mock session
session = boto_session_cls.return_value
client = session.client.return_value
describe_images = client.describe_images
images = {"Images": [{"ImageId": 1, "CreationDate": 123}]}
describe_images.return_value = images
- event_config = read_config(ramp_aws_config_template())['worker']
+ event_config = read_config(ramp_aws_config_template())["worker"]
- event_config['use_spot_instance'] = use_spot_instance
+ event_config["use_spot_instance"] = use_spot_instance
launch_ec2_instances(event_config)
-@mock.patch('ramp_engine.aws.api.launch_train')
+@mock.patch("ramp_engine.aws.api.launch_train")
def test_aws_worker_launch_train_error(launch_train, caplog):
# mock dummy AWS instance
class DummyInstance:
id = 1
- launch_train.side_effect = subprocess.CalledProcessError(255, 'test')
+
+ launch_train.side_effect = subprocess.CalledProcessError(255, "test")
# setup the AWS worker
- event_config = read_config(ramp_aws_config_template())['worker']
+ event_config = read_config(ramp_aws_config_template())["worker"]
- worker = AWSWorker(event_config, submission='starting_kit_local')
+ worker = AWSWorker(event_config, submission="starting_kit_local")
worker.config = event_config
- worker.submission = 'dummy submissions'
+ worker.submission = "dummy submissions"
worker.instance = DummyInstance
# CalledProcessError is thrown inside
status = worker.launch_submission()
- assert 'test' in caplog.text
- assert 'Cannot start training of submission' in caplog.text
- assert worker.status == 'error'
+ assert "test" in caplog.text
+ assert "Cannot start training of submission" in caplog.text
+ assert worker.status == "error"
assert status == 1
@pytest.mark.parametrize(
- 'aws_msg_type, result_none, log_msg',
- [('max_spot', True, 'MaxSpotInstanceCountExceeded'),
- ('unhandled', True, 'this is temporary message'),
- ('correct', False, 'Spot instance request fulfilled')
- ]
+ "aws_msg_type, result_none, log_msg",
+ [
+ ("max_spot", True, "MaxSpotInstanceCountExceeded"),
+ ("unhandled", True, "this is temporary message"),
+ ("correct", False, "Spot instance request fulfilled"),
+ ],
)
# set shorter waiting time than in the actual settings
@mock.patch("ramp_engine.aws.api.WAIT_MINUTES", 0.03)
@mock.patch("ramp_engine.aws.api.MAX_TRIES_TO_CONNECT", 4)
@mock.patch("boto3.session.Session")
-def test_creating_instances(boto_session_cls, caplog,
- aws_msg_type, result_none, log_msg):
- ''' test launching more instances than limit on AWS enabled'''
+def test_creating_instances(
+ boto_session_cls, caplog, aws_msg_type, result_none, log_msg
+):
+ """test launching more instances than limit on AWS enabled"""
# info: caplog is a pytest fixture to collect logging info
# dummy mock session of AWS
session = boto_session_cls.return_value
@@ -268,29 +266,29 @@ def test_creating_instances(boto_session_cls, caplog,
images = {"Images": [{"ImageId": 1, "CreationDate": 123}]}
describe_images.return_value = images
- error = {
- "ClientError": {
- "Code": "Max spot instance count exceeded"
- }
- }
- event_config = read_config(ramp_aws_config_template())['worker']
- event_config['use_spot_instance'] = True
+ error = {"ClientError": {"Code": "Max spot instance count exceeded"}}
+ event_config = read_config(ramp_aws_config_template())["worker"]
+ event_config["use_spot_instance"] = True
request_spot_instances = client.request_spot_instances
error_max_instances = botocore.exceptions.ClientError(
- error, "MaxSpotInstanceCountExceeded")
+ error, "MaxSpotInstanceCountExceeded"
+ )
error_unhandled = botocore.exceptions.ParamValidationError(
- report='this is temporary message')
- correct_response = {'SpotInstanceRequests':
- [{'SpotInstanceRequestId': ['temp']}]
- }
-
- if aws_msg_type == 'max_spot':
- aws_response = [error_max_instances, error_max_instances,
- error_max_instances, error_max_instances]
- elif aws_msg_type == 'unhandled':
+ report="this is temporary message"
+ )
+ correct_response = {"SpotInstanceRequests": [{"SpotInstanceRequestId": ["temp"]}]}
+
+ if aws_msg_type == "max_spot":
+ aws_response = [
+ error_max_instances,
+ error_max_instances,
+ error_max_instances,
+ error_max_instances,
+ ]
+ elif aws_msg_type == "unhandled":
aws_response = [error_unhandled, error_unhandled]
- elif aws_msg_type == 'correct':
+ elif aws_msg_type == "correct":
aws_response = [error_max_instances, correct_response]
request_spot_instances.side_effect = aws_response
@@ -299,22 +297,23 @@ def test_creating_instances(boto_session_cls, caplog,
assert log_msg in caplog.text
-@mock.patch('ramp_engine.aws.api.is_spot_terminated')
-@mock.patch('ramp_engine.aws.api.launch_train')
-@mock.patch('ramp_engine.aws.api._training_finished')
-def test_restart_on_sudden_instance_termination(training_finished,
- launch_train, spot_terminated,
- caplog):
+@mock.patch("ramp_engine.aws.api.is_spot_terminated")
+@mock.patch("ramp_engine.aws.api.launch_train")
+@mock.patch("ramp_engine.aws.api._training_finished")
+def test_restart_on_sudden_instance_termination(
+ training_finished, launch_train, spot_terminated, caplog
+):
class DummyInstance:
id = 1
+
launch_train.return_value = 0
# setup the AWS worker
- event_config = read_config(ramp_aws_config_template())['worker']
+ event_config = read_config(ramp_aws_config_template())["worker"]
- worker = AWSWorker(event_config, submission='starting_kit_local')
+ worker = AWSWorker(event_config, submission="starting_kit_local")
worker.config = event_config
- worker.submission = 'dummy submissions'
+ worker.submission = "dummy submissions"
worker.instance = DummyInstance
# set the submission did not yet finish training
@@ -322,55 +321,57 @@ class DummyInstance:
spot_terminated.return_value = False
worker.launch_submission()
- assert worker.status == 'running'
- assert caplog.text == ''
+ assert worker.status == "running"
+ assert caplog.text == ""
# call CalledProcessError on checking if submission was finished
- training_finished.side_effect = subprocess.CalledProcessError(255, 'test')
+ training_finished.side_effect = subprocess.CalledProcessError(255, "test")
# make sure that the worker status is set to 'retry'
- assert worker.status == 'retry'
- assert 'Unable to connect to the instance' in caplog.text
- assert 'Adding the submission back to the queue' in caplog.text
+ assert worker.status == "retry"
+ assert "Unable to connect to the instance" in caplog.text
+ assert "Adding the submission back to the queue" in caplog.text
def test_aws_worker():
- if not os.path.isfile(os.path.join(HERE, 'config.yml')):
+ if not os.path.isfile(os.path.join(HERE, "config.yml")):
pytest.skip("Only for local tests for now")
- ramp_kit_dir = os.path.join(HERE, 'kits', 'iris')
+ ramp_kit_dir = os.path.join(HERE, "kits", "iris")
# make sure prediction and log dirs exist, if not, add them
- add_empty_dir(os.path.join(ramp_kit_dir, 'predictions'))
- add_empty_dir(os.path.join(ramp_kit_dir, 'logs'))
+ add_empty_dir(os.path.join(ramp_kit_dir, "predictions"))
+ add_empty_dir(os.path.join(ramp_kit_dir, "logs"))
# if the prediction / log files are still there, remove them
- for subdir in os.listdir(os.path.join(ramp_kit_dir, 'predictions')):
+ for subdir in os.listdir(os.path.join(ramp_kit_dir, "predictions")):
if os.path.isdir(subdir):
shutil.rmtree(subdir)
- for subdir in os.listdir(os.path.join(ramp_kit_dir, 'logs')):
+ for subdir in os.listdir(os.path.join(ramp_kit_dir, "logs")):
if os.path.isdir(subdir):
shutil.rmtree(subdir)
- worker_config = read_config(os.path.join(HERE, 'config.yml'))['worker']
- worker = AWSWorker(worker_config, submission='starting_kit_local')
+ worker_config = read_config(os.path.join(HERE, "config.yml"))["worker"]
+ worker = AWSWorker(worker_config, submission="starting_kit_local")
worker.setup()
- assert worker.status == 'setup'
+ assert worker.status == "setup"
worker.launch_submission()
- assert worker.status in ('running', 'finished')
+ assert worker.status in ("running", "finished")
worker.collect_results()
- assert worker.status == 'collected'
- assert os.path.isdir(os.path.join(
- ramp_kit_dir, 'predictions', 'starting_kit_local', 'fold_0'))
- assert os.path.isfile(os.path.join(
- ramp_kit_dir, 'logs', 'starting_kit_local', 'log'))
+ assert worker.status == "collected"
+ assert os.path.isdir(
+ os.path.join(ramp_kit_dir, "predictions", "starting_kit_local", "fold_0")
+ )
+ assert os.path.isfile(
+ os.path.join(ramp_kit_dir, "logs", "starting_kit_local", "log")
+ )
worker.teardown()
- assert worker.status == 'killed'
+ assert worker.status == "killed"
def test_aws_dispatcher(session_toy): # noqa
# copy of test_integration_dispatcher but with AWS
- if not os.path.isfile(os.path.join(HERE, 'config.yml')):
+ if not os.path.isfile(os.path.join(HERE, "config.yml")):
pytest.skip("Only for local tests for now")
config = read_config(database_config_template())
@@ -378,17 +379,20 @@ def test_aws_dispatcher(session_toy): # noqa
event_config = read_config(event_config)
# patch the event_config to match local config.yml for AWS
- aws_event_config = read_config(os.path.join(HERE, 'config.yml'))
- event_config['worker'] = aws_event_config['worker']
+ aws_event_config = read_config(os.path.join(HERE, "config.yml"))
+ event_config["worker"] = aws_event_config["worker"]
dispatcher = Dispatcher(
- config=config, event_config=event_config, worker=AWSWorker,
- n_workers=-1, hunger_policy='exit'
+ config=config,
+ event_config=event_config,
+ worker=AWSWorker,
+ n_workers=-1,
+ hunger_policy="exit",
)
dispatcher.launch()
# the iris kit contain a submission which should fail for each user
submission = get_submissions(
- session_toy, event_config['ramp']['event_name'], 'training_error'
+ session_toy, event_config["ramp"]["event_name"], "training_error"
)
assert len(submission) == 2
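The AWS tests above lean on one recurring pattern: patch a low-level helper with mock.patch, force a subprocess.CalledProcessError, and assert on both the return value and pytest's caplog text. A self-contained illustration of that pattern follows; the fetch_logs helper is invented for the example and is not part of ramp_engine:

    import logging
    import subprocess
    from unittest import mock

    logger = logging.getLogger("RAMP-WORKER")

    def fetch_logs(run_rsync):
        # Stand-in for an api helper: swallow the rsync failure and log it.
        try:
            return run_rsync()
        except subprocess.CalledProcessError:
            logger.error("Unable to connect during log download")
            return None

    def test_fetch_logs_reports_the_failure(caplog):
        run_rsync = mock.Mock(side_effect=subprocess.CalledProcessError(255, "test"))
        assert fetch_logs(run_rsync) is None
        assert "Unable to connect during log download" in caplog.text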
diff --git a/ramp-engine/ramp_engine/tests/test_cli.py b/ramp-engine/ramp_engine/tests/test_cli.py
index dfe7dd3f9..58d9e1ceb 100644
--- a/ramp-engine/ramp_engine/tests/test_cli.py
+++ b/ramp-engine/ramp_engine/tests/test_cli.py
@@ -24,32 +24,36 @@ def make_toy_db(database_connection):
yield
finally:
shutil.rmtree(deployment_dir, ignore_errors=True)
- db, _ = setup_db(database_config['sqlalchemy'])
+ db, _ = setup_db(database_config["sqlalchemy"])
Model.metadata.drop_all(db)
-@pytest.mark.parametrize(
- "verbose_params", [None, "--verbose", "-vv"]
-)
+@pytest.mark.parametrize("verbose_params", [None, "--verbose", "-vv"])
def test_dispatcher(verbose_params, make_toy_db):
runner = CliRunner()
- cmd = ["dispatcher",
- "--config", database_config_template(),
- "--event-config", ramp_config_template()]
+ cmd = [
+ "dispatcher",
+ "--config",
+ database_config_template(),
+ "--event-config",
+ ramp_config_template(),
+ ]
if verbose_params is not None:
cmd += [verbose_params]
result = runner.invoke(main, cmd)
assert result.exit_code == 0, result.output
-@pytest.mark.parametrize(
- "verbose_params", [None, "--verbose", "-vv"]
-)
+@pytest.mark.parametrize("verbose_params", [None, "--verbose", "-vv"])
def test_worker(verbose_params, make_toy_db):
runner = CliRunner()
- cmd = ["worker",
- "--event-config", ramp_config_template(),
- "--submission", "starting_kit"]
+ cmd = [
+ "worker",
+ "--event-config",
+ ramp_config_template(),
+ "--submission",
+ "starting_kit",
+ ]
if verbose_params is not None:
cmd += [verbose_params]
result = runner.invoke(main, cmd)
diff --git a/ramp-engine/ramp_engine/tests/test_conda_worker.py b/ramp-engine/ramp_engine/tests/test_conda_worker.py
index d50977670..6d55787aa 100644
--- a/ramp-engine/ramp_engine/tests/test_conda_worker.py
+++ b/ramp-engine/ramp_engine/tests/test_conda_worker.py
@@ -17,8 +17,8 @@ def _is_conda_env_installed():
# is available. Otherwise skip all tests.
try:
conda_info = _conda_info_envs()
- envs_path = conda_info['envs'][1:]
- if not envs_path or not any(['ramp-iris' in env for env in envs_path]):
+ envs_path = conda_info["envs"][1:]
+ if not envs_path or not any(["ramp-iris" in env for env in envs_path]):
return True
return False
except: # noqa
@@ -28,56 +28,66 @@ def _is_conda_env_installed():
pytestmark = pytest.mark.skipif(
_is_conda_env_installed(),
- reason=('CondaEnvWorker required conda and an environment named '
- '"ramp-iris". No such environment was found. Check the '
- '"ci_tools/environment_iris_kit.yml" file to create such '
- 'environment.')
+ reason=(
+ "CondaEnvWorker required conda and an environment named "
+ '"ramp-iris". No such environment was found. Check the '
+ '"ci_tools/environment_iris_kit.yml" file to create such '
+ "environment."
+ ),
)
@contextmanager
-def get_conda_worker(submission_name, Worker=CondaEnvWorker,
- conda_env='ramp-iris', dask_scheduler=None):
+def get_conda_worker(
+ submission_name,
+ Worker=CondaEnvWorker,
+ conda_env="ramp-iris",
+ dask_scheduler=None,
+):
module_path = os.path.dirname(__file__)
- config = {'kit_dir': os.path.join(module_path, 'kits', 'iris'),
- 'data_dir': os.path.join(module_path, 'kits', 'iris'),
- 'submissions_dir': os.path.join(module_path, 'kits',
- 'iris', 'submissions'),
- 'logs_dir': os.path.join(module_path, 'kits', 'iris', 'log'),
- 'predictions_dir': os.path.join(
- module_path, 'kits', 'iris', 'predictions'),
- 'conda_env': conda_env}
+ config = {
+ "kit_dir": os.path.join(module_path, "kits", "iris"),
+ "data_dir": os.path.join(module_path, "kits", "iris"),
+ "submissions_dir": os.path.join(module_path, "kits", "iris", "submissions"),
+ "logs_dir": os.path.join(module_path, "kits", "iris", "log"),
+ "predictions_dir": os.path.join(module_path, "kits", "iris", "predictions"),
+ "conda_env": conda_env,
+ }
if issubclass(Worker, DaskWorker):
- pytest.importorskip('dask')
- pytest.importorskip('dask.distributed')
- config['dask_scheduler'] = dask_scheduler
+ pytest.importorskip("dask")
+ pytest.importorskip("dask.distributed")
+ config["dask_scheduler"] = dask_scheduler
- worker = Worker(config=config, submission='starting_kit')
+ worker = Worker(config=config, submission="starting_kit")
yield worker
# remove all directories that we potentially created
_remove_directory(worker)
- if issubclass(Worker, DaskWorker) and hasattr(worker, '_client'):
+ if issubclass(Worker, DaskWorker) and hasattr(worker, "_client"):
worker._client.close()
def _remove_directory(worker):
- if 'kit_dir' not in worker.config:
+ if "kit_dir" not in worker.config:
return
output_training_dir = os.path.join(
- worker.config['kit_dir'], 'submissions', worker.submission,
- 'training_output'
+ worker.config["kit_dir"],
+ "submissions",
+ worker.submission,
+ "training_output",
)
- for directory in (output_training_dir,
- worker.config['logs_dir'],
- worker.config['predictions_dir']):
+ for directory in (
+ output_training_dir,
+ worker.config["logs_dir"],
+ worker.config["predictions_dir"],
+ ):
if os.path.exists(directory):
shutil.rmtree(directory)
-@pytest.mark.parametrize("submission", ('starting_kit', 'random_forest_10_10'))
+@pytest.mark.parametrize("submission", ("starting_kit", "random_forest_10_10"))
@pytest.mark.parametrize("Worker", ALL_WORKERS)
def test_conda_worker_launch(submission, Worker, dask_scheduler):
with get_conda_worker(
@@ -85,60 +95,66 @@ def test_conda_worker_launch(submission, Worker, dask_scheduler):
) as worker:
worker.launch()
# check that teardown removed the predictions
- output_training_dir = os.path.join(worker.config['kit_dir'],
- 'submissions',
- worker.submission,
- 'training_output')
- assert not os.path.exists(output_training_dir), \
- "teardown() failed to remove the predictions"
-
-
-@pytest.mark.parametrize("submission", ('starting_kit', 'random_forest_10_10'))
+ output_training_dir = os.path.join(
+ worker.config["kit_dir"],
+ "submissions",
+ worker.submission,
+ "training_output",
+ )
+ assert not os.path.exists(
+ output_training_dir
+ ), "teardown() failed to remove the predictions"
+
+
+@pytest.mark.parametrize("submission", ("starting_kit", "random_forest_10_10"))
@pytest.mark.parametrize("Worker", ALL_WORKERS)
def test_conda_worker(submission, Worker, dask_scheduler):
with get_conda_worker(
submission, Worker=Worker, dask_scheduler=dask_scheduler
) as worker:
- assert worker.status == 'initialized'
+ assert worker.status == "initialized"
worker.setup()
- assert worker.status == 'setup'
+ assert worker.status == "setup"
worker.launch_submission()
- assert worker.status == 'running'
+ assert worker.status == "running"
exit_status, _ = worker.collect_results()
assert exit_status == 0
- assert worker.status == 'collected'
+ assert worker.status == "collected"
worker.teardown()
# check that teardown removed the predictions
- output_training_dir = os.path.join(worker.config['kit_dir'],
- 'submissions',
- worker.submission,
- 'training_output')
- assert not os.path.exists(output_training_dir), \
- "teardown() failed to remove the predictions"
+ output_training_dir = os.path.join(
+ worker.config["kit_dir"],
+ "submissions",
+ worker.submission,
+ "training_output",
+ )
+ assert not os.path.exists(
+ output_training_dir
+ ), "teardown() failed to remove the predictions"
@pytest.mark.parametrize("Worker", ALL_WORKERS)
def test_conda_worker_without_conda_env_specified(Worker, dask_scheduler):
with get_conda_worker(
- 'starting_kit', Worker=Worker, dask_scheduler=dask_scheduler
+ "starting_kit", Worker=Worker, dask_scheduler=dask_scheduler
) as worker:
# remove the conda_env parameter from the configuration
- del worker.config['conda_env']
+ del worker.config["conda_env"]
# if the conda environment is not given in the configuration, we should
# fall back on the base environment of conda
# the conda environment is set during setup; thus no need to launch
# submission
worker.setup()
- assert 'envs' not in worker._python_bin_path
+ assert "envs" not in worker._python_bin_path
@pytest.mark.parametrize("Worker", ALL_WORKERS)
def test_conda_worker_error_missing_config_param(Worker, dask_scheduler):
with get_conda_worker(
- 'starting_kit', Worker=Worker, dask_scheduler=dask_scheduler
+ "starting_kit", Worker=Worker, dask_scheduler=dask_scheduler
) as worker:
# we remove one of the required parameter
- del worker.config['kit_dir']
+ del worker.config["kit_dir"]
err_msg = "The worker required the parameter 'kit_dir'"
with pytest.raises(ValueError, match=err_msg):
@@ -147,20 +163,22 @@ def test_conda_worker_error_missing_config_param(Worker, dask_scheduler):
@pytest.mark.parametrize("Worker", ALL_WORKERS)
def test_conda_worker_error_unknown_env(Worker, dask_scheduler):
- conda_env = 'xxx'
+ conda_env = "xxx"
with get_conda_worker(
- 'starting_kit', conda_env=conda_env, Worker=Worker,
- dask_scheduler=dask_scheduler
+ "starting_kit",
+ conda_env=conda_env,
+ Worker=Worker,
+ dask_scheduler=dask_scheduler,
) as worker:
msg_err = f"specified conda environment {conda_env} does not exist."
with pytest.raises(ValueError, match=msg_err):
worker.setup()
- assert worker.status == 'error'
+ assert worker.status == "error"
@pytest.mark.parametrize("Worker", ALL_WORKERS)
def test_conda_worker_error_multiple_launching(Worker, dask_scheduler):
- submission = 'starting_kit'
+ submission = "starting_kit"
with get_conda_worker(
submission, Worker=Worker, dask_scheduler=dask_scheduler
) as worker:
@@ -177,10 +195,10 @@ def test_conda_worker_error_multiple_launching(Worker, dask_scheduler):
@pytest.mark.parametrize("Worker", ALL_WORKERS)
def test_conda_worker_error_soon_teardown(Worker, dask_scheduler):
with get_conda_worker(
- 'starting_kit', Worker=Worker, dask_scheduler=dask_scheduler
+ "starting_kit", Worker=Worker, dask_scheduler=dask_scheduler
) as worker:
worker.setup()
- err_msg = 'Collect the results before to kill the worker.'
+ err_msg = "Collect the results before to kill the worker."
with pytest.raises(ValueError, match=err_msg):
worker.teardown()
@@ -188,7 +206,7 @@ def test_conda_worker_error_soon_teardown(Worker, dask_scheduler):
@pytest.mark.parametrize("Worker", ALL_WORKERS)
def test_conda_worker_error_soon_collection(Worker, dask_scheduler):
with get_conda_worker(
- 'starting_kit', Worker=Worker, dask_scheduler=dask_scheduler
+ "starting_kit", Worker=Worker, dask_scheduler=dask_scheduler
) as worker:
err_msg = r"Call the method setup\(\) and launch_submission\(\) before"
with pytest.raises(ValueError, match=err_msg):
@@ -202,27 +220,30 @@ def test_conda_worker_error_soon_collection(Worker, dask_scheduler):
@pytest.mark.parametrize("Worker", ALL_WORKERS)
def test_conda_worker_timeout(Worker, dask_scheduler):
with get_conda_worker(
- 'random_forest_10_10', Worker=Worker, dask_scheduler=dask_scheduler
+ "random_forest_10_10", Worker=Worker, dask_scheduler=dask_scheduler
) as worker:
- worker.config['timeout'] = 1
+ worker.config["timeout"] = 1
- assert worker.status == 'initialized'
+ assert worker.status == "initialized"
worker.setup()
- assert worker.status == 'setup'
+ assert worker.status == "setup"
worker.launch_submission()
assert not worker.check_timeout()
- assert worker.status == 'running'
+ assert worker.status == "running"
sleep(2)
assert worker.check_timeout() is True
- assert worker.status == 'timeout'
+ assert worker.status == "timeout"
exit_status, _ = worker.collect_results()
assert exit_status > 0
- assert worker.status == 'collected'
+ assert worker.status == "collected"
worker.teardown()
# check that teardown removed the predictions
- output_training_dir = os.path.join(worker.config['kit_dir'],
- 'submissions',
- worker.submission,
- 'training_output')
- assert not os.path.exists(output_training_dir), \
- "teardown() failed to remove the predictions"
+ output_training_dir = os.path.join(
+ worker.config["kit_dir"],
+ "submissions",
+ worker.submission,
+ "training_output",
+ )
+ assert not os.path.exists(
+ output_training_dir
+ ), "teardown() failed to remove the predictions"
diff --git a/ramp-engine/ramp_engine/tests/test_daemon.py b/ramp-engine/ramp_engine/tests/test_daemon.py
index 9cabba096..6968d4c45 100644
--- a/ramp-engine/ramp_engine/tests/test_daemon.py
+++ b/ramp-engine/ramp_engine/tests/test_daemon.py
@@ -23,17 +23,17 @@ def session_toy(database_connection):
ramp_config = ramp_config_template()
try:
deployment_dir = create_toy_db(database_config, ramp_config)
- with session_scope(database_config['sqlalchemy']) as session:
+ with session_scope(database_config["sqlalchemy"]) as session:
yield session
finally:
shutil.rmtree(deployment_dir, ignore_errors=True)
- db, _ = setup_db(database_config['sqlalchemy'])
+ db, _ = setup_db(database_config["sqlalchemy"])
Model.metadata.drop_all(db)
def test_daemon_error_init():
with pytest.raises(ValueError, match="The path xxx is not existing"):
- Daemon(config=database_config_template(), events_dir='xxx')
+ Daemon(config=database_config_template(), events_dir="xxx")
def test_daemon(session_toy):
@@ -44,7 +44,7 @@ def test_daemon(session_toy):
event.closing_timestamp = datetime.datetime.utcnow()
session_toy.commit()
- events_dir = os.path.join(os.path.dirname(__file__), 'events')
+ events_dir = os.path.join(os.path.dirname(__file__), "events")
daemon = Daemon(config=database_config_template(), events_dir=events_dir)
try:
diff --git a/ramp-engine/ramp_engine/tests/test_dispatcher.py b/ramp-engine/ramp_engine/tests/test_dispatcher.py
index aadff2fbf..69e5e07b0 100644
--- a/ramp-engine/ramp_engine/tests/test_dispatcher.py
+++ b/ramp-engine/ramp_engine/tests/test_dispatcher.py
@@ -33,11 +33,11 @@ def session_toy(database_connection):
ramp_config = ramp_config_template()
try:
deployment_dir = create_toy_db(database_config, ramp_config)
- with session_scope(database_config['sqlalchemy']) as session:
+ with session_scope(database_config["sqlalchemy"]) as session:
yield session
finally:
shutil.rmtree(deployment_dir, ignore_errors=True)
- db, _ = setup_db(database_config['sqlalchemy'])
+ db, _ = setup_db(database_config["sqlalchemy"])
Model.metadata.drop_all(db)
@@ -47,10 +47,10 @@ def session_toy_aws(database_connection):
ramp_config_aws = ramp_aws_config_template()
try:
deployment_dir = create_toy_db(database_config, ramp_config_aws)
- with session_scope(database_config['sqlalchemy']) as session:
+ with session_scope(database_config["sqlalchemy"]) as session:
yield session
finally:
- db, _ = setup_db(database_config['sqlalchemy'])
+ db, _ = setup_db(database_config["sqlalchemy"])
Model.metadata.drop_all(db)
shutil.rmtree(deployment_dir, ignore_errors=True)
@@ -58,12 +58,12 @@ def session_toy_aws(database_connection):
def test_error_handling_worker_setup_error(session_toy, caplog):
# make sure the error on the worker.setup is dealt with correctly
# set mock worker
- class Worker_mock():
+ class Worker_mock:
def __init__(self, *args, **kwargs):
self.state = None
def setup(self):
- raise Exception('Test error')
+ raise Exception("Test error")
def teardown(self):
pass
@@ -73,58 +73,67 @@ def teardown(self):
worker = Worker_mock()
dispatcher = Dispatcher(
- config=config, event_config=event_config, worker=Worker_mock,
- n_workers=-1, hunger_policy='exit'
+ config=config,
+ event_config=event_config,
+ worker=Worker_mock,
+ n_workers=-1,
+ hunger_policy="exit",
)
dispatcher.launch()
submissions = get_submissions(
- session_toy, event_config['ramp']['event_name'], 'checking_error'
+ session_toy, event_config["ramp"]["event_name"], "checking_error"
)
assert len(submissions) == 6
- worker.status = 'error'
- assert 'Test error' in caplog.text
+ worker.status = "error"
+ assert "Test error" in caplog.text
def _update_worker_config(event_config, Worker, dask_scheduler):
if issubclass(Worker, DaskWorker):
- pytest.importorskip('dask')
- pytest.importorskip('dask.distributed')
- event_config['worker']['dask_scheduler'] = dask_scheduler
- event_config['worker']['worker_type'] = 'dask'
+ pytest.importorskip("dask")
+ pytest.importorskip("dask.distributed")
+ event_config["worker"]["dask_scheduler"] = dask_scheduler
+ event_config["worker"]["worker_type"] = "dask"
-@pytest.mark.parametrize('Worker', ALL_WORKERS)
+@pytest.mark.parametrize("Worker", ALL_WORKERS)
def test_integration_dispatcher(session_toy, Worker, dask_scheduler):
config = read_config(database_config_template())
event_config = read_config(ramp_config_template())
_update_worker_config(event_config, Worker, dask_scheduler)
dispatcher = Dispatcher(
- config=config, event_config=event_config, worker=Worker,
- n_workers=-1, hunger_policy='exit'
+ config=config,
+ event_config=event_config,
+ worker=Worker,
+ n_workers=-1,
+ hunger_policy="exit",
)
dispatcher.launch()
     # the iris kit contains a submission which should fail for each user
submissions = get_submissions(
- session_toy, event_config['ramp']['event_name'], 'training_error'
+ session_toy, event_config["ramp"]["event_name"], "training_error"
)
assert len(submissions) == 2
submission = get_submission_by_id(session_toy, submissions[0][0])
- assert 'ValueError' in submission.error_msg
+ assert "ValueError" in submission.error_msg
-@pytest.mark.parametrize('Worker', ALL_WORKERS)
+@pytest.mark.parametrize("Worker", ALL_WORKERS)
def test_unit_test_dispatcher(session_toy, Worker, dask_scheduler):
     # make sure that the number of workers is bigger than the number of
     # submissions
config = read_config(database_config_template())
event_config = read_config(ramp_config_template())
_update_worker_config(event_config, Worker, dask_scheduler)
- dispatcher = Dispatcher(config=config,
- event_config=event_config,
- worker=Worker, n_workers=100,
- hunger_policy='exit')
+ dispatcher = Dispatcher(
+ config=config,
+ event_config=event_config,
+ worker=Worker,
+ n_workers=100,
+ hunger_policy="exit",
+ )
     # check that all the queues are empty
assert dispatcher._awaiting_worker_queue.empty()
@@ -132,12 +141,12 @@ def test_unit_test_dispatcher(session_toy, Worker, dask_scheduler):
assert dispatcher._processed_submission_queue.empty()
# check that all submissions are queued
- submissions = get_submissions(session_toy, 'iris_test', 'new')
+ submissions = get_submissions(session_toy, "iris_test", "new")
dispatcher.fetch_from_db(session_toy)
     # we should subtract the starting kit from the number of submissions for
     # each user
assert dispatcher._awaiting_worker_queue.qsize() == len(submissions) - 2
- submissions = get_submissions(session_toy, 'iris_test', 'sent_to_training')
+ submissions = get_submissions(session_toy, "iris_test", "sent_to_training")
assert len(submissions) == 6
# start the training
@@ -146,14 +155,13 @@ def test_unit_test_dispatcher(session_toy, Worker, dask_scheduler):
while not dispatcher._processing_worker_queue.empty():
dispatcher.collect_result(session_toy)
- assert len(get_submissions(session_toy, 'iris_test', 'new')) == 2
- assert (len(get_submissions(session_toy, 'iris_test', 'training_error')) ==
- 2)
- assert len(get_submissions(session_toy, 'iris_test', 'tested')) == 4
+ assert len(get_submissions(session_toy, "iris_test", "new")) == 2
+ assert len(get_submissions(session_toy, "iris_test", "training_error")) == 2
+ assert len(get_submissions(session_toy, "iris_test", "tested")) == 4
dispatcher.update_database_results(session_toy)
assert dispatcher._processed_submission_queue.empty()
- event = get_event(session_toy, 'iris_test')
+ event = get_event(session_toy, "iris_test")
assert event.private_leaderboard_html
assert event.public_leaderboard_html_with_links
assert event.public_leaderboard_html_no_links
@@ -163,22 +171,23 @@ def test_unit_test_dispatcher(session_toy, Worker, dask_scheduler):
assert event.private_competition_leaderboard_html
-@pytest.mark.parametrize(
- "n_threads", [None, 4]
-)
-@pytest.mark.parametrize('Worker', ALL_WORKERS)
+@pytest.mark.parametrize("n_threads", [None, 4])
+@pytest.mark.parametrize("Worker", ALL_WORKERS)
def test_dispatcher_num_threads(n_threads, Worker, dask_scheduler):
- libraries = ('OMP', 'MKL', 'OPENBLAS')
+ libraries = ("OMP", "MKL", "OPENBLAS")
config = read_config(database_config_template())
event_config = read_config(ramp_config_template())
_update_worker_config(event_config, Worker, dask_scheduler)
     # check that by default we don't set the environment variables
- dispatcher = Dispatcher(config=config,
- event_config=event_config,
- worker=Worker, n_workers=100,
- n_threads=n_threads,
- hunger_policy='exit')
+ dispatcher = Dispatcher(
+ config=config,
+ event_config=event_config,
+ worker=Worker,
+ n_workers=100,
+ n_threads=n_threads,
+ hunger_policy="exit",
+ )
if n_threads is None:
assert dispatcher.n_threads is n_threads
for lib in libraries:
@@ -196,21 +205,27 @@ def test_dispatcher_error():
     # check that passing a value that is not a number will raise a TypeError
err_msg = "The parameter 'n_threads' should be a positive integer"
with pytest.raises(TypeError, match=err_msg):
- Dispatcher(config=config,
- event_config=event_config,
- worker=CondaEnvWorker, n_workers=100,
- n_threads='whatever',
- hunger_policy='exit')
+ Dispatcher(
+ config=config,
+ event_config=event_config,
+ worker=CondaEnvWorker,
+ n_workers=100,
+ n_threads="whatever",
+ hunger_policy="exit",
+ )
-@pytest.mark.parametrize('Worker', ALL_WORKERS)
+@pytest.mark.parametrize("Worker", ALL_WORKERS)
def test_dispatcher_timeout(session_toy, Worker, dask_scheduler):
config = read_config(database_config_template())
event_config = read_config(ramp_config_template())
_update_worker_config(event_config, Worker, dask_scheduler)
dispatcher = Dispatcher(
- config=config, event_config=event_config, worker=Worker,
- n_workers=-1, hunger_policy='exit'
+ config=config,
+ event_config=event_config,
+ worker=Worker,
+ n_workers=-1,
+ hunger_policy="exit",
)
# override the timeout of the worker
dispatcher._worker_config["timeout"] = 1
@@ -219,7 +234,7 @@ def test_dispatcher_timeout(session_toy, Worker, dask_scheduler):
# we should have at least 3 submissions which will fail:
# 2 for errors and 1 for timeout
submissions = get_submissions(
- session_toy, event_config['ramp']['event_name'], 'training_error'
+ session_toy, event_config["ramp"]["event_name"], "training_error"
)
assert len(submissions) >= 2
@@ -227,19 +242,24 @@ def test_dispatcher_timeout(session_toy, Worker, dask_scheduler):
def test_dispatcher_worker_retry(session_toy):
config = read_config(database_config_template())
event_config = read_config(ramp_config_template())
- dispatcher = Dispatcher(config=config,
- event_config=event_config,
- worker=CondaEnvWorker, n_workers=10,
- hunger_policy='exit')
+ dispatcher = Dispatcher(
+ config=config,
+ event_config=event_config,
+ worker=CondaEnvWorker,
+ n_workers=10,
+ hunger_policy="exit",
+ )
dispatcher.fetch_from_db(session_toy)
dispatcher.launch_workers(session_toy)
# Get one worker and set status to 'retry'
- worker, (submission_id, submission_name) = \
- dispatcher._processing_worker_queue.get()
- setattr(worker, 'status', 'retry')
- assert worker.status == 'retry'
+ worker, (
+ submission_id,
+ submission_name,
+ ) = dispatcher._processing_worker_queue.get()
+ setattr(worker, "status", "retry")
+ assert worker.status == "retry"
# Add back to queue
dispatcher._processing_worker_queue.put_nowait(
(worker, (submission_id, submission_name))
@@ -247,7 +267,7 @@ def test_dispatcher_worker_retry(session_toy):
while not dispatcher._processing_worker_queue.empty():
dispatcher.collect_result(session_toy)
- submissions = get_submissions(session_toy, 'iris_test', 'new')
+ submissions = get_submissions(session_toy, "iris_test", "new")
assert submission_name in [sub[1] for sub in submissions]
@@ -258,41 +278,49 @@ def test_dispatcher_aws_not_launching(session_toy_aws, caplog):
config = read_config(database_config_template())
event_config = read_config(ramp_aws_config_template())
- dispatcher = Dispatcher(config=config,
- event_config=event_config,
- worker=AWSWorker, n_workers=10,
- hunger_policy='exit')
+ dispatcher = Dispatcher(
+ config=config,
+ event_config=event_config,
+ worker=AWSWorker,
+ n_workers=10,
+ hunger_policy="exit",
+ )
dispatcher.fetch_from_db(session_toy_aws)
- submissions = get_submissions(session_toy_aws, 'iris_aws_test', 'new')
+ submissions = get_submissions(session_toy_aws, "iris_aws_test", "new")
dispatcher.launch_workers(session_toy_aws)
- assert 'AuthFailure' in caplog.text
+ assert "AuthFailure" in caplog.text
# training should not have started
- assert 'training' not in caplog.text
+ assert "training" not in caplog.text
num_running_workers = dispatcher._processing_worker_queue.qsize()
assert num_running_workers == 0
- submissions2 = get_submissions(session_toy_aws, 'iris_aws_test', 'new')
+ submissions2 = get_submissions(session_toy_aws, "iris_aws_test", "new")
# assert that all the submissions are still in the 'new' state
assert len(submissions) == len(submissions2)
-@mock.patch('ramp_engine.aws.api.download_log')
-@mock.patch('ramp_engine.aws.api.check_instance_status')
-@mock.patch('ramp_engine.aws.api._get_log_content')
-@mock.patch('ramp_engine.aws.api._training_successful')
-@mock.patch('ramp_engine.aws.api._training_finished')
-@mock.patch('ramp_engine.aws.api.is_spot_terminated')
-@mock.patch('ramp_engine.aws.api.launch_train')
-@mock.patch('ramp_engine.aws.api.upload_submission')
-@mock.patch('ramp_engine.aws.api.launch_ec2_instances')
-def test_info_on_training_error(test_launch_ec2_instances, upload_submission,
- launch_train,
- is_spot_terminated, training_finished,
- training_successful,
- get_log_content, check_instance_status,
- download_log,
- session_toy_aws,
- caplog):
+@mock.patch("ramp_engine.aws.api.download_log")
+@mock.patch("ramp_engine.aws.api.check_instance_status")
+@mock.patch("ramp_engine.aws.api._get_log_content")
+@mock.patch("ramp_engine.aws.api._training_successful")
+@mock.patch("ramp_engine.aws.api._training_finished")
+@mock.patch("ramp_engine.aws.api.is_spot_terminated")
+@mock.patch("ramp_engine.aws.api.launch_train")
+@mock.patch("ramp_engine.aws.api.upload_submission")
+@mock.patch("ramp_engine.aws.api.launch_ec2_instances")
+def test_info_on_training_error(
+ test_launch_ec2_instances,
+ upload_submission,
+ launch_train,
+ is_spot_terminated,
+ training_finished,
+ training_successful,
+ get_log_content,
+ check_instance_status,
+ download_log,
+ session_toy_aws,
+ caplog,
+):
# make sure that the Python error from the solution is passed to the
# dispatcher
     # everything should be mocked as correct output from AWS instances
@@ -300,6 +328,7 @@ def test_info_on_training_error(test_launch_ec2_instances, upload_submission,
# mock dummy AWS instance
class DummyInstance:
id = 1
+
test_launch_ec2_instances.return_value = (DummyInstance(),), 0
upload_submission.return_value = 0
launch_train.return_value = 0
@@ -310,19 +339,20 @@ class DummyInstance:
config = read_config(database_config_template())
event_config = read_config(ramp_aws_config_template())
- dispatcher = Dispatcher(config=config,
- event_config=event_config,
- worker=AWSWorker, n_workers=10,
- hunger_policy='exit')
+ dispatcher = Dispatcher(
+ config=config,
+ event_config=event_config,
+ worker=AWSWorker,
+ n_workers=10,
+ hunger_policy="exit",
+ )
dispatcher.fetch_from_db(session_toy_aws)
dispatcher.launch_workers(session_toy_aws)
num_running_workers = dispatcher._processing_worker_queue.qsize()
# worker, (submission_id, submission_name) = \
# dispatcher._processing_worker_queue.get()
# assert worker.status == 'running'
- submissions = get_submissions(session_toy_aws,
- 'iris_aws_test',
- 'training')
+ submissions = get_submissions(session_toy_aws, "iris_aws_test", "training")
ids = [submissions[idx][0] for idx in range(len(submissions))]
assert len(submissions) > 1
assert num_running_workers == len(ids)
@@ -332,9 +362,9 @@ class DummyInstance:
# now we will end the submission with training error
training_finished.return_value = True
- training_error_msg = 'Python error here'
+ training_error_msg = "Python error here"
get_log_content.return_value = training_error_msg
- check_instance_status.return_value = 'finished'
+ check_instance_status.return_value = "finished"
dispatcher.collect_result(session_toy_aws)
@@ -343,9 +373,7 @@ class DummyInstance:
assert num_running_workers == 0
- submissions = get_submissions(session_toy_aws,
- 'iris_aws_test',
- 'training_error')
+ submissions = get_submissions(session_toy_aws, "iris_aws_test", "training_error")
assert len(submissions) == len(ids)
submission = get_submission_by_id(session_toy_aws, submissions[0][0])
diff --git a/ramp-engine/ramp_engine/tests/test_remote_worker.py b/ramp-engine/ramp_engine/tests/test_remote_worker.py
index 685c5ef00..3274f974a 100644
--- a/ramp-engine/ramp_engine/tests/test_remote_worker.py
+++ b/ramp-engine/ramp_engine/tests/test_remote_worker.py
@@ -8,48 +8,45 @@
def test_dask_workers_single_machine():
workers_nthreads = {
- 'tcp://127.0.0.1:38901': 4,
- 'tcp://127.0.0.1:43229': 4,
- 'tcp://127.0.0.1:46663': 4
+ "tcp://127.0.0.1:38901": 4,
+ "tcp://127.0.0.1:43229": 4,
+ "tcp://127.0.0.1:46663": 4,
}
assert _check_dask_workers_single_machine(workers_nthreads.keys()) is True
- workers_nthreads = {
- 'tcp://127.0.0.1:38901': 4,
- 'tcp://127.0.0.2:43229': 4
- }
+ workers_nthreads = {"tcp://127.0.0.1:38901": 4, "tcp://127.0.0.2:43229": 4}
- msg = 'dask workers should .* on 1 machine, found 2:'
+ msg = "dask workers should .* on 1 machine, found 2:"
with pytest.raises(ValueError, match=msg):
_check_dask_workers_single_machine(workers_nthreads.keys())
- msg = 'dask workers should .* on 1 machine, found 0:'
+ msg = "dask workers should .* on 1 machine, found 0:"
with pytest.raises(ValueError, match=msg):
- _check_dask_workers_single_machine('some invalid string')
+ _check_dask_workers_single_machine("some invalid string")
def test_serialize_deserialize_folder(tmpdir):
with pytest.raises(FileNotFoundError):
- _serialize_folder('some_invalid_path')
+ _serialize_folder("some_invalid_path")
base_dir = Path(tmpdir)
- src_dir = base_dir / 'src'
+ src_dir = base_dir / "src"
src_dir.mkdir()
- with open(src_dir / '1.txt', 'wt') as fh:
- fh.write('a')
- (src_dir / 'dir2').mkdir()
+ with open(src_dir / "1.txt", "wt") as fh:
+ fh.write("a")
+ (src_dir / "dir2").mkdir()
stream = _serialize_folder(src_dir)
assert isinstance(stream, bytes)
- dest_dir = base_dir / 'dest'
+ dest_dir = base_dir / "dest"
_deserialize_folder(stream, dest_dir)
assert sorted(os.listdir(src_dir)) == sorted(os.listdir(dest_dir))
# create some other file objects in the destination dir and try
# to deserialize a second time
- (dest_dir / 'dir3').mkdir()
+ (dest_dir / "dir3").mkdir()
_deserialize_folder(stream, dest_dir)
assert sorted(os.listdir(src_dir)) == sorted(os.listdir(dest_dir))
diff --git a/ramp-engine/setup.py b/ramp-engine/setup.py
index 5574c2549..3d3edc8c2 100644
--- a/ramp-engine/setup.py
+++ b/ramp-engine/setup.py
@@ -5,42 +5,44 @@
from setuptools import find_packages, setup
# get __version__ from _version.py
-ver_file = os.path.join('ramp_engine', '_version.py')
+ver_file = os.path.join("ramp_engine", "_version.py")
with open(ver_file) as f:
exec(f.read())
-DISTNAME = 'ramp-engine'
+DISTNAME = "ramp-engine"
DESCRIPTION = "Submissions orchestrator and processors for the RAMP bundle"
-with codecs.open('README.rst', encoding='utf-8-sig') as f:
+with codecs.open("README.rst", encoding="utf-8-sig") as f:
LONG_DESCRIPTION = f.read()
-MAINTAINER = 'A. Boucaud, B. Kegl, G. Lemaitre, J. Van den Bossche'
-MAINTAINER_EMAIL = 'boucaud.alexandre@gmail.com, guillaume.lemaitre@inria.fr'
-URL = 'https://github.com/paris-saclay-cds/ramp-board'
-LICENSE = 'BSD (3-clause)'
-DOWNLOAD_URL = 'https://github.com/paris-saclay-cds/ramp-board'
+MAINTAINER = "A. Boucaud, B. Kegl, G. Lemaitre, J. Van den Bossche"
+MAINTAINER_EMAIL = "boucaud.alexandre@gmail.com, guillaume.lemaitre@inria.fr"
+URL = "https://github.com/paris-saclay-cds/ramp-board"
+LICENSE = "BSD (3-clause)"
+DOWNLOAD_URL = "https://github.com/paris-saclay-cds/ramp-board"
VERSION = __version__ # noqa
-CLASSIFIERS = ['Intended Audience :: Science/Research',
- 'Intended Audience :: Developers',
- 'License :: OSI Approved',
- 'Programming Language :: Python',
- 'Topic :: Software Development',
- 'Topic :: Scientific/Engineering',
- 'Operating System :: Microsoft :: Windows',
- 'Operating System :: POSIX',
- 'Operating System :: Unix',
- 'Operating System :: MacOS',
- 'Programming Language :: Python :: 3.6',
- 'Programming Language :: Python :: 3.7',
- 'Programming Language :: Python :: 3.8']
-INSTALL_REQUIRES = ['click', 'numpy', 'psycopg2-binary', 'sqlalchemy']
+CLASSIFIERS = [
+ "Intended Audience :: Science/Research",
+ "Intended Audience :: Developers",
+ "License :: OSI Approved",
+ "Programming Language :: Python",
+ "Topic :: Software Development",
+ "Topic :: Scientific/Engineering",
+ "Operating System :: Microsoft :: Windows",
+ "Operating System :: POSIX",
+ "Operating System :: Unix",
+ "Operating System :: MacOS",
+ "Programming Language :: Python :: 3.6",
+ "Programming Language :: Python :: 3.7",
+ "Programming Language :: Python :: 3.8",
+]
+INSTALL_REQUIRES = ["click", "numpy", "psycopg2-binary", "sqlalchemy"]
EXTRAS_REQUIRE = {
- 'tests': ['pytest', 'pytest-cov'],
- 'docs': ['sphinx', 'sphinx_rtd_theme', 'numpydoc']
+ "tests": ["pytest", "pytest-cov"],
+ "docs": ["sphinx", "sphinx_rtd_theme", "numpydoc"],
}
PACKAGE_DATA = {
- 'ramp_engine': [
- os.path.join('tests', 'events', 'iris_test', 'config.yml'),
- os.path.join('tests', 'events', 'boston_housing', 'config.yml')
+ "ramp_engine": [
+ os.path.join("tests", "events", "iris_test", "config.yml"),
+ os.path.join("tests", "events", "boston_housing", "config.yml"),
]
}
@@ -61,7 +63,5 @@
install_requires=INSTALL_REQUIRES,
extras_require=EXTRAS_REQUIRE,
python_requires=">=3.7",
- entry_points={
- 'console_scripts': ['ramp-launch = ramp_engine.cli:start']
- }
+ entry_points={"console_scripts": ["ramp-launch = ramp_engine.cli:start"]},
)
diff --git a/ramp-frontend/ramp_frontend/__init__.py b/ramp-frontend/ramp_frontend/__init__.py
index e28f3e80b..51de4bd70 100644
--- a/ramp-frontend/ramp_frontend/__init__.py
+++ b/ramp-frontend/ramp_frontend/__init__.py
@@ -10,9 +10,7 @@
from ._version import __version__ # noqa
-all = [
- '__version__'
-]
+all = ["__version__"]
HERE = os.path.dirname(__file__)
db = SQLAlchemy(model_class=Model)
@@ -39,32 +37,34 @@ def create_app(config):
logger_config = config.pop("LOGGER")
dictConfig(logger_config)
else:
- dictConfig({
- 'version': 1,
- 'formatters': {'default': {
- 'format': '[%(asctime)s] [%(levelname)s] %(message)s',
- }},
- 'handlers': {'wsgi': {
- 'class': 'logging.StreamHandler',
- 'stream': 'ext://flask.logging.wsgi_errors_stream',
- 'formatter': 'default'
- }},
- 'root': {
- 'level': 'INFO',
- 'handlers': ['wsgi']
+ dictConfig(
+ {
+ "version": 1,
+ "formatters": {
+ "default": {
+ "format": "[%(asctime)s] [%(levelname)s] %(message)s",
+ }
+ },
+ "handlers": {
+ "wsgi": {
+ "class": "logging.StreamHandler",
+ "stream": "ext://flask.logging.wsgi_errors_stream",
+ "formatter": "default",
+ }
+ },
+ "root": {"level": "INFO", "handlers": ["wsgi"]},
}
- })
+ )
- app = Flask('ramp-frontend', root_path=HERE)
+ app = Flask("ramp-frontend", root_path=HERE)
app.config.update(config)
with app.app_context():
db.init_app(app)
# register the login manager
login_manager.init_app(app)
- login_manager.login_view = 'auth.login'
- login_manager.login_message = ('Please log in or sign up to access '
- 'this page.')
+ login_manager.login_view = "auth.login"
+ login_manager.login_message = "Please log in or sign up to access " "this page."
# register the email manager
mail.init_app(app)
# register our blueprint
@@ -73,6 +73,7 @@ def create_app(config):
from .views import general
from .views import leaderboard
from .views import ramp
+
app.register_blueprint(admin.mod)
app.register_blueprint(auth.mod)
app.register_blueprint(general.mod)
diff --git a/ramp-frontend/ramp_frontend/_version.py b/ramp-frontend/ramp_frontend/_version.py
index 726e34836..58d460c8c 100644
--- a/ramp-frontend/ramp_frontend/_version.py
+++ b/ramp-frontend/ramp_frontend/_version.py
@@ -21,4 +21,4 @@
# 'X.Y.dev0' is the canonical version of 'X.Y.dev'
#
-__version__ = '0.9.0.dev0'
+__version__ = "0.9.0.dev0"
diff --git a/ramp-frontend/ramp_frontend/cli.py b/ramp-frontend/ramp_frontend/cli.py
index 56df6af98..478e78f75 100644
--- a/ramp-frontend/ramp_frontend/cli.py
+++ b/ramp-frontend/ramp_frontend/cli.py
@@ -2,7 +2,7 @@
from .wsgi import make_app
-CONTEXT_SETTINGS = dict(help_option_names=['-h', '--help'])
+CONTEXT_SETTINGS = dict(help_option_names=["-h", "--help"])
@click.group(context_settings=CONTEXT_SETTINGS)
@@ -11,21 +11,32 @@ def main():
@main.command()
-@click.option("--config", default='config.yml', show_default=True,
- help='Configuration file in YAML format')
-@click.option("--port", default=8080, show_default=True,
- help='The port where to launch the website')
-@click.option("--host", default='127.0.0.1', show_default=True,
- help='The IP address where to launch the website')
+@click.option(
+ "--config",
+ default="config.yml",
+ show_default=True,
+ help="Configuration file in YAML format",
+)
+@click.option(
+ "--port",
+ default=8080,
+ show_default=True,
+ help="The port where to launch the website",
+)
+@click.option(
+ "--host",
+ default="127.0.0.1",
+ show_default=True,
+ help="The IP address where to launch the website",
+)
def test_launch(config, port, host):
app = make_app(config)
- app.run(port=port, use_reloader=False,
- host=host, processes=1, threaded=False)
+ app.run(port=port, use_reloader=False, host=host, processes=1, threaded=False)
def start():
main()
-if __name__ == '__main__':
+if __name__ == "__main__":
start()
diff --git a/ramp-frontend/ramp_frontend/forms.py b/ramp-frontend/ramp_frontend/forms.py
index 4aee48390..245db7873 100644
--- a/ramp-frontend/ramp_frontend/forms.py
+++ b/ramp-frontend/ramp_frontend/forms.py
@@ -21,15 +21,15 @@
def _space_check(form, field):
- if ' ' in field.data:
- raise ValidationError('Field cannot contain space.')
+ if " " in field.data:
+ raise ValidationError("Field cannot contain space.")
def _ascii_check(form, field):
try:
- field.data.encode('ascii')
+ field.data.encode("ascii")
except Exception:
- raise ValidationError('Field cannot contain non-ascii characters.')
+ raise ValidationError("Field cannot contain non-ascii characters.")
class LoginForm(FlaskForm):
@@ -42,8 +42,9 @@ class LoginForm(FlaskForm):
password : str
The user password.
"""
- user_name = StringField('user_name', [validators.DataRequired()])
- password = PasswordField('password', [validators.DataRequired()])
+
+ user_name = StringField("user_name", [validators.DataRequired()])
+ password = PasswordField("password", [validators.DataRequired()])
class UserUpdateProfileForm(FlaskForm):
@@ -76,19 +77,25 @@ class UserUpdateProfileForm(FlaskForm):
is_want_news : bool, default is True
         Whether the user wants some info from us.
"""
- user_name = StringField('user_name', [
- validators.DataRequired(), validators.Length(min=1, max=20),
- _space_check])
- firstname = StringField('firstname', [validators.DataRequired()])
- lastname = StringField('lastname', [validators.DataRequired()])
- email = StringField('email', [validators.DataRequired()])
- linkedin_url = StringField('linkedin_url')
- twitter_url = StringField('twitter_url')
- facebook_url = StringField('facebook_url')
- google_url = StringField('google_url')
- github_url = StringField('github_url')
- website_url = StringField('website_url')
- bio = StringField('bio')
+
+ user_name = StringField(
+ "user_name",
+ [
+ validators.DataRequired(),
+ validators.Length(min=1, max=20),
+ _space_check,
+ ],
+ )
+ firstname = StringField("firstname", [validators.DataRequired()])
+ lastname = StringField("lastname", [validators.DataRequired()])
+ email = StringField("email", [validators.DataRequired()])
+ linkedin_url = StringField("linkedin_url")
+ twitter_url = StringField("twitter_url")
+ facebook_url = StringField("facebook_url")
+ google_url = StringField("google_url")
+ github_url = StringField("github_url")
+ website_url = StringField("website_url")
+ bio = StringField("bio")
is_want_news = BooleanField()
@@ -124,7 +131,8 @@ class UserCreateProfileForm(UserUpdateProfileForm):
is_want_news : bool, default is True
         Whether the user wants some info from us.
"""
- password = PasswordField('password', [validators.DataRequired()])
+
+ password = PasswordField("password", [validators.DataRequired()])
class CodeForm(FlaskForm):
@@ -138,6 +146,7 @@ class CodeForm(FlaskForm):
         The placeholder containing the name of the submission file and the
         associated code.
"""
+
names_codes: List[Tuple[str, int]] = []
@@ -152,8 +161,10 @@ class SubmitForm(FlaskForm):
submission_name : str
The name of the submission.
"""
- submission_name = StringField('submission_name',
- [validators.DataRequired(), _space_check])
+
+ submission_name = StringField(
+ "submission_name", [validators.DataRequired(), _space_check]
+ )
class UploadForm(FlaskForm):
@@ -167,7 +178,8 @@ class UploadForm(FlaskForm):
file : file
File to be uploaded and loaded into the sandbox code form.
"""
- file = FileField('file')
+
+ file = FileField("file")
class EventUpdateProfileForm(FlaskForm):
@@ -198,8 +210,9 @@ class EventUpdateProfileForm(FlaskForm):
public_opening_timestamp : datetime
The date and time when the public phase of the event is opening.
"""
+
title = StringField(
- 'event_title', [validators.DataRequired(), validators.Length(max=80)]
+ "event_title", [validators.DataRequired(), validators.Length(max=80)]
)
is_send_trained_mails = BooleanField()
is_send_submitted_mails = BooleanField()
@@ -207,34 +220,36 @@ class EventUpdateProfileForm(FlaskForm):
is_controled_signup = BooleanField()
is_competitive = BooleanField()
min_duration_between_submissions_hour = IntegerField(
- 'min_h', [validators.NumberRange(min=0)]
+ "min_h", [validators.NumberRange(min=0)]
)
min_duration_between_submissions_minute = IntegerField(
- 'min_m', [validators.NumberRange(min=0, max=59)]
+ "min_m", [validators.NumberRange(min=0, max=59)]
)
min_duration_between_submissions_second = IntegerField(
- 'min_s', [validators.NumberRange(min=0, max=59)]
+ "min_s", [validators.NumberRange(min=0, max=59)]
)
opening_timestamp = DateTimeField(
- 'opening_timestamp', [], format='%Y-%m-%d %H:%M:%S'
+ "opening_timestamp", [], format="%Y-%m-%d %H:%M:%S"
)
closing_timestamp = DateTimeField(
- 'closing_timestamp', [], format='%Y-%m-%d %H:%M:%S'
+ "closing_timestamp", [], format="%Y-%m-%d %H:%M:%S"
)
public_opening_timestamp = DateTimeField(
- 'public_opening_timestamp', [], format='%Y-%m-%d %H:%M:%S'
+ "public_opening_timestamp", [], format="%Y-%m-%d %H:%M:%S"
)
class MultiCheckboxField(SelectMultipleField):
"""A form containing multiple checkboxes."""
+
widget = ListWidget(prefix_label=False)
option_widget = CheckboxInput()
class ImportForm(FlaskForm):
"""The form allowing to select which model to view."""
- selected_f_names = MultiCheckboxField('selected_f_names')
+
+ selected_f_names = MultiCheckboxField("selected_f_names")
class CreditForm(FlaskForm):
@@ -252,8 +267,9 @@ class CreditForm(FlaskForm):
name_credits : list
The name for the credits.
"""
- note = StringField('submission_name')
- self_credit = StringField('self credit')
+
+ note = StringField("submission_name")
+ self_credit = StringField("self credit")
name_credits: List[str] = []
@@ -279,43 +295,45 @@ class AskForEventForm(FlaskForm):
closing_timestamp : datetime
The date and time when the event is closing.
"""
+
suffix = StringField(
- 'event_suffix',
- [validators.DataRequired(), validators.Length(max=20), _ascii_check,
- _space_check]
+ "event_suffix",
+ [
+ validators.DataRequired(),
+ validators.Length(max=20),
+ _ascii_check,
+ _space_check,
+ ],
)
title = StringField(
- 'event_title',
- [validators.DataRequired(), validators.Length(max=80)]
+ "event_title", [validators.DataRequired(), validators.Length(max=80)]
)
n_students = IntegerField(
- 'n_students',
- [validators.DataRequired(), validators.NumberRange(min=0)]
+ "n_students",
+ [validators.DataRequired(), validators.NumberRange(min=0)],
)
min_duration_between_submissions_hour = IntegerField(
- 'min_h', [validators.NumberRange(min=0)]
+ "min_h", [validators.NumberRange(min=0)]
)
min_duration_between_submissions_minute = IntegerField(
- 'min_m', [validators.NumberRange(min=0, max=59)]
+ "min_m", [validators.NumberRange(min=0, max=59)]
)
min_duration_between_submissions_second = IntegerField(
- 'min_s', [validators.NumberRange(min=0, max=59)]
+ "min_s", [validators.NumberRange(min=0, max=59)]
)
opening_date = DateField(
- 'opening_date', [validators.DataRequired()], format='%Y-%m-%d'
+ "opening_date", [validators.DataRequired()], format="%Y-%m-%d"
)
closing_date = DateField(
- 'closing_date', [validators.DataRequired()], format='%Y-%m-%d'
+ "closing_date", [validators.DataRequired()], format="%Y-%m-%d"
)
class EmailForm(FlaskForm):
email = StringField(
- 'Email', validators=[validators.DataRequired(), validators.Email()]
+ "Email", validators=[validators.DataRequired(), validators.Email()]
)
class PasswordForm(FlaskForm):
- password = PasswordField(
- 'Password', validators=[validators.DataRequired()]
- )
+ password = PasswordField("Password", validators=[validators.DataRequired()])
diff --git a/ramp-frontend/ramp_frontend/testing.py b/ramp-frontend/ramp_frontend/testing.py
index b1453d0ec..aa3e7e037 100644
--- a/ramp-frontend/ramp_frontend/testing.py
+++ b/ramp-frontend/ramp_frontend/testing.py
@@ -27,10 +27,11 @@ def login(client, username, password):
response : :class:`flask.wrappers.Response`
The response of the client.
"""
- return client.post('/login', data=dict(
- user_name=username,
- password=password
- ), follow_redirects=True)
+ return client.post(
+ "/login",
+ data=dict(user_name=username, password=password),
+ follow_redirects=True,
+ )
def logout(client):
@@ -49,7 +50,7 @@ def logout(client):
response : :class:`flask.wrappers.Response`
The response of the client.
"""
- return client.get('/logout', follow_redirects=True)
+ return client.get("/logout", follow_redirects=True)
@contextmanager
diff --git a/ramp-frontend/ramp_frontend/tests/test_admin.py b/ramp-frontend/ramp_frontend/tests/test_admin.py
index 90b652f1b..fa610637d 100644
--- a/ramp-frontend/ramp_frontend/tests/test_admin.py
+++ b/ramp-frontend/ramp_frontend/tests/test_admin.py
@@ -25,7 +25,7 @@
from ramp_frontend.testing import login_scope
-@pytest.fixture(scope='module')
+@pytest.fixture(scope="module")
def client_session(database_connection):
database_config = read_config(database_config_template())
ramp_config = ramp_config_template()
@@ -33,64 +33,70 @@ def client_session(database_connection):
deployment_dir = create_toy_db(database_config, ramp_config)
flask_config = generate_flask_config(database_config)
app = create_app(flask_config)
- app.config['TESTING'] = True
- app.config['WTF_CSRF_ENABLED'] = False
- with session_scope(database_config['sqlalchemy']) as session:
+ app.config["TESTING"] = True
+ app.config["WTF_CSRF_ENABLED"] = False
+ with session_scope(database_config["sqlalchemy"]) as session:
yield app.test_client(), session
finally:
shutil.rmtree(deployment_dir, ignore_errors=True)
try:
# In case of failure we should close the global flask engine
from ramp_frontend import db as db_flask
+
db_flask.session.close()
except RuntimeError:
pass
- db, _ = setup_db(database_config['sqlalchemy'])
+ db, _ = setup_db(database_config["sqlalchemy"])
Model.metadata.drop_all(db)
@pytest.mark.parametrize(
"page",
- ["/approve_users",
- "/manage_users",
- "/sign_up/test_user",
- "/events/iris_test/sign_up/test_user",
- "/events/iris_test/update",
- "/user_interactions",
- "/events/iris_test/dashboard_submissions"]
+ [
+ "/approve_users",
+ "/manage_users",
+ "/sign_up/test_user",
+ "/events/iris_test/sign_up/test_user",
+ "/events/iris_test/update",
+ "/user_interactions",
+ "/events/iris_test/dashboard_submissions",
+ ],
)
def test_check_login_required(client_session, page):
client, _ = client_session
rv = client.get(page)
assert rv.status_code == 302
- assert 'http://localhost/login' in rv.location
+ assert "http://localhost/login" in rv.location
rv = client.get(page, follow_redirects=True)
assert rv.status_code == 200
@pytest.mark.parametrize(
"page, request_function",
- [("/approve_users", ["get", "post"]),
- ('/manage_users', ['get']),
- ("/sign_up/test_user", ["get"]),
- ("/events/iris_test/sign_up/test_user", ["get"]),
- ("/events/iris_test/update", ["get", "post"]),
- ("/user_interactions", ["get"]),
- ("/events/iris_test/dashboard_submissions", ["get"])]
+ [
+ ("/approve_users", ["get", "post"]),
+ ("/manage_users", ["get"]),
+ ("/sign_up/test_user", ["get"]),
+ ("/events/iris_test/sign_up/test_user", ["get"]),
+ ("/events/iris_test/update", ["get", "post"]),
+ ("/user_interactions", ["get"]),
+ ("/events/iris_test/dashboard_submissions", ["get"]),
+ ],
)
def test_check_admin_required(client_session, page, request_function):
client, _ = client_session
- with login_scope(client, 'test_user', 'test') as client:
+ with login_scope(client, "test_user", "test") as client:
for rf in request_function:
rv = getattr(client, rf)(page)
with client.session_transaction() as cs:
- flash_message = dict(cs['_flashes'])
- assert (flash_message['message'] ==
- 'Sorry User, you do not have admin rights')
+ flash_message = dict(cs["_flashes"])
+ assert (
+ flash_message["message"] == "Sorry User, you do not have admin rights"
+ )
assert rv.status_code == 302
- assert rv.location == 'http://localhost/problems'
+ assert rv.location == "http://localhost/problems"
rv = getattr(client, rf)(page, follow_redirects=True)
assert rv.status_code == 200
@@ -99,120 +105,129 @@ def test_approve_users_remove(client_session):
client, session = client_session
# create 2 new users
- add_user(session, 'xx', 'xx', 'xx', 'xx', 'xx', access_level='user')
- add_user(session, 'yy', 'yy', 'yy', 'yy', 'yy', access_level='asked')
+ add_user(session, "xx", "xx", "xx", "xx", "xx", access_level="user")
+ add_user(session, "yy", "yy", "yy", "yy", "yy", access_level="asked")
# ask for sign up for an event for the first user
- _, _, event_team = ask_sign_up_team(session, 'iris_test', 'xx')
+ _, _, event_team = ask_sign_up_team(session, "iris_test", "xx")
- with login_scope(client, 'test_iris_admin', 'test') as client:
+ with login_scope(client, "test_iris_admin", "test") as client:
         # GET check that we get all new users to be approved
- rv = client.get('/approve_users')
+ rv = client.get("/approve_users")
assert rv.status_code == 200
# line for user approval
- assert b'yy yy - yy' in rv.data
+ assert b"yy yy - yy" in rv.data
# line for the event approval
- assert b'iris_test - xx'
+ assert b"iris_test - xx"
# POST check that we are able to approve a user and event
- data = ImmutableMultiDict([
- ('submit_button', 'Remove!'),
- ('approve_users', 'yy'),
- ('approve_event_teams', str(event_team.id))
- ])
- rv = client.post('/approve_users', data=data)
+ data = ImmutableMultiDict(
+ [
+ ("submit_button", "Remove!"),
+ ("approve_users", "yy"),
+ ("approve_event_teams", str(event_team.id)),
+ ]
+ )
+ rv = client.post("/approve_users", data=data)
assert rv.status_code == 302
- assert rv.location == 'http://localhost/problems'
+ assert rv.location == "http://localhost/problems"
         # ensure that the previous changes have been committed within our
# session
session.commit()
- user = get_user_by_name(session, 'yy')
+ user = get_user_by_name(session, "yy")
assert user is None
- event_team = get_event_team_by_name(session, 'iris_test', 'xx')
+ event_team = get_event_team_by_name(session, "iris_test", "xx")
assert event_team is None
with client.session_transaction() as cs:
- flash_message = dict(cs['_flashes'])
- assert re.match(r"Removed users:\nyy\nRemoved event_team:\n"
- r"Event\(iris_test\)/Team\(.*xx.*\)\n",
- flash_message['Removed users'])
+ flash_message = dict(cs["_flashes"])
+ assert re.match(
+ r"Removed users:\nyy\nRemoved event_team:\n"
+ r"Event\(iris_test\)/Team\(.*xx.*\)\n",
+ flash_message["Removed users"],
+ )
def test_approve_users_approve(client_session):
client, session = client_session
# create 2 new users
- add_user(session, 'cc', 'cc', 'cc', 'cc', 'cc', access_level='user')
- add_user(session, 'dd', 'dd', 'dd', 'dd', 'dd', access_level='asked')
+ add_user(session, "cc", "cc", "cc", "cc", "cc", access_level="user")
+ add_user(session, "dd", "dd", "dd", "dd", "dd", access_level="asked")
# ask for sign up for an event for the first user
- _, _, event_team = ask_sign_up_team(session, 'iris_test', 'cc')
+ _, _, event_team = ask_sign_up_team(session, "iris_test", "cc")
- with login_scope(client, 'test_iris_admin', 'test') as client:
+ with login_scope(client, "test_iris_admin", "test") as client:
         # GET check that we get all new users to be approved
- rv = client.get('/approve_users')
+ rv = client.get("/approve_users")
assert rv.status_code == 200
# line for user approval
- assert b'dd dd - dd' in rv.data
+ assert b"dd dd - dd" in rv.data
# line for the event approval
- assert b'iris_test - cc'
+ assert b"iris_test - cc"
# POST check that we are able to approve a user and event
- data = ImmutableMultiDict([
- ('submit_button', 'Approve!'),
- ('approve_users', 'dd'),
- ('approve_event_teams', str(event_team.id))]
+ data = ImmutableMultiDict(
+ [
+ ("submit_button", "Approve!"),
+ ("approve_users", "dd"),
+ ("approve_event_teams", str(event_team.id)),
+ ]
)
- rv = client.post('/approve_users', data=data)
+ rv = client.post("/approve_users", data=data)
assert rv.status_code == 302
- assert rv.location == 'http://localhost/problems'
+ assert rv.location == "http://localhost/problems"
         # ensure that the previous changes have been committed within our
# session
session.commit()
- user = get_user_by_name(session, 'dd')
- assert user.access_level == 'user'
- event_team = get_event_team_by_name(session, 'iris_test', 'cc')
+ user = get_user_by_name(session, "dd")
+ assert user.access_level == "user"
+ event_team = get_event_team_by_name(session, "iris_test", "cc")
assert event_team.approved
with client.session_transaction() as cs:
- flash_message = dict(cs['_flashes'])
- assert re.match(r"Approved users:\ndd\nApproved event_team:\n"
- r"Event\(iris_test\)/Team\(.*cc.*\)\n",
- flash_message['Approved users'])
+ flash_message = dict(cs["_flashes"])
+ assert re.match(
+ r"Approved users:\ndd\nApproved event_team:\n"
+ r"Event\(iris_test\)/Team\(.*cc.*\)\n",
+ flash_message["Approved users"],
+ )
def test_approve_single_user(client_session):
client, session = client_session
- add_user(session, 'gg', 'gg', 'gg', 'gg', 'gg', access_level='asked')
- with login_scope(client, 'test_iris_admin', 'test') as client:
- rv = client.get('/sign_up/gg')
+ add_user(session, "gg", "gg", "gg", "gg", "gg", access_level="asked")
+ with login_scope(client, "test_iris_admin", "test") as client:
+ rv = client.get("/sign_up/gg")
assert rv.status_code == 302
- assert rv.location == 'http://localhost/problems'
+ assert rv.location == "http://localhost/problems"
with client.session_transaction() as cs:
- flash_message = dict(cs['_flashes'])
- assert re.match("User(.*gg.*) is signed up",
- flash_message['Successful sign-up'])
+ flash_message = dict(cs["_flashes"])
+ assert re.match(
+ "User(.*gg.*) is signed up", flash_message["Successful sign-up"]
+ )
         # ensure that the previous changes have been committed within our
# session
session.commit()
- user = get_user_by_name(session, 'gg')
- assert user.access_level == 'user'
+ user = get_user_by_name(session, "gg")
+ assert user.access_level == "user"
rv = client.get("/sign_up/unknown_user")
session.commit()
assert rv.status_code == 302
assert rv.location == "http://localhost/problems"
with client.session_transaction() as cs:
- flash_message = dict(cs['_flashes'])
- assert flash_message['message'] == 'No user unknown_user'
+ flash_message = dict(cs["_flashes"])
+ assert flash_message["message"] == "No user unknown_user"
def test_approve_sign_up_for_event(client_session):
client, session = client_session
- with login_scope(client, 'test_iris_admin', 'test') as client:
+ with login_scope(client, "test_iris_admin", "test") as client:
# check the redirection if the user or the event does not exist
rv = client.get("/events/xxx/sign_up/test_user")
@@ -220,43 +235,43 @@ def test_approve_sign_up_for_event(client_session):
assert rv.status_code == 302
assert rv.location == "http://localhost/problems"
with client.session_transaction() as cs:
- flash_message = dict(cs['_flashes'])
- assert flash_message['message'] == 'No event xxx or no user test_user'
+ flash_message = dict(cs["_flashes"])
+ assert flash_message["message"] == "No event xxx or no user test_user"
rv = client.get("/events/iris_test/sign_up/xxxx")
session.commit()
assert rv.status_code == 302
assert rv.location == "http://localhost/problems"
with client.session_transaction() as cs:
- flash_message = dict(cs['_flashes'])
- assert flash_message['message'] == 'No event iris_test or no user xxxx'
+ flash_message = dict(cs["_flashes"])
+ assert flash_message["message"] == "No event iris_test or no user xxxx"
- add_user(session, 'zz', 'zz', 'zz', 'zz', 'zz', access_level='user')
- _, _, event_team = ask_sign_up_team(session, 'iris_test', 'zz')
+ add_user(session, "zz", "zz", "zz", "zz", "zz", access_level="user")
+ _, _, event_team = ask_sign_up_team(session, "iris_test", "zz")
assert not event_team.approved
- rv = client.get('/events/iris_test/sign_up/zz')
+ rv = client.get("/events/iris_test/sign_up/zz")
assert rv.status_code == 302
assert rv.location == "http://localhost/problems"
session.commit()
- event_team = get_event_team_by_name(session, 'iris_test', 'zz')
+ event_team = get_event_team_by_name(session, "iris_test", "zz")
assert event_team.approved
with client.session_transaction() as cs:
- flash_message = dict(cs['_flashes'])
- assert "is signed up for Event" in flash_message['Successful sign-up']
+ flash_message = dict(cs["_flashes"])
+ assert "is signed up for Event" in flash_message["Successful sign-up"]
def test_manage_users(client_session):
client, session = client_session
# create 2 new users
- add_user(session, 'ff', 'ff', 'ff', 'ff', 'ff', access_level='user')
- add_user(session, 'll', 'll', 'll', 'll', 'll', access_level='asked')
+ add_user(session, "ff", "ff", "ff", "ff", "ff", access_level="user")
+ add_user(session, "ll", "ll", "ll", "ll", "ll", access_level="asked")
# ask for sign up for an event for the first user
- _, _, event_team = ask_sign_up_team(session, 'iris_test', 'xx')
+ _, _, event_team = ask_sign_up_team(session, "iris_test", "xx")
- with login_scope(client, 'test_iris_admin', 'test') as client:
+ with login_scope(client, "test_iris_admin", "test") as client:
# GET check that we get all users
- rv = client.get('/manage_users')
+ rv = client.get("/manage_users")
assert rv.status_code == 200
# assert b'yy yy - yy' in rv.data
@@ -264,50 +279,50 @@ def test_manage_users(client_session):
def test_update_event(client_session):
client, session = client_session
- with login_scope(client, 'test_iris_admin', 'test') as client:
+ with login_scope(client, "test_iris_admin", "test") as client:
         # case that the event does not exist
- rv = client.get('/events/boston_housing/update')
+ rv = client.get("/events/boston_housing/update")
assert rv.status_code == 302
- assert rv.location == 'http://localhost/problems'
+ assert rv.location == "http://localhost/problems"
with client.session_transaction() as cs:
- flash_message = dict(cs['_flashes'])
- assert 'no event named "boston_housing"' in flash_message['message']
+ flash_message = dict(cs["_flashes"])
+ assert 'no event named "boston_housing"' in flash_message["message"]
# GET: pre-fill the forms
- rv = client.get('/events/iris_test/update')
+ rv = client.get("/events/iris_test/update")
assert rv.status_code == 200
- assert b'Minimum duration between submissions' in rv.data
+ assert b"Minimum duration between submissions" in rv.data
# POST: update the event data
event_info = {
- 'suffix': 'test',
- 'title': 'Iris new title',
- 'is_send_trained_mail': True,
- 'is_public': True,
- 'is_controled_signup': True,
- 'is_competitive': False,
- 'min_duration_between_submissions_hour': 0,
- 'min_duration_between_submissions_minute': 0,
- 'min_duration_between_submissions_second': 0,
- 'opening_timestamp': "2000-01-01 00:00:00",
- 'closing_timestamp': "2100-01-01 00:00:00",
- 'public_opening_timestamp': "2000-01-01 00:00:00",
+ "suffix": "test",
+ "title": "Iris new title",
+ "is_send_trained_mail": True,
+ "is_public": True,
+ "is_controled_signup": True,
+ "is_competitive": False,
+ "min_duration_between_submissions_hour": 0,
+ "min_duration_between_submissions_minute": 0,
+ "min_duration_between_submissions_second": 0,
+ "opening_timestamp": "2000-01-01 00:00:00",
+ "closing_timestamp": "2100-01-01 00:00:00",
+ "public_opening_timestamp": "2000-01-01 00:00:00",
}
- rv = client.post('/events/iris_test/update', data=event_info)
+ rv = client.post("/events/iris_test/update", data=event_info)
assert rv.status_code == 302
assert rv.location == "http://localhost/problems"
- event = get_event(session, 'iris_test')
+ event = get_event(session, "iris_test")
assert event.min_duration_between_submissions == 0
def test_user_interactions(client_session):
client, _ = client_session
- with login_scope(client, 'test_iris_admin', 'test') as client:
- rv = client.get('/user_interactions')
+ with login_scope(client, "test_iris_admin", "test") as client:
+ rv = client.get("/user_interactions")
assert rv.status_code == 200
- assert b'landing' in rv.data
+ assert b"landing" in rv.data
# TODO: To be tested when we implemented properly the leaderboard
diff --git a/ramp-frontend/ramp_frontend/tests/test_auth.py b/ramp-frontend/ramp_frontend/tests/test_auth.py
index 4839ed5e2..76a6b8ee6 100644
--- a/ramp-frontend/ramp_frontend/tests/test_auth.py
+++ b/ramp-frontend/ramp_frontend/tests/test_auth.py
@@ -26,7 +26,7 @@
from ramp_frontend.testing import _fail_no_smtp_server
-@pytest.fixture(scope='module')
+@pytest.fixture(scope="module")
def client_session(database_connection):
database_config = read_config(database_config_template())
ramp_config = ramp_config_template()
@@ -34,19 +34,20 @@ def client_session(database_connection):
deployment_dir = create_toy_db(database_config, ramp_config)
flask_config = generate_flask_config(database_config)
app = create_app(flask_config)
- app.config['TESTING'] = True
- app.config['WTF_CSRF_ENABLED'] = False
- with session_scope(database_config['sqlalchemy']) as session:
+ app.config["TESTING"] = True
+ app.config["WTF_CSRF_ENABLED"] = False
+ with session_scope(database_config["sqlalchemy"]) as session:
yield app.test_client(), session
finally:
shutil.rmtree(deployment_dir, ignore_errors=True)
try:
# In case of failure we should close the global flask engine
from ramp_frontend import db as db_flask
+
db_flask.session.close()
except RuntimeError:
pass
- db, _ = setup_db(database_config['sqlalchemy'])
+ db, _ = setup_db(database_config["sqlalchemy"])
Model.metadata.drop_all(db)
@@ -54,77 +55,83 @@ def test_login(client_session):
client, session = client_session
# GET without any previous login
- rv = client.get('/login')
+ rv = client.get("/login")
assert rv.status_code == 200
- assert b'Login' in rv.data
- assert b'Username' in rv.data
- assert b'Password' in rv.data
+ assert b"Login" in rv.data
+ assert b"Username" in rv.data
+ assert b"Password" in rv.data
# GET with a previous login
- with login_scope(client, 'test_user', 'test') as client:
- rv = client.get('/login')
+ with login_scope(client, "test_user", "test") as client:
+ rv = client.get("/login")
assert rv.status_code == 302
- assert rv.location == 'http://localhost/problems'
- rv = client.get('/login', follow_redirects=True)
+ assert rv.location == "http://localhost/problems"
+ rv = client.get("/login", follow_redirects=True)
assert rv.status_code == 200
# POST with unknown username
- login_info = {'user_name': 'unknown', 'password': 'xxx'}
- rv = client.post('/login', data=login_info)
+ login_info = {"user_name": "unknown", "password": "xxx"}
+ rv = client.post("/login", data=login_info)
with client.session_transaction() as cs:
- flash_message = dict(cs['_flashes'])
- assert flash_message['message'] == 'User "unknown" does not exist'
+ flash_message = dict(cs["_flashes"])
+ assert flash_message["message"] == 'User "unknown" does not exist'
assert rv.status_code == 302
- assert rv.location == 'http://localhost/login'
- rv = client.post('/login', data=login_info, follow_redirects=True)
+ assert rv.location == "http://localhost/login"
+ rv = client.post("/login", data=login_info, follow_redirects=True)
assert rv.status_code == 200
# POST with wrong password
- login_info = {'user_name': 'test_user', 'password': 'xxx'}
- rv = client.post('/login', data=login_info)
+ login_info = {"user_name": "test_user", "password": "xxx"}
+ rv = client.post("/login", data=login_info)
with client.session_transaction() as cs:
- flash_message = dict(cs['_flashes'])
- assert flash_message['message'] == 'Wrong password'
+ flash_message = dict(cs["_flashes"])
+ assert flash_message["message"] == "Wrong password"
assert rv.status_code == 302
- assert rv.location == 'http://localhost/login'
- rv = client.post('/login', data=login_info, follow_redirects=True)
+ assert rv.location == "http://localhost/login"
+ rv = client.post("/login", data=login_info, follow_redirects=True)
assert rv.status_code == 200
# POST with a right login and password
- login_info = {'user_name': 'test_user', 'password': 'test'}
- rv = client.post('/login', data=login_info)
+ login_info = {"user_name": "test_user", "password": "test"}
+ rv = client.post("/login", data=login_info)
assert rv.status_code == 302
- assert rv.location == 'http://localhost/problems'
- user = get_user_by_name_or_email(session, login_info['user_name'])
+ assert rv.location == "http://localhost/problems"
+ user = get_user_by_name_or_email(session, login_info["user_name"])
assert user.is_authenticated
logout(client)
- rv = client.post('/login', data=login_info, follow_redirects=True)
+ rv = client.post("/login", data=login_info, follow_redirects=True)
assert rv.status_code == 200
logout(client)
# POST with a right email as login and password
- login_info = {'user_name': 'test_user', 'password': 'test',
- 'email': 'test.user@gmail.com'}
- rv = client.post('/login', data=login_info)
+ login_info = {
+ "user_name": "test_user",
+ "password": "test",
+ "email": "test.user@gmail.com",
+ }
+ rv = client.post("/login", data=login_info)
assert rv.status_code == 302
- assert rv.location == 'http://localhost/problems'
- user = get_user_by_name_or_email(session,
- login_info['email'])
+ assert rv.location == "http://localhost/problems"
+ user = get_user_by_name_or_email(session, login_info["email"])
assert user.is_authenticated
logout(client)
- rv = client.post('/login', data=login_info, follow_redirects=True)
+ rv = client.post("/login", data=login_info, follow_redirects=True)
assert rv.status_code == 200
logout(client)
# POST with right login and password from a different location webpage
- login_info = {'user_name': 'test_user', 'password': 'test'}
- landing_page = {'next': 'http://localhost/events/iris_test'}
- rv = client.post('/login', data=login_info, query_string=landing_page)
+ login_info = {"user_name": "test_user", "password": "test"}
+ landing_page = {"next": "http://localhost/events/iris_test"}
+ rv = client.post("/login", data=login_info, query_string=landing_page)
assert rv.status_code == 302
- assert rv.location == landing_page['next']
+ assert rv.location == landing_page["next"]
logout(client)
- rv = client.post('/login', data=login_info, query_string=landing_page,
- follow_redirects=True)
+ rv = client.post(
+ "/login",
+ data=login_info,
+ query_string=landing_page,
+ follow_redirects=True,
+ )
assert rv.status_code == 200
logout(client)
@@ -133,21 +140,21 @@ def test_logout(client_session):
client, session = client_session
# logout without previous login
- rv = client.get('/logout')
+ rv = client.get("/logout")
assert rv.status_code == 302
- assert rv.location == 'http://localhost/login?next=%2Flogout'
- rv = client.get('/logout', follow_redirects=True)
+ assert rv.location == "http://localhost/login?next=%2Flogout"
+ rv = client.get("/logout", follow_redirects=True)
assert rv.status_code == 200
# logout from a previous login
- login(client, 'test_user', 'test')
- rv = client.get('/logout')
+ login(client, "test_user", "test")
+ rv = client.get("/logout")
assert rv.status_code == 302
- assert rv.location == 'http://localhost/login'
- user = get_user_by_name(session, 'test_user')
+ assert rv.location == "http://localhost/login"
+ user = get_user_by_name(session, "test_user")
assert not user.is_authenticated
- login(client, 'test_user', 'test')
- rv = client.get('/logout', follow_redirects=True)
+ login(client, "test_user", "test")
+ rv = client.get("/logout", follow_redirects=True)
assert rv.status_code == 200
@@ -155,23 +162,29 @@ def test_delete_profile(client_session):
client, session = client_session
# try to delete profile without previous login
- rv = client.get('/delete_profile')
+ rv = client.get("/delete_profile")
assert rv.status_code == 302
- assert rv.location == 'http://localhost/login?next=%2Fdelete_profile'
- rv = client.get('/delete_profile', follow_redirects=True)
+ assert rv.location == "http://localhost/login?next=%2Fdelete_profile"
+ rv = client.get("/delete_profile", follow_redirects=True)
assert rv.status_code == 200
# delete profile from a previous login
- user = add_user(session, name='test_user_tmp',
- password='password', lastname='lastname',
- firstname='firstname', email='test_user_tmp@email.com',
- access_level='asked', github_url="some")
+ user = add_user(
+ session,
+ name="test_user_tmp",
+ password="password",
+ lastname="lastname",
+ firstname="firstname",
+ email="test_user_tmp@email.com",
+ access_level="asked",
+ github_url="some",
+ )
user_id = user.id
user_password = user.hashed_password
- login(client, 'test_user_tmp', 'password')
- rv = client.get('/delete_profile', follow_redirects=False)
+ login(client, "test_user_tmp", "password")
+ rv = client.get("/delete_profile", follow_redirects=False)
assert rv.status_code == 302
- assert rv.location == 'http://localhost/'
+ assert rv.location == "http://localhost/"
session.refresh(user)
assert not user.is_authenticated
assert user.firstname == "deleted"
@@ -183,17 +196,16 @@ def test_delete_profile(client_session):
assert user.admined_teams[0].name == f"deleted_{user_id}"
-@pytest.mark.parametrize("request_function", ['get', 'post'])
+@pytest.mark.parametrize("request_function", ["get", "post"])
def test_sign_up_already_logged_in(client_session, request_function):
client, _ = client_session
# sign-up when already logged-in
- with login_scope(client, 'test_user', 'test') as client:
- rv = getattr(client, request_function)('/sign_up')
+ with login_scope(client, "test_user", "test") as client:
+ rv = getattr(client, request_function)("/sign_up")
assert rv.status_code == 302
- assert rv.location == 'http://localhost/problems'
- rv = getattr(client, request_function)('/sign_up',
- follow_redirects=True)
+ assert rv.location == "http://localhost/problems"
+ rv = getattr(client, request_function)("/sign_up", follow_redirects=True)
assert rv.status_code == 200
@@ -201,46 +213,59 @@ def test_sign_up(client_session):
client, session = client_session
# GET on sign-up
- rv = client.get('/sign_up')
+ rv = client.get("/sign_up")
assert rv.status_code == 200
- assert b'Sign Up' in rv.data
+ assert b"Sign Up" in rv.data
# POST on sign-up
- user_profile = {'user_name': 'xx', 'password': 'xx', 'firstname': 'xx',
- 'lastname': 'xx', 'email': 'xx'}
- rv = client.post('/sign_up', data=user_profile)
+ user_profile = {
+ "user_name": "xx",
+ "password": "xx",
+ "firstname": "xx",
+ "lastname": "xx",
+ "email": "xx",
+ }
+ rv = client.post("/sign_up", data=user_profile)
assert rv.status_code == 302
- user = get_user_by_name(session, 'xx')
- assert user.name == 'xx'
- user_profile = {'user_name': 'yy', 'password': 'yy', 'firstname': 'yy',
- 'lastname': 'yy', 'email': 'yy'}
- rv = client.post('/sign_up', data=user_profile, follow_redirects=True)
+ user = get_user_by_name(session, "xx")
+ assert user.name == "xx"
+ user_profile = {
+ "user_name": "yy",
+ "password": "yy",
+ "firstname": "yy",
+ "lastname": "yy",
+ "email": "yy",
+ }
+ rv = client.post("/sign_up", data=user_profile, follow_redirects=True)
assert rv.status_code == 200
- def _assert_flash(url, data, status_code=302,
- message='username is already in use'):
- rv = client.post('/sign_up', data=data)
+ def _assert_flash(url, data, status_code=302, message="username is already in use"):
+ rv = client.post("/sign_up", data=data)
with client.session_transaction() as cs:
- flash_message = dict(cs['_flashes'])
- assert (flash_message['message'] == message)
+ flash_message = dict(cs["_flashes"])
+ assert flash_message["message"] == message
assert rv.status_code == status_code
# check that we catch a flash error if we try to sign-up with an identical
# username
- user_profile = {'user_name': 'xx', 'password': 'xx',
- 'firstname': 'xx', 'lastname': 'xx',
- 'email': 'test_user@gmail.com'}
- _assert_flash('/sign_up', data=user_profile,
- message='username is already in use')
-
- user_profile.update(user_name='new', email="yy")
- _assert_flash('/sign_up', data=user_profile,
- message='email is already in use')
-
- user_profile.update(user_name='yy', email="yy")
- _assert_flash('/sign_up', data=user_profile,
- message=("username is already in use "
- "and email is already in use"))
+ user_profile = {
+ "user_name": "xx",
+ "password": "xx",
+ "firstname": "xx",
+ "lastname": "xx",
+ "email": "test_user@gmail.com",
+ }
+ _assert_flash("/sign_up", data=user_profile, message="username is already in use")
+
+ user_profile.update(user_name="new", email="yy")
+ _assert_flash("/sign_up", data=user_profile, message="email is already in use")
+
+ user_profile.update(user_name="yy", email="yy")
+ _assert_flash(
+ "/sign_up",
+ data=user_profile,
+ message=("username is already in use " "and email is already in use"),
+ )
@_fail_no_smtp_server
@@ -251,32 +276,32 @@ def test_sign_up_with_approval(client_session):
with client.application.app_context():
with mail.record_messages() as outbox:
user_profile = {
- 'user_name': 'new_user_1', 'password': 'xx', 'firstname': 'xx',
- 'lastname': 'xx', 'email': 'new_user_1@mail.com'
+ "user_name": "new_user_1",
+ "password": "xx",
+ "firstname": "xx",
+ "lastname": "xx",
+ "email": "new_user_1@mail.com",
}
- rv = client.post('/sign_up', data=user_profile)
+ rv = client.post("/sign_up", data=user_profile)
# check the flash box to inform the user about the mail
with client.session_transaction() as cs:
- flash_message = dict(cs['_flashes'])
- assert 'We sent a confirmation email.' in flash_message['message']
+ flash_message = dict(cs["_flashes"])
+ assert "We sent a confirmation email." in flash_message["message"]
# check that the email has been sent
assert len(outbox) == 1
- assert ('Click on the following link to confirm your email'
- in outbox[0].body)
+ assert "Click on the following link to confirm your email" in outbox[0].body
# get the link to reset the password
- reg_exp = re.search(
- "http://localhost/confirm_email/.*", outbox[0].body
- )
+ reg_exp = re.search("http://localhost/confirm_email/.*", outbox[0].body)
confirm_email_link = reg_exp.group()
# remove the part with 'localhost' for the next query
confirm_email_link = confirm_email_link[
- confirm_email_link.find('/confirm_email'):
+ confirm_email_link.find("/confirm_email") :
]
# check the redirection
assert rv.status_code == 302
- user = get_user_by_name(session, 'new_user_1')
+ user = get_user_by_name(session, "new_user_1")
assert user is not None
- assert user.access_level == 'not_confirmed'
+ assert user.access_level == "not_confirmed"
# POST method of the email confirmation
with client.application.app_context():
@@ -285,50 +310,51 @@ def test_sign_up_with_approval(client_session):
# check the flash box to inform the user to wait for admin's
# approval
with client.session_transaction() as cs:
- flash_message = dict(cs['_flashes'])
- assert ('An email has been sent to the RAMP administrator' in
- flash_message['message'])
+ flash_message = dict(cs["_flashes"])
+ assert (
+ "An email has been sent to the RAMP administrator"
+ in flash_message["message"]
+ )
# check that we send an email to the administrator
assert len(outbox) == 1
assert "Approve registration of new_user_1" in outbox[0].subject
# ensure that we have the last changes
session.commit()
- user = get_user_by_name(session, 'new_user_1')
- assert user.access_level == 'asked'
+ user = get_user_by_name(session, "new_user_1")
+ assert user.access_level == "asked"
assert rv.status_code == 302
- assert rv.location == 'http://localhost/login'
+ assert rv.location == "http://localhost/login"
# POST to check that we raise the right errors
# resend the confirmation for a user which already confirmed
rv = client.post(confirm_email_link)
with client.session_transaction() as cs:
- flash_message = dict(cs['_flashes'])
- assert ('Your email address already has been confirmed'
- in flash_message['error'])
+ flash_message = dict(cs["_flashes"])
+ assert "Your email address already has been confirmed" in flash_message["error"]
assert rv.status_code == 302
- assert rv.location == 'http://localhost/'
+ assert rv.location == "http://localhost/"
# check when the user was already approved
- for status in ('user', 'admin'):
- user = get_user_by_name(session, 'new_user_1')
+ for status in ("user", "admin"):
+ user = get_user_by_name(session, "new_user_1")
user.access_level = status
session.commit()
rv = client.post(confirm_email_link)
with client.session_transaction() as cs:
- flash_message = dict(cs['_flashes'])
- assert 'Your account is already approved.' in flash_message['error']
+ flash_message = dict(cs["_flashes"])
+ assert "Your account is already approved." in flash_message["error"]
assert rv.status_code == 302
- assert rv.location == 'http://localhost/login'
+ assert rv.location == "http://localhost/login"
# delete the user in the middle
session.delete(user)
session.commit()
rv = client.post(confirm_email_link)
with client.session_transaction() as cs:
- flash_message = dict(cs['_flashes'])
- assert 'You did not sign-up yet to RAMP.' in flash_message['error']
+ flash_message = dict(cs["_flashes"])
+ assert "You did not sign-up yet to RAMP." in flash_message["error"]
assert rv.status_code == 302
- assert rv.location == 'http://localhost/sign_up'
+ assert rv.location == "http://localhost/sign_up"
# access a token which does not exist
- rv = client.post('/confirm_email/xxx')
+ rv = client.post("/confirm_email/xxx")
assert rv.status_code == 404
@@ -336,34 +362,46 @@ def test_update_profile(client_session):
client, session = client_session
# try to change the profile without being logged-in
- rv = client.get('/update_profile')
+ rv = client.get("/update_profile")
assert rv.status_code == 302
- assert rv.location == 'http://localhost/login?next=%2Fupdate_profile'
- rv = client.get('/update_profile', follow_redirects=True)
+ assert rv.location == "http://localhost/login?next=%2Fupdate_profile"
+ rv = client.get("/update_profile", follow_redirects=True)
assert rv.status_code == 200
- with login_scope(client, 'test_user', 'test') as client:
+ with login_scope(client, "test_user", "test") as client:
# GET function once logged-in
- rv = client.get('/update_profile')
+ rv = client.get("/update_profile")
assert rv.status_code == 200
- for attr in [b'Username', b'First name', b'Last name', b'Email',
- b'User', b'Test', b'test.user@gmail.com']:
+ for attr in [
+ b"Username",
+ b"First name",
+ b"Last name",
+ b"Email",
+ b"User",
+ b"Test",
+ b"test.user@gmail.com",
+ ]:
assert attr in rv.data
# POST function once logged-in
- user_profile = {'lastname': 'XXX', 'firstname': 'YYY',
- 'email': 'xxx@gmail.com'}
- rv = client.post('/update_profile', data=user_profile)
+ user_profile = {
+ "lastname": "XXX",
+ "firstname": "YYY",
+ "email": "xxx@gmail.com",
+ }
+ rv = client.post("/update_profile", data=user_profile)
assert rv.status_code == 302
- assert rv.location == 'http://localhost/problems'
- user = get_user_by_name(session, 'test_user')
- assert user.lastname == 'XXX'
- assert user.firstname == 'YYY'
- assert user.email == 'xxx@gmail.com'
- user_profile = {'lastname': 'Test', 'firstname': 'User',
- 'email': 'test.user@gmail.com'}
- rv = client.post('/update_profile', data=user_profile,
- follow_redirects=True)
+ assert rv.location == "http://localhost/problems"
+ user = get_user_by_name(session, "test_user")
+ assert user.lastname == "XXX"
+ assert user.firstname == "YYY"
+ assert user.email == "xxx@gmail.com"
+ user_profile = {
+ "lastname": "Test",
+ "firstname": "User",
+ "email": "test.user@gmail.com",
+ }
+ rv = client.post("/update_profile", data=user_profile, follow_redirects=True)
assert rv.status_code == 200
@@ -372,64 +410,63 @@ def test_reset_password(client_session):
client, session = client_session
# GET method
- rv = client.get('/reset_password')
+ rv = client.get("/reset_password")
assert rv.status_code == 200
- assert b'If you are a registered user, we are going to send' in rv.data
+ assert b"If you are a registered user, we are going to send" in rv.data
# POST method
# check that we raise an error if the email does not exist
- rv = client.post('/reset_password', data={'email': 'random@mail.com'})
+ rv = client.post("/reset_password", data={"email": "random@mail.com"})
assert rv.status_code == 200
- assert b'You can sign-up instead.' in rv.data
+ assert b"You can sign-up instead." in rv.data
# set a user to "asked" access level
- user = get_user_by_name(session, 'test_user')
- user.access_level = 'asked'
+ user = get_user_by_name(session, "test_user")
+ user.access_level = "asked"
session.commit()
- rv = client.post('/reset_password', data={'email': user.email})
+ rv = client.post("/reset_password", data={"email": user.email})
assert rv.status_code == 200
- assert b'Your account has not been yet approved.' in rv.data
+ assert b"Your account has not been yet approved." in rv.data
# set back the account to 'user' access level
- user.access_level = 'user'
+ user.access_level = "user"
session.commit()
- rv = client.post('/reset_password', data={'email': user.email})
+ rv = client.post("/reset_password", data={"email": user.email})
with client.session_transaction() as cs:
- flash_message = dict(cs['_flashes'])
- assert flash_message['message'] == ('An email to reset your password has '
- 'been sent')
+ flash_message = dict(cs["_flashes"])
+ assert flash_message["message"] == (
+ "An email to reset your password has " "been sent"
+ )
assert rv.status_code == 302
- assert rv.location == 'http://localhost/login'
+ assert rv.location == "http://localhost/login"
with client.application.app_context():
with mail.record_messages() as outbox:
- rv = client.post('/reset_password', data={'email': user.email})
+ rv = client.post("/reset_password", data={"email": user.email})
assert len(outbox) == 1
- assert 'click on the link to reset your password' in outbox[0].body
+ assert "click on the link to reset your password" in outbox[0].body
# get the link to reset the password
- reg_exp = re.search(
- "http://localhost/reset/.*", outbox[0].body
- )
+ reg_exp = re.search("http://localhost/reset/.*", outbox[0].body)
reset_password_link = reg_exp.group()
# remove the part with 'localhost' for the next query
reset_password_link = reset_password_link[
- reset_password_link.find('/reset'):
+ reset_password_link.find("/reset") :
]
# check that we can reset the password using the previous link
# GET method
rv = client.get(reset_password_link)
assert rv.status_code == 200
- assert b'Change my password' in rv.data
+ assert b"Change my password" in rv.data
# POST method
- new_password = 'new_password'
- rv = client.post(reset_password_link, data={'password': new_password})
+ new_password = "new_password"
+ rv = client.post(reset_password_link, data={"password": new_password})
assert rv.status_code == 302
- assert rv.location == 'http://localhost/login'
+ assert rv.location == "http://localhost/login"
# make a commit to be sure that the update has been done
session.commit()
- user = get_user_by_name(session, 'test_user')
+ user = get_user_by_name(session, "test_user")
assert check_password(new_password, user.hashed_password)
@@ -438,30 +475,28 @@ def test_reset_token_error(client_session):
client, session = client_session
# POST method
- new_password = 'new_password'
- rv = client.post('/reset/xxx', data={'password': new_password})
+ new_password = "new_password"
+ rv = client.post("/reset/xxx", data={"password": new_password})
assert rv.status_code == 404
# Get get the link to a real token but remove the user in between
- user = get_user_by_name(session, 'test_user')
+ user = get_user_by_name(session, "test_user")
with client.application.app_context():
with mail.record_messages() as outbox:
- rv = client.post('/reset_password', data={'email': user.email})
+ rv = client.post("/reset_password", data={"email": user.email})
assert len(outbox) == 1
- assert 'click on the link to reset your password' in outbox[0].body
+ assert "click on the link to reset your password" in outbox[0].body
# get the link to reset the password
- reg_exp = re.search(
- "http://localhost/reset/.*", outbox[0].body
- )
+ reg_exp = re.search("http://localhost/reset/.*", outbox[0].body)
reset_password_link = reg_exp.group()
# remove the part with 'localhost' for the next query
reset_password_link = reset_password_link[
- reset_password_link.find('/reset'):
+ reset_password_link.find("/reset") :
]
- user = get_user_by_name(session, 'test_user')
+ user = get_user_by_name(session, "test_user")
session.delete(user)
session.commit()
- new_password = 'new_password'
- rv = client.post(reset_password_link, data={'password': new_password})
+ new_password = "new_password"
+ rv = client.post(reset_password_link, data={"password": new_password})
assert rv.status_code == 404
diff --git a/ramp-frontend/ramp_frontend/tests/test_cli.py b/ramp-frontend/ramp_frontend/tests/test_cli.py
index c65824604..bd76a64dd 100644
--- a/ramp-frontend/ramp_frontend/tests/test_cli.py
+++ b/ramp-frontend/ramp_frontend/tests/test_cli.py
@@ -24,18 +24,25 @@ def make_toy_db(database_connection):
yield
finally:
shutil.rmtree(deployment_dir, ignore_errors=True)
- db, _ = setup_db(database_config['sqlalchemy'])
+ db, _ = setup_db(database_config["sqlalchemy"])
Model.metadata.drop_all(db)
def test_test_launch(make_toy_db):
# pass environment to subprocess
- cmd = ['python', '-m']
- cmd += ["ramp_frontend.cli", "test-launch",
- "--config", database_config_template()]
+ cmd = ["python", "-m"]
+ cmd += [
+ "ramp_frontend.cli",
+ "test-launch",
+ "--config",
+ database_config_template(),
+ ]
proc = subprocess.Popen(
- cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE,
- env=os.environ.copy())
+ cmd,
+ stdout=subprocess.PIPE,
+ stderr=subprocess.PIPE,
+ env=os.environ.copy(),
+ )
# wait for 5 seconds before to terminate the server
time.sleep(5)
proc.send_signal(signal.SIGINT)
diff --git a/ramp-frontend/ramp_frontend/tests/test_general.py b/ramp-frontend/ramp_frontend/tests/test_general.py
index bfe848bf3..53efd6a76 100644
--- a/ramp-frontend/ramp_frontend/tests/test_general.py
+++ b/ramp-frontend/ramp_frontend/tests/test_general.py
@@ -15,7 +15,7 @@
from ramp_frontend import create_app
-@pytest.fixture(scope='module')
+@pytest.fixture(scope="module")
def client_session(database_connection):
database_config = read_config(database_config_template())
ramp_config = ramp_config_template()
@@ -23,81 +23,79 @@ def client_session(database_connection):
deployment_dir = create_toy_db(database_config, ramp_config)
flask_config = generate_flask_config(database_config)
app = create_app(flask_config)
- app.config['TESTING'] = True
- app.config['WTF_CSRF_ENABLED'] = False
- with session_scope(database_config['sqlalchemy']) as session:
+ app.config["TESTING"] = True
+ app.config["WTF_CSRF_ENABLED"] = False
+ with session_scope(database_config["sqlalchemy"]) as session:
yield app.test_client(), session, app
finally:
shutil.rmtree(deployment_dir, ignore_errors=True)
try:
# In case of failure we should close the global flask engine
from ramp_frontend import db as db_flask
+
db_flask.session.close()
except RuntimeError:
pass
- db, _ = setup_db(database_config['sqlalchemy'])
+ db, _ = setup_db(database_config["sqlalchemy"])
Model.metadata.drop_all(db)
def test_index(client_session):
client, _, _ = client_session
- rv = client.get('/')
+ rv = client.get("/")
assert rv.status_code == 200
- assert (b'RAMP: collaborative data science challenges' in
- rv.data)
+ assert b"RAMP: collaborative data science challenges" in rv.data
def test_ramp(client_session):
client, _, _ = client_session
- rv = client.get('/description')
+ rv = client.get("/description")
assert rv.status_code == 200
- assert (b'The RAMP software packages were developed by the' in
- rv.data)
+ assert b"The RAMP software packages were developed by the" in rv.data
def test_domain(client_session):
client, session, _ = client_session
- rv = client.get('/data_domains')
+ rv = client.get("/data_domains")
assert rv.status_code == 200
- assert b'Scientific data domains' in rv.data
- assert b'boston_housing' in rv.data
- assert b'Boston housing price regression' in rv.data
+ assert b"Scientific data domains" in rv.data
+ assert b"boston_housing" in rv.data
+ assert b"Boston housing price regression" in rv.data
def test_teaching(client_session):
client, _, _ = client_session
- rv = client.get('/teaching')
+ rv = client.get("/teaching")
assert rv.status_code == 200
- assert b'RAMP challenges begin with an interesting supervised prediction' \
- in rv.data
+ assert b"RAMP challenges begin with an interesting supervised prediction" in rv.data
def test_data_science_themes(client_session):
client, _, _ = client_session
- rv = client.get('/data_science_themes')
+ rv = client.get("/data_science_themes")
assert rv.status_code == 200
- assert b'boston_housing_theme' in rv.data
- assert b'iris_theme' in rv.data
+ assert b"boston_housing_theme" in rv.data
+ assert b"iris_theme" in rv.data
def test_keywords(client_session):
client, _, _ = client_session
- rv = client.get('/keywords/boston_housing')
+ rv = client.get("/keywords/boston_housing")
assert rv.status_code == 200
- assert b'Related problems' in rv.data
- assert b'boston_housing' in rv.data
- assert b'Boston housing price regression' in rv.data
+ assert b"Related problems" in rv.data
+ assert b"boston_housing" in rv.data
+ assert b"Boston housing price regression" in rv.data
def test_privacy_policy(client_session, monkeypatch):
client, _, app = client_session
- rv = client.get('/privacy_policy')
+ rv = client.get("/privacy_policy")
# By default privacy policy is not defined in the config.
# i.e. config['privacy_policy'] = None
assert rv.status_code == 404
- msg = 'Some HTML code'
- monkeypatch.setitem(app.config, 'PRIVACY_POLICY_PAGE', msg)
- rv = client.get('/privacy_policy')
+ msg = "Some HTML code"
+ monkeypatch.setitem(app.config, "PRIVACY_POLICY_PAGE", msg)
+ rv = client.get("/privacy_policy")
assert rv.status_code == 200
- assert msg in rv.data.decode('utf-8')
+ assert msg in rv.data.decode("utf-8")
diff --git a/ramp-frontend/ramp_frontend/tests/test_ramp.py b/ramp-frontend/ramp_frontend/tests/test_ramp.py
index 79aa2d948..cc558a392 100644
--- a/ramp-frontend/ramp_frontend/tests/test_ramp.py
+++ b/ramp-frontend/ramp_frontend/tests/test_ramp.py
@@ -33,7 +33,7 @@
from ramp_frontend.testing import _fail_no_smtp_server
-@pytest.fixture(scope='module')
+@pytest.fixture(scope="module")
def client_session(database_connection):
database_config = read_config(database_config_template())
ramp_config = ramp_config_template()
@@ -41,128 +41,141 @@ def client_session(database_connection):
deployment_dir = create_toy_db(database_config, ramp_config)
flask_config = generate_flask_config(database_config)
app = create_app(flask_config)
- app.config['TESTING'] = True
- app.config['WTF_CSRF_ENABLED'] = False
- with session_scope(database_config['sqlalchemy']) as session:
+ app.config["TESTING"] = True
+ app.config["WTF_CSRF_ENABLED"] = False
+ with session_scope(database_config["sqlalchemy"]) as session:
yield app.test_client(), session
finally:
shutil.rmtree(deployment_dir, ignore_errors=True)
try:
# In case of failure we should close the global flask engine
from ramp_frontend import db as db_flask
+
db_flask.session.close()
except RuntimeError:
pass
- db, _ = setup_db(database_config['sqlalchemy'])
+ db, _ = setup_db(database_config["sqlalchemy"])
Model.metadata.drop_all(db)
-@pytest.fixture(scope='function')
+@pytest.fixture(scope="function")
def makedrop_event(client_session):
_, session = client_session
- add_event(session, 'iris', 'iris_test_4event', 'iris_test_4event',
- 'starting_kit', '/tmp/databoard_test/submissions',
- is_public=True)
+ add_event(
+ session,
+ "iris",
+ "iris_test_4event",
+ "iris_test_4event",
+ "starting_kit",
+ "/tmp/databoard_test/submissions",
+ is_public=True,
+ )
yield
- delete_event(session, 'iris_test_4event')
+ delete_event(session, "iris_test_4event")
@pytest.mark.parametrize(
"page",
- ["/events/iris_test",
- "/events/iris_test/sign_up",
- "/events/iris_test/sandbox",
- "problems/iris/ask_for_event",
- "/credit/xxx",
- "/event_plots/iris_test"]
+ [
+ "/events/iris_test",
+ "/events/iris_test/sign_up",
+ "/events/iris_test/sandbox",
+ "problems/iris/ask_for_event",
+ "/credit/xxx",
+ "/event_plots/iris_test",
+ ],
)
def test_check_login_required(client_session, page):
client, _ = client_session
rv = client.get(page)
assert rv.status_code == 302
- assert 'http://localhost/login' in rv.location
+ assert "http://localhost/login" in rv.location
rv = client.get(page, follow_redirects=True)
assert rv.status_code == 200
@pytest.mark.parametrize(
"page",
- ["/events/xxx",
- "/events/xxx/sign_up",
- "/events/xxx/sandbox",
- "/event_plots/xxx"]
+ [
+ "/events/xxx",
+ "/events/xxx/sign_up",
+ "/events/xxx/sandbox",
+ "/event_plots/xxx",
+ ],
)
def test_check_unknown_events(client_session, page):
client, _ = client_session
# trigger that the event does not exist
- with login_scope(client, 'test_user', 'test') as client:
+ with login_scope(client, "test_user", "test") as client:
rv = client.get(page)
assert rv.status_code == 302
- assert rv.location == 'http://localhost/problems'
+ assert rv.location == "http://localhost/problems"
with client.session_transaction() as cs:
- flash_message = dict(cs['_flashes'])
- assert "no event named" in flash_message['message']
+ flash_message = dict(cs["_flashes"])
+ assert "no event named" in flash_message["message"]
def test_problems(client_session):
client, _ = client_session
# GET: access the problems page without login
- rv = client.get('/problems')
+ rv = client.get("/problems")
assert rv.status_code == 200
- assert b'Hi User!' not in rv.data
- assert b'participants' in rv.data
- assert b'Iris classification' in rv.data
- assert b'Boston housing price regression' in rv.data
+ assert b"Hi User!" not in rv.data
+ assert b"participants" in rv.data
+ assert b"Iris classification" in rv.data
+ assert b"Boston housing price regression" in rv.data
# GET: access the problems when logged-in
- with login_scope(client, 'test_user', 'test') as client:
- rv = client.get('/problems')
+ with login_scope(client, "test_user", "test") as client:
+ rv = client.get("/problems")
assert rv.status_code == 200
- assert b'Hi User!' in rv.data
- assert b'participants' in rv.data
- assert b'Iris classification' in rv.data
- assert b'Boston housing price regression' in rv.data
+ assert b"Hi User!" in rv.data
+ assert b"participants" in rv.data
+ assert b"Iris classification" in rv.data
+ assert b"Boston housing price regression" in rv.data
def test_problem(client_session):
client, session = client_session
# Access a problem that does not exist
- rv = client.get('/problems/xxx')
+ rv = client.get("/problems/xxx")
assert rv.status_code == 302
- assert rv.location == 'http://localhost/problems'
+ assert rv.location == "http://localhost/problems"
with client.session_transaction() as cs:
- flash_message = dict(cs['_flashes'])
- assert flash_message['message'] == "Problem xxx does not exist"
- rv = client.get('/problems/xxx', follow_redirects=True)
+ flash_message = dict(cs["_flashes"])
+ assert flash_message["message"] == "Problem xxx does not exist"
+ rv = client.get("/problems/xxx", follow_redirects=True)
assert rv.status_code == 200
# GET: looking at the problem without being logged-in
- rv = client.get('problems/iris')
+ rv = client.get("problems/iris")
assert rv.status_code == 200
- assert b'Iris classification' in rv.data
- assert b'Registered events' in rv.data
+ assert b"Iris classification" in rv.data
+ assert b"Registered events" in rv.data
# GET: looking at the problem being logged-in
- with login_scope(client, 'test_user', 'test') as client:
- rv = client.get('problems/iris')
+ with login_scope(client, "test_user", "test") as client:
+ rv = client.get("problems/iris")
assert rv.status_code == 200
- assert b'Iris classification' in rv.data
- assert b'Registered events' in rv.data
+ assert b"Iris classification" in rv.data
+ assert b"Registered events" in rv.data
@pytest.mark.parametrize(
"event_name, correct",
- [("iri_aaa", False),
- ("irisaaa", False),
- ("test_iris", False),
- ("iris_", True),
- ("iris_aaa_aaa_test", True),
- ("iris", False),
- ("iris_t", True)]
+ [
+ ("iri_aaa", False),
+ ("irisaaa", False),
+ ("test_iris", False),
+ ("iris_", True),
+ ("iris_aaa_aaa_test", True),
+ ("iris", False),
+ ("iris_t", True),
+ ],
)
def test_event_name_correct(client_session, event_name, correct):
client, session = client_session
@@ -170,82 +183,128 @@ def test_event_name_correct(client_session, event_name, correct):
err_msg = "The event name should start with the problem name"
with pytest.raises(ValueError, match=err_msg):
add_event(
- session, 'iris', event_name, 'new_event', 'starting_kit',
- '/tmp/databoard_test/submissions', is_public=True
+ session,
+ "iris",
+ event_name,
+ "new_event",
+ "starting_kit",
+ "/tmp/databoard_test/submissions",
+ is_public=True,
)
else:
- assert add_event(session, 'iris', event_name, 'new_event',
- 'starting_kit', '/tmp/databoard_test/submissions',
- is_public=True)
+ assert add_event(
+ session,
+ "iris",
+ event_name,
+ "new_event",
+ "starting_kit",
+ "/tmp/databoard_test/submissions",
+ is_public=True,
+ )
def test_user_event_status(client_session):
client, session = client_session
- add_user(session, 'new_user', 'new_user', 'new_user',
- 'new_user', 'new_user', access_level='user')
- add_event(session, 'iris', 'iris_new_event', 'new_event', 'starting_kit',
- '/tmp/databoard_test/submissions', is_public=True)
+ add_user(
+ session,
+ "new_user",
+ "new_user",
+ "new_user",
+ "new_user",
+ "new_user",
+ access_level="user",
+ )
+ add_event(
+ session,
+ "iris",
+ "iris_new_event",
+ "new_event",
+ "starting_kit",
+ "/tmp/databoard_test/submissions",
+ is_public=True,
+ )
# user signed up, not approved for the event
- ask_sign_up_team(session, 'iris_new_event', 'new_user')
- with login_scope(client, 'new_user', 'new_user') as client:
- rv = client.get('/problems')
+ ask_sign_up_team(session, "iris_new_event", "new_user")
+ with login_scope(client, "new_user", "new_user") as client:
+ rv = client.get("/problems")
assert rv.status_code == 200
- assert b'user-waiting' in rv.data
- assert b'user-signed' not in rv.data
+ assert b"user-waiting" in rv.data
+ assert b"user-signed" not in rv.data
# user signed up and approved for the event
- sign_up_team(session, 'iris_new_event', 'new_user')
- with login_scope(client, 'new_user', 'new_user') as client:
- rv = client.get('/problems')
+ sign_up_team(session, "iris_new_event", "new_user")
+ with login_scope(client, "new_user", "new_user") as client:
+ rv = client.get("/problems")
assert rv.status_code == 200
- assert b'user-signed' in rv.data
- assert b'user-waiting' not in rv.data
+ assert b"user-signed" in rv.data
+ assert b"user-waiting" not in rv.data
NOW = datetime.datetime.now()
testtimestamps = [
- (NOW.replace(year=NOW.year+1), NOW.replace(year=NOW.year+2),
- NOW.replace(year=NOW.year+3), b'event-close'),
- (NOW.replace(year=NOW.year-1), NOW.replace(year=NOW.year+1),
- NOW.replace(year=NOW.year+2), b'event-comp'),
- (NOW.replace(year=NOW.year-2), NOW.replace(year=NOW.year-1),
- NOW.replace(year=NOW.year+1), b'event-collab'),
- (NOW.replace(year=NOW.year-3), NOW.replace(year=NOW.year-2),
- NOW.replace(year=NOW.year-1), b'event-close'),
+ (
+ NOW.replace(year=NOW.year + 1),
+ NOW.replace(year=NOW.year + 2),
+ NOW.replace(year=NOW.year + 3),
+ b"event-close",
+ ),
+ (
+ NOW.replace(year=NOW.year - 1),
+ NOW.replace(year=NOW.year + 1),
+ NOW.replace(year=NOW.year + 2),
+ b"event-comp",
+ ),
+ (
+ NOW.replace(year=NOW.year - 2),
+ NOW.replace(year=NOW.year - 1),
+ NOW.replace(year=NOW.year + 1),
+ b"event-collab",
+ ),
+ (
+ NOW.replace(year=NOW.year - 3),
+ NOW.replace(year=NOW.year - 2),
+ NOW.replace(year=NOW.year - 1),
+ b"event-close",
+ ),
]
@pytest.mark.parametrize(
"opening_date,public_date,closing_date,expected", testtimestamps
)
-def test_event_status(client_session, makedrop_event,
- opening_date, public_date,
- closing_date, expected):
+def test_event_status(
+ client_session,
+ makedrop_event,
+ opening_date,
+ public_date,
+ closing_date,
+ expected,
+):
# checks if the event status is displayed correctly
client, session = client_session
# change the datetime stamps for the event
- event = get_event(session, 'iris_test_4event')
+ event = get_event(session, "iris_test_4event")
event.opening_timestamp = opening_date
event.public_opening_timestamp = public_date
event.closing_timestamp = closing_date
session.commit()
# GET: access the problems page without login
- rv = client.get('/problems')
+ rv = client.get("/problems")
assert rv.status_code == 200
- event_idx = rv.data.index(b'iris_test_4event')
- event_class_idx = rv.data[:event_idx].rfind(b'")
diff --git a/ramp-frontend/ramp_frontend/views/admin.py b/ramp-frontend/ramp_frontend/views/admin.py
--- a/ramp-frontend/ramp_frontend/views/admin.py
+++ b/ramp-frontend/ramp_frontend/views/admin.py
@@ -144,21 +142,21 @@ def manage_users():
def approve_single_user(user_name):
"""Approve a single user. This is usually used to approve user through
email."""
- if not flask_login.current_user.access_level == 'admin':
+ if not flask_login.current_user.access_level == "admin":
return redirect_to_user(
- 'Sorry {}, you do not have admin rights'
- .format(flask_login.current_user.firstname),
- is_error=True
+ "Sorry {}, you do not have admin rights".format(
+ flask_login.current_user.firstname
+ ),
+ is_error=True,
)
user = User.query.filter_by(name=user_name).one_or_none()
if not user:
- return redirect_to_user(
- 'No user {}'.format(user_name), is_error=True
- )
+ return redirect_to_user("No user {}".format(user_name), is_error=True)
approve_user(db.session, user.name)
return redirect_to_user(
- '{} is signed up'.format(user), is_error=False,
- category='Successful sign-up'
+ "{} is signed up".format(user),
+ is_error=False,
+ category="Successful sign-up",
)
@@ -180,26 +178,35 @@ def approve_sign_up_for_event(event_name, user_name):
event = get_event(db.session, event_name)
user = User.query.filter_by(name=user_name).one_or_none()
if not is_admin(db.session, event_name, flask_login.current_user.name):
- return redirect_to_user('Sorry {}, you do not have admin rights'
- .format(flask_login.current_user.firstname),
- is_error=True)
+ return redirect_to_user(
+ "Sorry {}, you do not have admin rights".format(
+ flask_login.current_user.firstname
+ ),
+ is_error=True,
+ )
if not event or not user:
- return redirect_to_user('No event {} or no user {}'
- .format(event_name, user_name), is_error=True)
+ return redirect_to_user(
+ "No event {} or no user {}".format(event_name, user_name),
+ is_error=True,
+ )
sign_up_team(db.session, event.name, user.name)
- subject = ('Signed up for the RAMP event {}'
- .format(event.name))
- body = ('{}, you have been registered to the RAMP event {}. '
- 'You can now proceed to your sandbox and make submissions.'
- '\nHave fun!!!'.format(user.name, event.name))
+ subject = "Signed up for the RAMP event {}".format(event.name)
+ body = (
+ "{}, you have been registered to the RAMP event {}. "
+ "You can now proceed to your sandbox and make submissions."
+ "\nHave fun!!!".format(user.name, event.name)
+ )
send_mail(to=user.email, subject=subject, body=body)
- return redirect_to_user('{} is signed up for {}.'.format(user, event),
- is_error=False, category='Successful sign-up')
+ return redirect_to_user(
+ "{} is signed up for {}.".format(user, event),
+ is_error=False,
+ category="Successful sign-up",
+ )
-@mod.route("/events//update", methods=['GET', 'POST'])
+@mod.route("/events//update", methods=["GET", "POST"])
@flask_login.login_required
def update_event(event_name):
"""Update the parameters of an event.
@@ -211,19 +218,21 @@ def update_event(event_name):
"""
if not is_admin(db.session, event_name, flask_login.current_user.name):
return redirect_to_user(
- 'Sorry {}, you do not have admin rights'
- .format(flask_login.current_user.firstname),
- is_error=True
+ "Sorry {}, you do not have admin rights".format(
+ flask_login.current_user.firstname
+ ),
+ is_error=True,
)
event = get_event(db.session, event_name)
- if not is_accessible_event(db.session, event_name,
- flask_login.current_user.name):
+ if not is_accessible_event(db.session, event_name, flask_login.current_user.name):
return redirect_to_user(
- '{}: no event named "{}"'
- .format(flask_login.current_user.firstname, event_name)
+ '{}: no event named "{}"'.format(
+ flask_login.current_user.firstname, event_name
+ )
)
- logger.info('{} is updating event {}'
- .format(flask_login.current_user.name, event.name))
+ logger.info(
+ "{} is updating event {}".format(flask_login.current_user.name, event.name)
+ )
admin = is_admin(db.session, event_name, flask_login.current_user.name)
# We assume here that event name has the syntax <problem_name>_<suffix>
@@ -253,9 +262,10 @@ def update_event(event_name):
event.is_controled_signup = form.is_controled_signup.data
event.is_competitive = form.is_competitive.data
event.min_duration_between_submissions = (
- form.min_duration_between_submissions_hour.data * 3600 +
- form.min_duration_between_submissions_minute.data * 60 +
- form.min_duration_between_submissions_second.data)
+ form.min_duration_between_submissions_hour.data * 3600
+ + form.min_duration_between_submissions_minute.data * 60
+ + form.min_duration_between_submissions_second.data
+ )
event.opening_timestamp = form.opening_timestamp.data
event.closing_timestamp = form.closing_timestamp.data
event.public_opening_timestamp = form.public_opening_timestamp.data
@@ -263,10 +273,10 @@ def update_event(event_name):
except IntegrityError as e:
db.session.rollback()
- message = ''
+ message = ""
existing_event = get_event(db.session, event.name)
if existing_event is not None:
- message += 'event name is already in use'
+ message += "event name is already in use"
# # try:
# # User.query.filter_by(email=email).one()
# # if len(message) > 0:
@@ -276,24 +286,22 @@ def update_event(event_name):
# pass
if message:
e = NameClashError(message)
- flash('{}'.format(e), category='Update event error')
- return redirect(url_for('update_event', event_name=event.name))
+ flash("{}".format(e), category="Update event error")
+ return redirect(url_for("update_event", event_name=event.name))
- return redirect(url_for('ramp.problems'))
+ return redirect(url_for("ramp.problems"))
- approved = is_user_signed_up(
- db.session, event_name, flask_login.current_user.name
- )
+ approved = is_user_signed_up(db.session, event_name, flask_login.current_user.name)
asked = is_user_sign_up_requested(
db.session, event_name, flask_login.current_user.name
)
return render_template(
- 'update_event.html',
+ "update_event.html",
form=form,
event=event,
admin=admin,
asked=asked,
- approved=approved
+ approved=approved,
)
@@ -301,19 +309,20 @@ def update_event(event_name):
@flask_login.login_required
def user_interactions():
"""Show the user interactions recorded on the website."""
- if flask_login.current_user.access_level != 'admin':
+ if flask_login.current_user.access_level != "admin":
return redirect_to_user(
- 'Sorry {}, you do not have admin rights'
- .format(flask_login.current_user.firstname),
- is_error=True
+ "Sorry {}, you do not have admin rights".format(
+ flask_login.current_user.firstname
+ ),
+ is_error=True,
)
user_interactions_html = get_user_interactions_by_name(
- db.session, output_format='html'
+ db.session, output_format="html"
)
return render_template(
- 'user_interactions.html',
- user_interactions_title='User interactions',
- user_interactions=user_interactions_html
+ "user_interactions.html",
+ user_interactions_title="User interactions",
+ user_interactions=user_interactions_html,
)
@@ -329,50 +338,51 @@ def dashboard_submissions(event_name):
"""
if not is_admin(db.session, event_name, flask_login.current_user.name):
return redirect_to_user(
- 'Sorry {}, you do not have admin rights'
- .format(flask_login.current_user.firstname),
- is_error=True
+ "Sorry {}, you do not have admin rights".format(
+ flask_login.current_user.firstname
+ ),
+ is_error=True,
)
event = get_event(db.session, event_name)
# Get dates and number of submissions
- submissions = \
- (Submission.query
- .filter(Event.name == event.name)
- .filter(Event.id == EventTeam.event_id)
- .filter(EventTeam.id == Submission.event_team_id)
- .order_by(Submission.submission_timestamp)
- .all())
+ submissions = (
+ Submission.query.filter(Event.name == event.name)
+ .filter(Event.id == EventTeam.event_id)
+ .filter(EventTeam.id == Submission.event_team_id)
+ .order_by(Submission.submission_timestamp)
+ .all()
+ )
submissions = [sub for sub in submissions if sub.is_not_sandbox]
timestamp_submissions = [
- sub.submission_timestamp.strftime('%Y-%m-%d %H:%M:%S')
- for sub in submissions]
+ sub.submission_timestamp.strftime("%Y-%m-%d %H:%M:%S") for sub in submissions
+ ]
name_submissions = [sub.name for sub in submissions]
cumulated_submissions = list(range(1, 1 + len(submissions)))
training_sec = [
- (
- sub.training_timestamp - sub.submission_timestamp
- ).total_seconds() / 60.
- if sub.training_timestamp is not None else 0
+ (sub.training_timestamp - sub.submission_timestamp).total_seconds() / 60.0
+ if sub.training_timestamp is not None
+ else 0
for sub in submissions
]
- dashboard_kwargs = {'event': event,
- 'timestamp_submissions': timestamp_submissions,
- 'training_sec': training_sec,
- 'cumulated_submissions': cumulated_submissions,
- 'name_submissions': name_submissions}
+ dashboard_kwargs = {
+ "event": event,
+ "timestamp_submissions": timestamp_submissions,
+ "training_sec": training_sec,
+ "cumulated_submissions": cumulated_submissions,
+ "name_submissions": name_submissions,
+ }
failed_leaderboard_html = event.failed_leaderboard_html
new_leaderboard_html = event.new_leaderboard_html
- approved = is_user_signed_up(
- db.session, event_name, flask_login.current_user.name
- )
+ approved = is_user_signed_up(db.session, event_name, flask_login.current_user.name)
asked = is_user_sign_up_requested(
db.session, event_name, flask_login.current_user.name
)
return render_template(
- 'dashboard_submissions.html',
+ "dashboard_submissions.html",
failed_leaderboard=failed_leaderboard_html,
new_leaderboard=new_leaderboard_html,
admin=True,
approved=approved,
asked=asked,
- **dashboard_kwargs)
+ **dashboard_kwargs,
+ )
diff --git a/ramp-frontend/ramp_frontend/views/auth.py b/ramp-frontend/ramp_frontend/views/auth.py
index aee64f141..29aee9c4f 100644
--- a/ramp-frontend/ramp_frontend/views/auth.py
+++ b/ramp-frontend/ramp_frontend/views/auth.py
@@ -40,8 +40,8 @@
from ..utils import body_formatter_user
from ..utils import send_mail
-logger = logging.getLogger('RAMP-FRONTEND')
-mod = Blueprint('auth', __name__)
+logger = logging.getLogger("RAMP-FRONTEND")
+mod = Blueprint("auth", __name__)
ts = URLSafeTimedSerializer(app.config["SECRET_KEY"])
@@ -59,48 +59,45 @@ def load_user(id):
return User.query.get(id)
-@mod.route("/login", methods=['GET', 'POST'])
+@mod.route("/login", methods=["GET", "POST"])
def login():
"""Login request."""
- if app.config['TRACK_USER_INTERACTION']:
- add_user_interaction(db.session, interaction='landing')
+ if app.config["TRACK_USER_INTERACTION"]:
+ add_user_interaction(db.session, interaction="landing")
if flask_login.current_user.is_authenticated:
- logger.info('User already logged-in')
- session['logged_in'] = True
- return redirect(url_for('ramp.problems'))
+ logger.info("User already logged-in")
+ session["logged_in"] = True
+ return redirect(url_for("ramp.problems"))
form = LoginForm()
if form.validate_on_submit():
- user = get_user_by_name_or_email(db.session,
- name=form.user_name.data)
+ user = get_user_by_name_or_email(db.session, name=form.user_name.data)
if user is None:
msg = 'User "{}" does not exist'.format(form.user_name.data)
flash(msg)
logger.info(msg)
- return redirect(url_for('auth.login'))
- if not check_password(form.password.data,
- user.hashed_password):
- msg = 'Wrong password'
+ return redirect(url_for("auth.login"))
+ if not check_password(form.password.data, user.hashed_password):
+ msg = "Wrong password"
flash(msg)
logger.info(msg)
- return redirect(url_for('auth.login'))
+ return redirect(url_for("auth.login"))
flask_login.login_user(user, remember=True)
- session['logged_in'] = True
+ session["logged_in"] = True
user.is_authenticated = True
db.session.commit()
- logger.info('User "{}" is logged in'
- .format(flask_login.current_user.name))
- if app.config['TRACK_USER_INTERACTION']:
+ logger.info('User "{}" is logged in'.format(flask_login.current_user.name))
+ if app.config["TRACK_USER_INTERACTION"]:
add_user_interaction(
- db.session, interaction='login', user=flask_login.current_user
+ db.session, interaction="login", user=flask_login.current_user
)
- next_ = request.args.get('next')
+ next_ = request.args.get("next")
if next_ is None:
- next_ = url_for('ramp.problems')
+ next_ = url_for("ramp.problems")
return redirect(next_)
- return render_template('login.html', form=form)
+ return render_template("login.html", form=form)
@mod.route("/logout")
@@ -108,23 +105,23 @@ def login():
def logout():
"""Logout request."""
user = flask_login.current_user
- if app.config['TRACK_USER_INTERACTION']:
- add_user_interaction(db.session, interaction='logout', user=user)
- session['logged_in'] = False
+ if app.config["TRACK_USER_INTERACTION"]:
+ add_user_interaction(db.session, interaction="logout", user=user)
+ session["logged_in"] = False
user.is_authenticated = False
db.session.commit()
- logger.info('{} is logged out'.format(user))
+ logger.info("{} is logged out".format(user))
flask_login.logout_user()
- return redirect(url_for('auth.login'))
+ return redirect(url_for("auth.login"))
-@mod.route("/sign_up", methods=['GET', 'POST'])
+@mod.route("/sign_up", methods=["GET", "POST"])
def sign_up():
"""Sign-up request."""
if flask_login.current_user.is_authenticated:
- session['logged_in'] = True
- return redirect(url_for('ramp.problems'))
+ session["logged_in"] = True
+ return redirect(url_for("ramp.problems"))
form = UserCreateProfileForm()
if form.validate_on_submit():
@@ -144,38 +141,35 @@ def sign_up():
website_url=form.website_url.data,
bio=form.bio.data,
is_want_news=form.is_want_news.data,
- access_level='not_confirmed'
+ access_level="not_confirmed",
)
except NameClashError as e:
flash(str(e))
logger.info(str(e))
- return redirect(url_for('auth.sign_up'))
+ return redirect(url_for("auth.sign_up"))
# send an email to the participant such that he can confirm his email
token = ts.dumps(user.email)
- recover_url = url_for(
- 'auth.user_confirm_email', token=token, _external=True
- )
+ recover_url = url_for("auth.user_confirm_email", token=token, _external=True)
subject = "Confirm your email for signing-up to RAMP"
- body = ('Hi {}, \n\n Click on the following link to confirm your email'
- ' address and finalize your sign-up to RAMP.\n\n Note that '
- 'your account still needs to be approved by a RAMP '
- 'administrator.\n\n'
- .format(user.firstname))
+ body = (
+ "Hi {}, \n\n Click on the following link to confirm your email"
+ " address and finalize your sign-up to RAMP.\n\n Note that "
+ "your account still needs to be approved by a RAMP "
+ "administrator.\n\n".format(user.firstname)
+ )
body += recover_url
- body += '\n\nSee you on the RAMP website!'
+ body += "\n\nSee you on the RAMP website!"
send_mail(user.email, subject, body)
- logger.info(
- '{} has signed-up to RAMP'.format(user.name)
- )
+ logger.info("{} has signed-up to RAMP".format(user.name))
flash(
"We sent a confirmation email. Go read your email and click on "
"the confirmation link"
)
- return redirect(url_for('auth.login'))
- return render_template('sign_up.html', form=form)
+ return redirect(url_for("auth.login"))
+ return render_template("sign_up.html", form=form)
-@mod.route("/update_profile", methods=['GET', 'POST'])
+@mod.route("/update_profile", methods=["GET", "POST"])
@flask_login.login_required
def update_profile():
"""User profile update."""
@@ -194,10 +188,10 @@ def update_profile():
google_url=form.google_url.data,
github_url=form.github_url.data,
website_url=form.website_url.data,
- is_want_news=form.is_want_news.data
+ is_want_news=form.is_want_news.data,
)
# send_register_request_mail(user)
- return redirect(url_for('ramp.problems'))
+ return redirect(url_for("ramp.problems"))
form.lastname.data = flask_login.current_user.lastname
form.firstname.data = flask_login.current_user.firstname
form.email.data = flask_login.current_user.email
@@ -209,16 +203,16 @@ def update_profile():
form.website_url.data = flask_login.current_user.website_url
form.bio.data = flask_login.current_user.bio
form.is_want_news.data = flask_login.current_user.is_want_news
- return render_template('update_profile.html', form=form)
+ return render_template("update_profile.html", form=form)
-@mod.route("/delete_profile", methods=['GET'])
+@mod.route("/delete_profile", methods=["GET"])
@flask_login.login_required
def delete_profile():
user = flask_login.current_user
user_id = user.id
user_name = user.name
- session['logged_in'] = False
+ session["logged_in"] = False
user.name = f"deleted_{user_id}"
user.is_authenticated = False
user.hashed_password = uuid.uuid4().hex
@@ -235,47 +229,48 @@ def delete_profile():
lastname="deleted",
firstname="deleted",
)
- logger.info(f'User {user_name} profile is deleted.')
+ logger.info(f"User {user_name} profile is deleted.")
flask_login.logout_user()
- return redirect('/')
+ return redirect("/")
-@mod.route('/reset_password', methods=["GET", "POST"])
+@mod.route("/reset_password", methods=["GET", "POST"])
def reset_password():
"""Reset password of a RAMP user."""
form = EmailForm()
- error = ''
+ error = ""
if form.validate_on_submit():
user = User.query.filter_by(email=form.email.data).one_or_none()
- if user and user.access_level != 'asked':
+ if user and user.access_level != "asked":
token = ts.dumps(user.email)
- recover_url = url_for(
- 'auth.reset_with_token', token=token, _external=True
- )
+ recover_url = url_for("auth.reset_with_token", token=token, _external=True)
subject = "Password reset requested - RAMP website"
- body = ('Hi {}, \n\nclick on the link to reset your password:\n'
- .format(user.firstname))
+ body = "Hi {}, \n\nclick on the link to reset your password:\n".format(
+ user.firstname
+ )
body += recover_url
- body += '\n\nSee you on the RAMP website!'
+ body += "\n\nSee you on the RAMP website!"
send_mail(user.email, subject, body)
- logger.info(
- 'Password reset requested for user {}'.format(user.name)
- )
+ logger.info("Password reset requested for user {}".format(user.name))
logger.info(recover_url)
- flash('An email to reset your password has been sent')
- return redirect(url_for('auth.login'))
+ flash("An email to reset your password has been sent")
+ return redirect(url_for("auth.login"))
elif user is None:
- error = ('The email address is not linked to any user. You can '
- 'sign-up instead.')
+ error = (
+ "The email address is not linked to any user. You can "
+ "sign-up instead."
+ )
else:
- error = ('Your account has not been yet approved. You cannot '
- 'change the password already.')
- return render_template('reset_password.html', form=form, error=error)
+ error = (
+ "Your account has not been yet approved. You cannot "
+ "change the password already."
+ )
+ return render_template("reset_password.html", form=form, error=error)
-@mod.route('/reset/<token>', methods=["GET", "POST"])
+@mod.route("/reset/<token>", methods=["GET", "POST"])
def reset_with_token(token):
"""Reset password by passing a token (email).
@@ -294,20 +289,20 @@ def reset_with_token(token):
if form.validate_on_submit():
user = User.query.filter_by(email=email).one_or_none()
if user is None:
- logger.error('The error was deleted before resetting his/her '
- 'password')
+ logger.error("The error was deleted before resetting his/her " "password")
abort(404)
- (User.query.filter_by(email=email)
- .update({
- "hashed_password":
- hash_password(form.password.data).decode()}))
+ (
+ User.query.filter_by(email=email).update(
+ {"hashed_password": hash_password(form.password.data).decode()}
+ )
+ )
db.session.commit()
- return redirect(url_for('auth.login'))
+ return redirect(url_for("auth.login"))
- return render_template('reset_with_token.html', form=form, token=token)
+ return render_template("reset_with_token.html", form=form, token=token)
-@mod.route('/confirm_email/<token>', methods=["GET", "POST"])
+@mod.route("/confirm_email/<token>", methods=["GET", "POST"])
def user_confirm_email(token):
"""Confirm a user account using his email address and a token to approve.
@@ -325,37 +320,38 @@ def user_confirm_email(token):
user = User.query.filter_by(email=email).one_or_none()
if user is None:
flash(
- 'You did not sign-up yet to RAMP. Please sign-up first.',
- category='error'
+ "You did not sign-up yet to RAMP. Please sign-up first.",
+ category="error",
)
- return redirect(url_for('auth.sign_up'))
- elif user.access_level in ('user', 'admin'):
+ return redirect(url_for("auth.sign_up"))
+ elif user.access_level in ("user", "admin"):
flash(
"Your account is already approved. You don't need to confirm your "
- "email address", category='error'
+ "email address",
+ category="error",
)
- return redirect(url_for('auth.login'))
- elif user.access_level == 'asked':
+ return redirect(url_for("auth.login"))
+ elif user.access_level == "asked":
flash(
"Your email address already has been confirmed. You need to wait "
- "for an approval from a RAMP administrator", category='error'
+ "for an approval from a RAMP administrator",
+ category="error",
)
- return redirect(url_for('general.index'))
- User.query.filter_by(email=email).update({'access_level': 'asked'})
+ return redirect(url_for("general.index"))
+ User.query.filter_by(email=email).update({"access_level": "asked"})
db.session.commit()
- admin_users = User.query.filter_by(access_level='admin')
+ admin_users = User.query.filter_by(access_level="admin")
for admin in admin_users:
- subject = 'Approve registration of {}'.format(
- user.name
- )
+ subject = "Approve registration of {}".format(user.name)
body = body_formatter_user(user)
- url_approve = ('http://{}/sign_up/{}'
- .format(app.config['DOMAIN_NAME'], user.name))
- body += 'Click on the link to approve the registration '
- body += 'of this user: {}'.format(url_approve)
+ url_approve = "http://{}/sign_up/{}".format(
+ app.config["DOMAIN_NAME"], user.name
+ )
+ body += "Click on the link to approve the registration "
+ body += "of this user: {}".format(url_approve)
send_mail(admin.email, subject, body)
flash(
"An email has been sent to the RAMP administrator(s) who will "
"approve your account"
)
- return redirect(url_for('auth.login'))
+ return redirect(url_for("auth.login"))
diff --git a/ramp-frontend/ramp_frontend/views/general.py b/ramp-frontend/ramp_frontend/views/general.py
index 37bb40332..e9a606684 100644
--- a/ramp-frontend/ramp_frontend/views/general.py
+++ b/ramp-frontend/ramp_frontend/views/general.py
@@ -11,31 +11,31 @@
from .redirect import redirect_to_user
from .._version import __version__
-mod = Blueprint('general', __name__)
+mod = Blueprint("general", __name__)
-@mod.route('/')
+@mod.route("/")
def index():
"""Default landing page."""
- img_ext = ('.png', '.jpg', '.jpeg', '.gif', '.svg')
+ img_ext = (".png", ".jpg", ".jpeg", ".gif", ".svg")
current_dir = os.path.dirname(__file__)
img_folder = os.path.join(current_dir, "..", "static", "img", "powered_by")
context = {}
if os.path.isdir(img_folder):
- images = [f for f in os.listdir(img_folder)
- if f.endswith(img_ext)]
+ images = [f for f in os.listdir(img_folder) if f.endswith(img_ext)]
context["images"] = images
context["version"] = __version__
- return render_template('index.html', **context)
+ return render_template("index.html", **context)
@mod.route("/description")
def ramp():
"""RAMP description request."""
- user = (flask_login.current_user
- if flask_login.current_user.is_authenticated else None)
- admin = user.access_level == 'admin' if user is not None else False
- return render_template('ramp_description.html', admin=admin)
+ user = (
+ flask_login.current_user if flask_login.current_user.is_authenticated else None
+ )
+ admin = user.access_level == "admin" if user is not None else False
+ return render_template("ramp_description.html", admin=admin)
@mod.route("/data_domains")
@@ -44,23 +44,24 @@ def data_domains():
problems."""
current_keywords = Keyword.query.order_by(Keyword.name)
current_problems = Problem.query.order_by(Problem.id)
- return render_template('data_domains.html',
- keywords=current_keywords,
- problems=current_problems)
+ return render_template(
+ "data_domains.html",
+ keywords=current_keywords,
+ problems=current_problems,
+ )
@mod.route("/teaching")
def teaching():
"""Page related to RAMP offers for teaching classes."""
- return render_template('teaching.html')
+ return render_template("teaching.html")
@mod.route("/data_science_themes")
def data_science_themes():
"""Page reviewing problems organized by ML themes."""
current_keywords = Keyword.query.order_by(Keyword.name)
- return render_template('data_science_themes.html',
- keywords=current_keywords)
+ return render_template("data_science_themes.html", keywords=current_keywords)
@mod.route("/keywords/")
@@ -68,14 +69,15 @@ def keywords(keyword_name):
"""Page which give details about a keyword."""
keyword = Keyword.query.filter_by(name=keyword_name).one_or_none()
if keyword:
- return render_template('keyword.html', keyword=keyword)
- return redirect_to_user('Keyword {} does not exist.'
- .format(keyword_name), is_error=True)
+ return render_template("keyword.html", keyword=keyword)
+ return redirect_to_user(
+ "Keyword {} does not exist.".format(keyword_name), is_error=True
+ )
@mod.route("/privacy_policy")
def privacy_policy():
- if not app.config['PRIVACY_POLICY_PAGE']:
+ if not app.config["PRIVACY_POLICY_PAGE"]:
flask.abort(404)
- return render_template('privacy_policy.html')
+ return render_template("privacy_policy.html")
diff --git a/ramp-frontend/ramp_frontend/views/leaderboard.py b/ramp-frontend/ramp_frontend/views/leaderboard.py
index e3cba2e43..d35f15371 100644
--- a/ramp-frontend/ramp_frontend/views/leaderboard.py
+++ b/ramp-frontend/ramp_frontend/views/leaderboard.py
@@ -23,8 +23,8 @@
from .redirect import redirect_to_user
-mod = Blueprint('leaderboard', __name__)
-logger = logging.getLogger('RAMP-FRONTEND')
+mod = Blueprint("leaderboard", __name__)
+logger = logging.getLogger("RAMP-FRONTEND")
SORTING_COLUMN_INDEX = 2
@@ -40,45 +40,50 @@ def my_submissions(event_name):
The name of the event.
"""
event = get_event(db.session, event_name)
- if not is_accessible_event(db.session, event_name,
- flask_login.current_user.name):
+ if not is_accessible_event(db.session, event_name, flask_login.current_user.name):
return redirect_to_user(
- '{}: no event named "{}"'
- .format(flask_login.current_user.firstname, event_name)
+ '{}: no event named "{}"'.format(
+ flask_login.current_user.firstname, event_name
+ )
)
- if app.config['TRACK_USER_INTERACTION']:
+ if app.config["TRACK_USER_INTERACTION"]:
add_user_interaction(
- db.session, interaction='looking at my_submissions',
- user=flask_login.current_user, event=event
+ db.session,
+ interaction="looking at my_submissions",
+ user=flask_login.current_user,
+ event=event,
+ )
+ if not is_accessible_code(db.session, event_name, flask_login.current_user.name):
+ error_str = (
+ "No access to my submissions for event {}. If you have "
+ "already signed up, please wait for approval.".format(event.name)
)
- if not is_accessible_code(db.session, event_name,
- flask_login.current_user.name):
- error_str = ('No access to my submissions for event {}. If you have '
- 'already signed up, please wait for approval.'
- .format(event.name))
return redirect_to_user(error_str)
# Doesn't work if team mergers are allowed
- event_team = get_event_team_by_name(db.session, event_name,
- flask_login.current_user.name)
+ event_team = get_event_team_by_name(
+ db.session, event_name, flask_login.current_user.name
+ )
leaderboard_html = event_team.leaderboard_html
failed_leaderboard_html = event_team.failed_leaderboard_html
new_leaderboard_html = event_team.new_leaderboard_html
admin = is_admin(db.session, event_name, flask_login.current_user.name)
if event.official_score_type.is_lower_the_better:
- sorting_direction = 'asc'
+ sorting_direction = "asc"
else:
- sorting_direction = 'desc'
+ sorting_direction = "desc"
- return render_template('leaderboard.html',
- leaderboard_title='Trained submissions',
- leaderboard=leaderboard_html,
- failed_leaderboard=failed_leaderboard_html,
- new_leaderboard=new_leaderboard_html,
- sorting_column_index=SORTING_COLUMN_INDEX,
- sorting_direction=sorting_direction,
- event=event,
- admin=admin)
+ return render_template(
+ "leaderboard.html",
+ leaderboard_title="Trained submissions",
+ leaderboard=leaderboard_html,
+ failed_leaderboard=failed_leaderboard_html,
+ new_leaderboard=new_leaderboard_html,
+ sorting_column_index=SORTING_COLUMN_INDEX,
+ sorting_direction=sorting_direction,
+ event=event,
+ admin=admin,
+ )
@mod.route("/events//leaderboard")
@@ -92,51 +97,49 @@ def leaderboard(event_name):
The name of the event.
"""
event = get_event(db.session, event_name)
- if not is_accessible_event(db.session, event_name,
- flask_login.current_user.name):
+ if not is_accessible_event(db.session, event_name, flask_login.current_user.name):
return redirect_to_user(
- '{}: no event named "{}"'
- .format(flask_login.current_user.firstname, event_name))
- if app.config['TRACK_USER_INTERACTION']:
+ '{}: no event named "{}"'.format(
+ flask_login.current_user.firstname, event_name
+ )
+ )
+ if app.config["TRACK_USER_INTERACTION"]:
add_user_interaction(
db.session,
- interaction='looking at leaderboard',
+ interaction="looking at leaderboard",
user=flask_login.current_user,
- event=event
+ event=event,
)
- if is_accessible_leaderboard(db.session, event_name,
- flask_login.current_user.name):
+ if is_accessible_leaderboard(db.session, event_name, flask_login.current_user.name):
leaderboard_html = event.public_leaderboard_html_with_links
else:
leaderboard_html = event.public_leaderboard_html_no_links
if event.official_score_type.is_lower_the_better:
- sorting_direction = 'asc'
+ sorting_direction = "asc"
else:
- sorting_direction = 'desc'
+ sorting_direction = "desc"
leaderboard_kwargs = dict(
leaderboard=leaderboard_html,
- leaderboard_title='Leaderboard',
+ leaderboard_title="Leaderboard",
sorting_column_index=SORTING_COLUMN_INDEX,
sorting_direction=sorting_direction,
- event=event
+ event=event,
)
if is_admin(db.session, event_name, flask_login.current_user.name):
failed_leaderboard_html = event.failed_leaderboard_html
new_leaderboard_html = event.new_leaderboard_html
template = render_template(
- 'leaderboard.html',
+ "leaderboard.html",
failed_leaderboard=failed_leaderboard_html,
new_leaderboard=new_leaderboard_html,
admin=True,
- **leaderboard_kwargs
+ **leaderboard_kwargs,
)
else:
- template = render_template(
- 'leaderboard.html', **leaderboard_kwargs
- )
+ template = render_template("leaderboard.html", **leaderboard_kwargs)
return template
@@ -152,37 +155,35 @@ def competition_leaderboard(event_name):
The event name.
"""
event = get_event(db.session, event_name)
- if not is_accessible_event(db.session, event_name,
- flask_login.current_user.name):
+ if not is_accessible_event(db.session, event_name, flask_login.current_user.name):
return redirect_to_user(
- '{}: no event named "{}"'
- .format(flask_login.current_user.firstname, event_name)
+ '{}: no event named "{}"'.format(
+ flask_login.current_user.firstname, event_name
+ )
)
- if app.config['TRACK_USER_INTERACTION']:
+ if app.config["TRACK_USER_INTERACTION"]:
add_user_interaction(
db.session,
- interaction='looking at leaderboard',
+ interaction="looking at leaderboard",
user=flask_login.current_user,
- event=event
+ event=event,
)
admin = is_admin(db.session, event_name, flask_login.current_user.name)
- approved = is_user_signed_up(
- db.session, event_name, flask_login.current_user.name
- )
+ approved = is_user_signed_up(db.session, event_name, flask_login.current_user.name)
asked = approved
leaderboard_html = event.public_competition_leaderboard_html
leaderboard_kwargs = dict(
leaderboard=leaderboard_html,
- leaderboard_title='Leaderboard',
+ leaderboard_title="Leaderboard",
sorting_column_index=0,
- sorting_direction='asc',
+ sorting_direction="asc",
event=event,
admin=admin,
asked=asked,
- approved=approved
+ approved=approved,
)
- return render_template('leaderboard.html', **leaderboard_kwargs)
+ return render_template("leaderboard.html", **leaderboard_kwargs)
@mod.route("/events//private_leaderboard")
@@ -196,48 +197,47 @@ def private_leaderboard(event_name):
The event name.
"""
if not flask_login.current_user.is_authenticated:
- return redirect(url_for('auth.login'))
+ return redirect(url_for("auth.login"))
event = get_event(db.session, event_name)
- if not is_accessible_event(db.session, event_name,
- flask_login.current_user.name):
+ if not is_accessible_event(db.session, event_name, flask_login.current_user.name):
return redirect_to_user(
- '{}: no event named "{}"'
- .format(flask_login.current_user.firstname, event_name)
+ '{}: no event named "{}"'.format(
+ flask_login.current_user.firstname, event_name
+ )
)
- if (not is_admin(db.session, event_name, flask_login.current_user.name) and
- (event.closing_timestamp is None or
- event.closing_timestamp > datetime.datetime.utcnow())):
- return redirect(url_for('ramp.problems'))
+ if not is_admin(db.session, event_name, flask_login.current_user.name) and (
+ event.closing_timestamp is None
+ or event.closing_timestamp > datetime.datetime.utcnow()
+ ):
+ return redirect(url_for("ramp.problems"))
- if app.config['TRACK_USER_INTERACTION']:
+ if app.config["TRACK_USER_INTERACTION"]:
add_user_interaction(
db.session,
- interaction='looking at private leaderboard',
+ interaction="looking at private leaderboard",
user=flask_login.current_user,
- event=event
+ event=event,
)
leaderboard_html = event.private_leaderboard_html
admin = is_admin(db.session, event_name, flask_login.current_user.name)
if event.official_score_type.is_lower_the_better:
- sorting_direction = 'asc'
+ sorting_direction = "asc"
else:
- sorting_direction = 'desc'
+ sorting_direction = "desc"
- approved = is_user_signed_up(
- db.session, event_name, flask_login.current_user.name
- )
+ approved = is_user_signed_up(db.session, event_name, flask_login.current_user.name)
asked = approved
template = render_template(
- 'leaderboard.html',
- leaderboard_title='Leaderboard',
+ "leaderboard.html",
+ leaderboard_title="Leaderboard",
leaderboard=leaderboard_html,
- sorting_column_index=SORTING_COLUMN_INDEX+1,
+ sorting_column_index=SORTING_COLUMN_INDEX + 1,
sorting_direction=sorting_direction,
event=event,
private=True,
admin=admin,
asked=asked,
- approved=approved
+ approved=approved,
)
return template
@@ -254,43 +254,42 @@ def private_competition_leaderboard(event_name):
The event name.
"""
if not flask_login.current_user.is_authenticated:
- return redirect(url_for('auth.login'))
+ return redirect(url_for("auth.login"))
event = get_event(db.session, event_name)
- if not is_accessible_event(db.session, event_name,
- flask_login.current_user.name):
+ if not is_accessible_event(db.session, event_name, flask_login.current_user.name):
return redirect_to_user(
- '{}: no event named "{}"'
- .format(flask_login.current_user.firstname, event_name)
+ '{}: no event named "{}"'.format(
+ flask_login.current_user.firstname, event_name
+ )
)
- if (not is_admin(db.session, event_name, flask_login.current_user.name) and
- (event.closing_timestamp is None or
- event.closing_timestamp > datetime.datetime.utcnow())):
- return redirect(url_for('ramp.problems'))
+ if not is_admin(db.session, event_name, flask_login.current_user.name) and (
+ event.closing_timestamp is None
+ or event.closing_timestamp > datetime.datetime.utcnow()
+ ):
+ return redirect(url_for("ramp.problems"))
- if app.config['TRACK_USER_INTERACTION']:
+ if app.config["TRACK_USER_INTERACTION"]:
add_user_interaction(
db.session,
- interaction='looking at private leaderboard',
+ interaction="looking at private leaderboard",
user=flask_login.current_user,
- event=event
+ event=event,
)
admin = is_admin(db.session, event_name, flask_login.current_user.name)
- approved = is_user_signed_up(
- db.session, event_name, flask_login.current_user.name
- )
+ approved = is_user_signed_up(db.session, event_name, flask_login.current_user.name)
asked = approved
leaderboard_html = event.private_competition_leaderboard_html
leaderboard_kwargs = dict(
leaderboard=leaderboard_html,
- leaderboard_title='Leaderboard',
+ leaderboard_title="Leaderboard",
sorting_column_index=0,
- sorting_direction='asc',
+ sorting_direction="asc",
event=event,
admin=admin,
asked=asked,
- approved=approved
+ approved=approved,
)
- return render_template('leaderboard.html', **leaderboard_kwargs)
+ return render_template("leaderboard.html", **leaderboard_kwargs)
diff --git a/ramp-frontend/ramp_frontend/views/ramp.py b/ramp-frontend/ramp_frontend/views/ramp.py
index 0bbf90ce1..5b87e744b 100644
--- a/ramp-frontend/ramp_frontend/views/ramp.py
+++ b/ramp-frontend/ramp_frontend/views/ramp.py
@@ -72,20 +72,19 @@
from .visualization import score_plot
-mod = Blueprint('ramp', __name__)
-logger = logging.getLogger('RAMP-FRONTEND')
+mod = Blueprint("ramp", __name__)
+logger = logging.getLogger("RAMP-FRONTEND")
@mod.route("/problems")
def problems():
"""Landing page showing all the RAMP problems."""
- user = (flask_login.current_user
- if flask_login.current_user.is_authenticated else None)
- admin = user.access_level == 'admin' if user is not None else False
- if app.config['TRACK_USER_INTERACTION']:
- add_user_interaction(
- db.session, interaction='looking at problems', user=user
- )
+ user = (
+ flask_login.current_user if flask_login.current_user.is_authenticated else None
+ )
+ admin = user.access_level == "admin" if user is not None else False
+ if app.config["TRACK_USER_INTERACTION"]:
+ add_user_interaction(db.session, interaction="looking at problems", user=user)
problems = get_problem(db.session, None)
for problem in problems:
@@ -96,28 +95,26 @@ def problems():
start_collab = event.public_opening_timestamp
end = event.closing_timestamp
if now < start or now >= end:
- event.state = 'close'
+ event.state = "close"
elif now >= start and now < start_collab:
- event.state = 'competitive'
+ event.state = "competitive"
elif now >= start and now >= start_collab and now < end:
- event.state = 'collab'
+ event.state = "collab"
if user:
signed = get_event_team_by_name(
- db.session, event.name,
- flask_login.current_user.name)
+ db.session, event.name, flask_login.current_user.name
+ )
if not signed:
- event.state_user = 'not_signed'
+ event.state_user = "not_signed"
elif signed.approved:
- event.state_user = 'signed'
+ event.state_user = "signed"
elif signed:
- event.state_user = 'waiting'
+ event.state_user = "waiting"
else:
- event.state_user = 'not_signed'
+ event.state_user = "not_signed"
# problems = Problem.query.order_by(Problem.id.desc())
- return render_template('problems.html',
- problems=problems,
- admin=admin)
+ return render_template("problems.html", problems=problems, admin=admin)
@mod.route("/problems/")
@@ -130,44 +127,48 @@ def problem(problem_name):
The name of a problem.
"""
current_problem = get_problem(db.session, problem_name)
- user = (flask_login.current_user
- if flask_login.current_user.is_authenticated else None)
- admin = user.access_level == 'admin' if user is not None else False
+ user = (
+ flask_login.current_user if flask_login.current_user.is_authenticated else None
+ )
+ admin = user.access_level == "admin" if user is not None else False
if current_problem:
- if app.config['TRACK_USER_INTERACTION']:
+ if app.config["TRACK_USER_INTERACTION"]:
if flask_login.current_user.is_authenticated:
add_user_interaction(
db.session,
- interaction='looking at problem',
+ interaction="looking at problem",
user=flask_login.current_user,
- problem=current_problem
+ problem=current_problem,
)
else:
add_user_interaction(
- db.session, interaction='looking at problem',
- problem=current_problem
+ db.session,
+ interaction="looking at problem",
+ problem=current_problem,
)
description_f_name = os.path.join(
current_problem.path_ramp_kit,
- '{}_starting_kit.html'.format(current_problem.name)
+ "{}_starting_kit.html".format(current_problem.name),
)
# check which event ramp-kit archive is the latest
- archive_dir = os.path.join(
- current_problem.path_ramp_kit, "events_archived"
- )
+ archive_dir = os.path.join(current_problem.path_ramp_kit, "events_archived")
latest_event_zip = max(
[f for f in os.scandir(archive_dir) if f.name.endswith(".zip")],
- key=lambda x: x.stat().st_mtime
+ key=lambda x: x.stat().st_mtime,
)
latest_event = os.path.splitext(latest_event_zip.name)[0]
return render_template(
- 'problem.html', problem=current_problem, admin=admin,
- notebook_filename=description_f_name, latest_event=latest_event
+ "problem.html",
+ problem=current_problem,
+ admin=admin,
+ notebook_filename=description_f_name,
+ latest_event=latest_event,
)
else:
- return redirect_to_user('Problem {} does not exist'
- .format(problem_name), is_error=True)
+ return redirect_to_user(
+ "Problem {} does not exist".format(problem_name), is_error=True
+ )
@mod.route("/download_starting_kit/")
@@ -175,7 +176,7 @@ def download_starting_kit(event_name):
event = db.session.query(Event).filter_by(name=event_name).one()
return send_from_directory(
os.path.join(event.problem.path_ramp_kit, "events_archived"),
- event_name + ".zip"
+ event_name + ".zip",
)
@@ -184,14 +185,14 @@ def notebook(problem_name):
current_problem = get_problem(db.session, problem_name)
return send_from_directory(
current_problem.path_ramp_kit,
- '{}_starting_kit.html'.format(current_problem.name)
+ "{}_starting_kit.html".format(current_problem.name),
)
@mod.route("/rules/")
def rules(event_name):
event = get_event(db.session, event_name)
- return render_template('rules.html', event=event)
+ return render_template("rules.html", event=event)
@mod.route("/events/")
@@ -204,20 +205,25 @@ def user_event(event_name):
event_name : str
The event name.
"""
- if flask_login.current_user.access_level == 'asked':
- msg = 'Your account has not been approved yet by the administrator'
+ if flask_login.current_user.access_level == "asked":
+ msg = "Your account has not been approved yet by the administrator"
logger.error(msg)
return redirect_to_user(msg)
- if not is_accessible_event(db.session, event_name,
- flask_login.current_user.name):
- return redirect_to_user('{}: no event named "{}"'
- .format(flask_login.current_user.firstname,
- event_name))
+ if not is_accessible_event(db.session, event_name, flask_login.current_user.name):
+ return redirect_to_user(
+ '{}: no event named "{}"'.format(
+ flask_login.current_user.firstname, event_name
+ )
+ )
event = get_event(db.session, event_name)
if event:
- if app.config['TRACK_USER_INTERACTION']:
- add_user_interaction(db.session, interaction='looking at event',
- event=event, user=flask_login.current_user)
+ if app.config["TRACK_USER_INTERACTION"]:
+ add_user_interaction(
+ db.session,
+ interaction="looking at event",
+ event=event,
+ user=flask_login.current_user,
+ )
admin = is_admin(db.session, event_name, flask_login.current_user.name)
approved = is_user_signed_up(
db.session, event_name, flask_login.current_user.name
@@ -225,13 +231,16 @@ def user_event(event_name):
asked = is_user_sign_up_requested(
db.session, event_name, flask_login.current_user.name
)
- return render_template('event.html',
- event=event,
- admin=admin,
- approved=approved,
- asked=asked)
- return redirect_to_user('Event {} does not exist.'
- .format(event_name), is_error=True)
+ return render_template(
+ "event.html",
+ event=event,
+ admin=admin,
+ approved=approved,
+ asked=asked,
+ )
+ return redirect_to_user(
+ "Event {} does not exist.".format(event_name), is_error=True
+ )
@mod.route("/events//sign_up")
@@ -245,43 +254,52 @@ def sign_up_for_event(event_name):
The name of the event.
"""
event = get_event(db.session, event_name)
- if not is_accessible_event(db.session, event_name,
- flask_login.current_user.name):
- return redirect_to_user('{}: no event named "{}"'
- .format(flask_login.current_user.firstname,
- event_name))
- if app.config['TRACK_USER_INTERACTION']:
- add_user_interaction(db.session, interaction='signing up at event',
- user=flask_login.current_user, event=event)
+ if not is_accessible_event(db.session, event_name, flask_login.current_user.name):
+ return redirect_to_user(
+ '{}: no event named "{}"'.format(
+ flask_login.current_user.firstname, event_name
+ )
+ )
+ if app.config["TRACK_USER_INTERACTION"]:
+ add_user_interaction(
+ db.session,
+ interaction="signing up at event",
+ user=flask_login.current_user,
+ event=event,
+ )
ask_sign_up_team(db.session, event.name, flask_login.current_user.name)
if event.is_controled_signup:
- admin_users = User.query.filter_by(access_level='admin')
+ admin_users = User.query.filter_by(access_level="admin")
for admin in admin_users:
- subject = ('Request to sign-up {} to RAMP event {}'
- .format(event.name, flask_login.current_user.name))
+ subject = "Request to sign-up {} to RAMP event {}".format(
+ event.name, flask_login.current_user.name
+ )
body = body_formatter_user(flask_login.current_user)
- url_approve = ('http://{}/events/{}/sign_up/{}'
- .format(
- app.config['DOMAIN_NAME'], event.name,
- flask_login.current_user.name
- ))
- body += ('Click on this link to approve the sign-up request: {}'
- .format(url_approve))
+ url_approve = "http://{}/events/{}/sign_up/{}".format(
+ app.config["DOMAIN_NAME"],
+ event.name,
+ flask_login.current_user.name,
+ )
+ body += "Click on this link to approve the sign-up request: {}".format(
+ url_approve
+ )
send_mail(admin.email, subject, body)
- return redirect_to_user("Sign-up request is sent to event admins.",
- is_error=False, category='Request sent')
+ return redirect_to_user(
+ "Sign-up request is sent to event admins.",
+ is_error=False,
+ category="Request sent",
+ )
sign_up_team(db.session, event.name, flask_login.current_user.name)
return redirect_to_sandbox(
event,
- '{} is signed up for {}.'
- .format(flask_login.current_user.firstname, event),
+ "{} is signed up for {}.".format(flask_login.current_user.firstname, event),
is_error=False,
- category='Successful sign-up'
+ category="Successful sign-up",
)
-@mod.route("/events//sandbox", methods=['GET', 'POST'])
+@mod.route("/events//sandbox", methods=["GET", "POST"])
@flask_login.login_required
def sandbox(event_name):
"""Landing page for the user's sandbox.
@@ -292,23 +310,25 @@ def sandbox(event_name):
The event name.
"""
event = get_event(db.session, event_name)
- if not is_accessible_event(db.session, event_name,
- flask_login.current_user.name):
+ if not is_accessible_event(db.session, event_name, flask_login.current_user.name):
return redirect_to_user(
- '{}: no event named "{}"'
- .format(flask_login.current_user.firstname, event_name)
+ '{}: no event named "{}"'.format(
+ flask_login.current_user.firstname, event_name
+ )
+ )
+ if not is_accessible_code(db.session, event_name, flask_login.current_user.name):
+ error_str = (
+ "No access to sandbox for event {}. If you have "
+ "already signed up, please wait for approval.".format(event.name)
)
- if not is_accessible_code(db.session, event_name,
- flask_login.current_user.name):
- error_str = ('No access to sandbox for event {}. If you have '
- 'already signed up, please wait for approval.'
- .format(event.name))
return redirect_to_user(error_str)
# setup the webpage when loading
    # we use the code stored in the sandbox to show to the user
sandbox_submission = get_submission_by_name(
- db.session, event_name, flask_login.current_user.name,
- event.ramp_sandbox_name
+ db.session,
+ event_name,
+ flask_login.current_user.name,
+ event.ramp_sandbox_name,
)
event_team = get_event_team_by_name(
db.session, event_name, flask_login.current_user.name
@@ -332,10 +352,9 @@ def sandbox(event_name):
for submission_file in sandbox_submission.files:
if submission_file.is_editable:
f_field = submission_file.name
- setattr(CodeForm,
- f_field, StringField('Text', widget=TextArea()))
+ setattr(CodeForm, f_field, StringField("Text", widget=TextArea()))
code_form_kwargs[f_field] = submission_file.get_code()
- code_form_kwargs['prefix'] = 'code'
+ code_form_kwargs["prefix"] = "code"
code_form = CodeForm(**code_form_kwargs)
# Then, to be able to iterate over the files in the sandbox.html
# template, we also fill a separate table of pairs (file name, code).
@@ -343,21 +362,21 @@ def sandbox(event_name):
for submission_file in sandbox_submission.files:
if submission_file.is_editable:
code_form.names_codes.append(
- (submission_file.name, submission_file.get_code()))
+ (submission_file.name, submission_file.get_code())
+ )
    # initialize the submission field and the uploading form
submit_form = SubmitForm(
- submission_name=event_team.last_submission_name, prefix='submit'
+ submission_name=event_team.last_submission_name, prefix="submit"
)
- upload_form = UploadForm(prefix='upload')
+ upload_form = UploadForm(prefix="upload")
# check if the event is before, during or after open state
now = datetime.datetime.now()
start = event.opening_timestamp
end = event.closing_timestamp
- event_status = {"msg": "",
- "state": "not_yet"}
+ event_status = {"msg": "", "state": "not_yet"}
start_str = start.strftime("%d of %B %Y at %H:%M")
end_str = end.strftime("%d of %B %Y, %H:%M")
if now < start:
@@ -371,80 +390,89 @@ def sandbox(event_name):
event_status["state"] = "close"
admin = is_admin(db.session, event_name, flask_login.current_user.name)
- if request.method == 'GET':
+ if request.method == "GET":
return render_template(
- 'sandbox.html',
+ "sandbox.html",
submission_names=sandbox_submission.f_names,
code_form=code_form,
- submit_form=submit_form, upload_form=upload_form,
+ submit_form=submit_form,
+ upload_form=upload_form,
event=event,
admin=admin,
- event_status=event_status
+ event_status=event_status,
)
- if request.method == 'POST':
- if ('code-csrf_token' in request.form and
- code_form.validate_on_submit()):
+ if request.method == "POST":
+ if "code-csrf_token" in request.form and code_form.validate_on_submit():
try:
for submission_file in sandbox_submission.files:
if submission_file.is_editable:
old_code = submission_file.get_code()
- submission_file.set_code(
- request.form[submission_file.name])
+ submission_file.set_code(request.form[submission_file.name])
new_code = submission_file.get_code()
- diff = '\n'.join(difflib.unified_diff(
- old_code.splitlines(), new_code.splitlines()))
+ diff = "\n".join(
+ difflib.unified_diff(
+ old_code.splitlines(), new_code.splitlines()
+ )
+ )
similarity = difflib.SequenceMatcher(
- a=old_code, b=new_code).ratio()
- if app.config['TRACK_USER_INTERACTION']:
+ a=old_code, b=new_code
+ ).ratio()
+ if app.config["TRACK_USER_INTERACTION"]:
add_user_interaction(
db.session,
- interaction='save',
+ interaction="save",
user=flask_login.current_user,
event=event,
submission_file=submission_file,
- diff=diff, similarity=similarity
+ diff=diff,
+ similarity=similarity,
)
except Exception as e:
- return redirect_to_sandbox(event, 'Error: {}'.format(e))
+ return redirect_to_sandbox(event, "Error: {}".format(e))
# if we required to only save the file, redirect now
if "saving" in request.form:
return redirect_to_sandbox(
event,
- 'Your submission has been saved. You can safely comeback '
- 'to your sandbox later.',
- is_error=False, category='File saved'
+                    "Your submission has been saved. You can safely come back "
+ "to your sandbox later.",
+ is_error=False,
+ category="File saved",
)
elif request.files:
- upload_f_name = secure_filename(
- request.files['file'].filename)
- upload_name = upload_f_name.split('.')[0]
+ upload_f_name = secure_filename(request.files["file"].filename)
+ upload_name = upload_f_name.split(".")[0]
# TODO: create a get_function
upload_workflow_element = WorkflowElement.query.filter_by(
- name=upload_name, workflow=event.workflow).one_or_none()
+ name=upload_name, workflow=event.workflow
+ ).one_or_none()
if upload_workflow_element is None:
- return redirect_to_sandbox(event,
- '{} is not in the file list.'
- .format(upload_f_name))
+ return redirect_to_sandbox(
+ event, "{} is not in the file list.".format(upload_f_name)
+ )
# TODO: create a get_function
submission_file = SubmissionFile.query.filter_by(
submission=sandbox_submission,
- workflow_element=upload_workflow_element).one()
+ workflow_element=upload_workflow_element,
+ ).one()
if submission_file.is_editable:
old_code = submission_file.get_code()
tmp_f_name = os.path.join(tempfile.gettempdir(), upload_f_name)
- request.files['file'].save(tmp_f_name)
+ request.files["file"].save(tmp_f_name)
file_length = os.stat(tmp_f_name).st_size
- if (upload_workflow_element.max_size is not None and
- file_length > upload_workflow_element.max_size):
+ if (
+ upload_workflow_element.max_size is not None
+ and file_length > upload_workflow_element.max_size
+ ):
return redirect_to_sandbox(
event,
- 'File is too big: {} exceeds max size {}'
- .format(file_length, upload_workflow_element.max_size)
+ "File is too big: {} exceeds max size {}".format(
+ file_length, upload_workflow_element.max_size
+ ),
)
if submission_file.is_editable:
try:
@@ -452,39 +480,41 @@ def sandbox(event_name):
code = f.read()
submission_file.set_code(code)
except Exception as e:
- return redirect_to_sandbox(event, 'Error: {}'.format(e))
+ return redirect_to_sandbox(event, "Error: {}".format(e))
else:
# non-editable files are not verified for now
dst = os.path.join(sandbox_submission.path, upload_f_name)
shutil.copy2(tmp_f_name, dst)
- logger.info('{} uploaded {} in {}'
- .format(flask_login.current_user.name, upload_f_name,
- event))
+ logger.info(
+ "{} uploaded {} in {}".format(
+ flask_login.current_user.name, upload_f_name, event
+ )
+ )
if submission_file.is_editable:
new_code = submission_file.get_code()
- diff = '\n'.join(difflib.unified_diff(
- old_code.splitlines(), new_code.splitlines()))
- similarity = difflib.SequenceMatcher(
- a=old_code, b=new_code).ratio()
- if app.config['TRACK_USER_INTERACTION']:
+ diff = "\n".join(
+ difflib.unified_diff(old_code.splitlines(), new_code.splitlines())
+ )
+ similarity = difflib.SequenceMatcher(a=old_code, b=new_code).ratio()
+ if app.config["TRACK_USER_INTERACTION"]:
add_user_interaction(
db.session,
- interaction='upload',
+ interaction="upload",
user=flask_login.current_user,
event=event,
submission_file=submission_file,
diff=diff,
- similarity=similarity
+ similarity=similarity,
)
else:
- if app.config['TRACK_USER_INTERACTION']:
+ if app.config["TRACK_USER_INTERACTION"]:
add_user_interaction(
db.session,
- interaction='upload',
+ interaction="upload",
user=flask_login.current_user,
event=event,
- submission_file=submission_file
+ submission_file=submission_file,
)
return redirect(request.referrer)
@@ -492,49 +522,54 @@ def sandbox(event_name):
# ie: now we let upload eg external_data.bla, and only fail at
# submission, without giving a message
- if 'submission' in request.form:
+ if "submission" in request.form:
if not submit_form.validate_on_submit():
return redirect_to_sandbox(
- event,
- 'Submission name should not contain any spaces'
+ event, "Submission name should not contain any spaces"
)
- new_submission_name = request.form['submit-submission_name']
+ new_submission_name = request.form["submit-submission_name"]
if not 4 < len(new_submission_name) < 20:
return redirect_to_sandbox(
event,
- 'Submission name should have length between 4 and '
- '20 characters.'
+ "Submission name should have length between 4 and "
+ "20 characters.",
)
try:
- new_submission_name.encode('ascii')
+ new_submission_name.encode("ascii")
except Exception as e:
- return redirect_to_sandbox(event, 'Error: {}'.format(e))
+ return redirect_to_sandbox(event, "Error: {}".format(e))
try:
- new_submission = add_submission(db.session, event_name,
- event_team.team.name,
- new_submission_name,
- sandbox_submission.path)
+ new_submission = add_submission(
+ db.session,
+ event_name,
+ event_team.team.name,
+ new_submission_name,
+ sandbox_submission.path,
+ )
except DuplicateSubmissionError:
return redirect_to_sandbox(
event,
- 'Submission {} already exists. Please change the name.'
- .format(new_submission_name)
+ "Submission {} already exists. Please change the name.".format(
+ new_submission_name
+ ),
)
except MissingExtensionError:
- return redirect_to_sandbox(
- event, 'Missing extension'
- )
+ return redirect_to_sandbox(event, "Missing extension")
except TooEarlySubmissionError as e:
return redirect_to_sandbox(event, str(e))
- logger.info('{} submitted {} for {}.'
- .format(flask_login.current_user.name,
- new_submission.name, event_team))
+ logger.info(
+ "{} submitted {} for {}.".format(
+ flask_login.current_user.name,
+ new_submission.name,
+ event_team,
+ )
+ )
if event.is_send_submitted_mails:
- admin_users = User.query.filter_by(access_level='admin')
+ admin_users = User.query.filter_by(access_level="admin")
for admin in admin_users:
- subject = 'Submission {} sent for training'.format(
+ subject = "Submission {} sent for training".format(
new_submission.name
)
body = """A new submission have been submitted:
@@ -542,56 +577,69 @@ def sandbox(event_name):
user: {}
submission: {}
submission path: {}
- """.format(event_team.event.name,
- flask_login.current_user.name,
- new_submission.name, new_submission.path)
+ """.format(
+ event_team.event.name,
+ flask_login.current_user.name,
+ new_submission.name,
+ new_submission.path,
+ )
send_mail(admin.email, subject, body)
- if app.config['TRACK_USER_INTERACTION']:
+ if app.config["TRACK_USER_INTERACTION"]:
add_user_interaction(
db.session,
- interaction='submit',
+ interaction="submit",
user=flask_login.current_user,
event=event,
- submission=new_submission
+ submission=new_submission,
)
- if app.config['TRACK_CREDITS']:
+ if app.config["TRACK_CREDITS"]:
return redirect_to_credit(
submission_hash=new_submission.hash_,
- message_str='{}'.format(
- 'Successful submission, please provide credits.'),
- is_error=False)
+ message_str="{}".format(
+ "Successful submission, please provide credits."
+ ),
+ is_error=False,
+ )
else:
return redirect_to_sandbox(
event,
- '{} submitted {} for {}'
- .format(flask_login.current_user.firstname,
- new_submission.name, event_team),
- is_error=False, category='Submission'
+ "{} submitted {} for {}".format(
+ flask_login.current_user.firstname,
+ new_submission.name,
+ event_team,
+ ),
+ is_error=False,
+ category="Submission",
)
admin = is_admin(db.session, event_name, flask_login.current_user.name)
return render_template(
- 'sandbox.html',
+ "sandbox.html",
submission_names=sandbox_submission.f_names,
code_form=code_form,
- submit_form=submit_form, upload_form=upload_form,
+ submit_form=submit_form,
+ upload_form=upload_form,
event=event,
admin=admin,
- event_status=event_status
+ event_status=event_status,
)
-@mod.route("/problems//ask_for_event", methods=['GET', 'POST'])
+@mod.route("/problems//ask_for_event", methods=["GET", "POST"])
@flask_login.login_required
def ask_for_event(problem_name):
problem = Problem.query.filter_by(name=problem_name).one_or_none()
if problem is None:
return redirect_to_user(
- '{}: no problem named "{}"'
- .format(flask_login.current_user.firstname, problem_name)
+ '{}: no problem named "{}"'.format(
+ flask_login.current_user.firstname, problem_name
+ )
+ )
+ logger.info(
+ "{} is asking for event on {}".format(
+ flask_login.current_user.name, problem.name
)
- logger.info('{} is asking for event on {}'
- .format(flask_login.current_user.name, problem.name))
+ )
    # We assume here that event name has the syntax <problem_name>_<suffix>
form = AskForEventForm(
min_duration_between_submissions_hour=8,
@@ -599,9 +647,9 @@ def ask_for_event(problem_name):
min_duration_between_submissions_second=0,
)
if form.validate_on_submit():
- admin_users = User.query.filter_by(access_level='admin')
+ admin_users = User.query.filter_by(access_level="admin")
for admin in admin_users:
- subject = 'Request to add a new event'
+ subject = "Request to add a new event"
body = """User {} asked to add a new event:
event name: {}
event title: {}
@@ -611,25 +659,26 @@ def ask_for_event(problem_name):
             closing date: {}
""".format(
flask_login.current_user.name,
- problem.name + '_' + form.suffix.data,
+ problem.name + "_" + form.suffix.data,
form.title.data,
form.n_students.data,
form.min_duration_between_submissions_hour.data,
form.min_duration_between_submissions_minute.data,
form.min_duration_between_submissions_second.data,
form.opening_date.data,
- form.closing_date.data
+ form.closing_date.data,
)
send_mail(admin.email, subject, body)
return redirect_to_user(
- 'Thank you. Your request has been sent to RAMP administrators.',
- category='Event request', is_error=False
+ "Thank you. Your request has been sent to RAMP administrators.",
+ category="Event request",
+ is_error=False,
)
- return render_template('ask_for_event.html', form=form, problem=problem)
+ return render_template("ask_for_event.html", form=form, problem=problem)
-@mod.route("/credit/", methods=['GET', 'POST'])
+@mod.route("/credit/", methods=["GET", "POST"])
@flask_login.login_required
def credit(submission_hash):
"""The landing page to credit other submission when a user submit is own.
@@ -639,54 +688,51 @@ def credit(submission_hash):
submission_hash : str
The submission hash of the current submission.
"""
- submission = (Submission.query.filter_by(hash_=submission_hash)
- .one_or_none())
+ submission = Submission.query.filter_by(hash_=submission_hash).one_or_none()
access_code = is_accessible_code(
- db.session, submission.event_team.event.name,
- flask_login.current_user.name, submission.id
+ db.session,
+ submission.event_team.event.name,
+ flask_login.current_user.name,
+ submission.id,
)
if submission is None or not access_code:
- error_str = 'Missing submission: {}'.format(submission_hash)
+ error_str = "Missing submission: {}".format(submission_hash)
return redirect_to_user(error_str)
event_team = submission.event_team
event = event_team.event
source_submissions = get_source_submissions(db.session, submission.id)
def get_s_field(source_submission):
- return '{}/{}/{}'.format(
+ return "{}/{}/{}".format(
source_submission.event_team.event.name,
source_submission.event_team.team.name,
- source_submission.name)
+ source_submission.name,
+ )
# Make sure that CreditForm is empty
CreditForm.name_credits = []
credit_form_kwargs = {}
for source_submission in source_submissions:
s_field = get_s_field(source_submission)
- setattr(CreditForm, s_field, StringField('Text'))
+ setattr(CreditForm, s_field, StringField("Text"))
credit_form = CreditForm(**credit_form_kwargs)
sum_credit = 0
# new = True
for source_submission in source_submissions:
s_field = get_s_field(source_submission)
- submission_similaritys = \
- (SubmissionSimilarity.query
- .filter_by(
- type='target_credit',
- user=flask_login.current_user,
- source_submission=source_submission,
- target_submission=submission)
- .all())
+ submission_similaritys = SubmissionSimilarity.query.filter_by(
+ type="target_credit",
+ user=flask_login.current_user,
+ source_submission=source_submission,
+ target_submission=submission,
+ ).all()
if not submission_similaritys:
submission_credit = 0
else:
# new = False
# find the last credit (in case crediter changes her mind)
- submission_similaritys.sort(
- key=lambda x: x.timestamp, reverse=True)
- submission_credit = int(
- round(100 * submission_similaritys[0].similarity)
- )
+ submission_similaritys.sort(key=lambda x: x.timestamp, reverse=True)
+ submission_credit = int(round(100 * submission_similaritys[0].similarity))
sum_credit += submission_credit
credit_form.name_credits.append(
(s_field, str(submission_credit), source_submission.link)
@@ -704,50 +750,52 @@ def get_s_field(source_submission):
if sum_credit != 100:
return redirect_to_credit(
submission_hash,
- 'Error: The total credit should add up to 100'
+ "Error: The total credit should add up to 100",
)
except Exception as e:
- return redirect_to_credit(submission_hash, 'Error: {}'.format(e))
+ return redirect_to_credit(submission_hash, "Error: {}".format(e))
for source_submission in source_submissions:
s_field = get_s_field(source_submission)
- similarity = int(getattr(credit_form, s_field).data) / 100.
- submission_similarity = \
- (SubmissionSimilarity.query
- .filter_by(
- type='target_credit',
- user=flask_login.current_user,
- source_submission=source_submission,
- target_submission=submission)
- .all())
+ similarity = int(getattr(credit_form, s_field).data) / 100.0
+ submission_similarity = SubmissionSimilarity.query.filter_by(
+ type="target_credit",
+ user=flask_login.current_user,
+ source_submission=source_submission,
+ target_submission=submission,
+ ).all()
# if submission_similarity is not empty, we need to
# add zero to cancel previous credits explicitly
if similarity > 0 or submission_similarity:
add_submission_similarity(
db.session,
- credit_type='target_credit',
+ credit_type="target_credit",
user=flask_login.current_user,
source_submission=source_submission,
target_submission=submission,
similarity=similarity,
- timestamp=datetime.datetime.utcnow()
+ timestamp=datetime.datetime.utcnow(),
)
- if app.config['TRACK_USER_INTERACTION']:
+ if app.config["TRACK_USER_INTERACTION"]:
add_user_interaction(
db.session,
- interaction='giving credit',
+ interaction="giving credit",
user=flask_login.current_user,
event=event,
- submission=submission
+ submission=submission,
)
- return redirect('/events/{}/sandbox'.format(event.name))
+ return redirect("/events/{}/sandbox".format(event.name))
admin = is_admin(db.session, event.name, flask_login.current_user.name)
return render_template(
- 'credit.html', submission=submission,
- source_submissions=source_submissions, credit_form=credit_form,
- event=event, admin=admin)
+ "credit.html",
+ submission=submission,
+ source_submissions=source_submissions,
+ credit_form=credit_form,
+ event=event,
+ admin=admin,
+ )
@mod.route("/event_plots/")
@@ -762,24 +810,22 @@ def event_plots(event_name):
The name of the event.
"""
event = get_event(db.session, event_name)
- if not is_accessible_event(db.session, event_name,
- flask_login.current_user.name):
- return redirect_to_user('{}: no event named "{}"'
- .format(flask_login.current_user.firstname,
- event_name))
+ if not is_accessible_event(db.session, event_name, flask_login.current_user.name):
+ return redirect_to_user(
+ '{}: no event named "{}"'.format(
+ flask_login.current_user.firstname, event_name
+ )
+ )
if event:
p = score_plot(db.session, event)
script, div = components(p)
- return render_template('event_plots.html',
- script=script,
- div=div,
- event=event)
- return redirect_to_user('Event {} does not exist.'
- .format(event_name),
- is_error=True)
+ return render_template("event_plots.html", script=script, div=div, event=event)
+ return redirect_to_user(
+ "Event {} does not exist.".format(event_name), is_error=True
+ )
-@mod.route("//", methods=['GET', 'POST'])
+@mod.route("//", methods=["GET", "POST"])
@flask_login.login_required
def view_model(submission_hash, f_name):
"""Rendering submission codes using templates/submission.html.
@@ -797,35 +843,34 @@ def view_model(submission_hash, f_name):
    f_name : str
The name of the submission file.
"""
- submission = (Submission.query.filter_by(hash_=submission_hash)
- .one_or_none())
- if (submission is None or
- not is_accessible_code(db.session, submission.event.name,
- flask_login.current_user.name,
- submission.id)):
- error_str = 'Missing submission: {}'.format(submission_hash)
+ submission = Submission.query.filter_by(hash_=submission_hash).one_or_none()
+ if submission is None or not is_accessible_code(
+ db.session,
+ submission.event.name,
+ flask_login.current_user.name,
+ submission.id,
+ ):
+ error_str = "Missing submission: {}".format(submission_hash)
return redirect_to_user(error_str)
event = submission.event_team.event
team = submission.event_team.team
- workflow_element_name = f_name.split('.')[0]
- workflow_element = \
- (WorkflowElement.query.filter_by(name=workflow_element_name,
- workflow=event.workflow)
- .one_or_none())
+ workflow_element_name = f_name.split(".")[0]
+ workflow_element = WorkflowElement.query.filter_by(
+ name=workflow_element_name, workflow=event.workflow
+ ).one_or_none()
if workflow_element is None:
- error_str = ('{} is not a valid workflow element by {} '
- .format(workflow_element_name,
- flask_login.current_user.name))
- error_str += 'in {}/{}/{}/{}'.format(event, team, submission, f_name)
+ error_str = "{} is not a valid workflow element by {} ".format(
+ workflow_element_name, flask_login.current_user.name
+ )
+ error_str += "in {}/{}/{}/{}".format(event, team, submission, f_name)
return redirect_to_user(error_str)
- submission_file = \
- (SubmissionFile.query.filter_by(submission=submission,
- workflow_element=workflow_element)
- .one_or_none())
+ submission_file = SubmissionFile.query.filter_by(
+ submission=submission, workflow_element=workflow_element
+ ).one_or_none()
if submission_file is None:
- error_str = ('No submission file by {} in {}/{}/{}/{}'
- .format(flask_login.current_user.name,
- event, team, submission, f_name))
+ error_str = "No submission file by {} in {}/{}/{}/{}".format(
+ flask_login.current_user.name, event, team, submission, f_name
+ )
return redirect_to_user(error_str)
# superfluous, perhaps when we'll have different extensions?
@@ -833,24 +878,31 @@ def view_model(submission_hash, f_name):
submission_abspath = os.path.abspath(submission.path)
if not os.path.exists(submission_abspath):
- error_str = ('{} does not exist by {} in {}/{}/{}/{}'
- .format(submission_abspath, flask_login.current_user.name,
- event, team, submission, f_name))
+ error_str = "{} does not exist by {} in {}/{}/{}/{}".format(
+ submission_abspath,
+ flask_login.current_user.name,
+ event,
+ team,
+ submission,
+ f_name,
+ )
return redirect_to_user(error_str)
- if app.config['TRACK_USER_INTERACTION'] or app.config['TRACK_CREDITS']:
+ if app.config["TRACK_USER_INTERACTION"] or app.config["TRACK_CREDITS"]:
add_user_interaction(
db.session,
- interaction='looking at submission',
+ interaction="looking at submission",
user=flask_login.current_user,
event=event,
submission=submission,
- submission_file=submission_file
+ submission_file=submission_file,
)
- logger.info('{} is looking at {}/{}/{}/{}'
- .format(flask_login.current_user.name, event, team, submission,
- f_name))
+ logger.info(
+ "{} is looking at {}/{}/{}/{}".format(
+ flask_login.current_user.name, event, team, submission, f_name
+ )
+ )
# Downloading file if it is not editable (e.g., external_data.csv)
if not workflow_element.is_editable:
@@ -858,20 +910,22 @@ def view_model(submission_hash, f_name):
# with changedir(submission_abspath):
# with ZipFile(archive_filename, 'w') as archive:
# archive.write(f_name)
- if app.config['TRACK_USER_INTERACTION']:
+ if app.config["TRACK_USER_INTERACTION"]:
add_user_interaction(
db.session,
- interaction='download',
+ interaction="download",
user=flask_login.current_user,
event=event,
submission=submission,
- submission_file=submission_file
+ submission_file=submission_file,
)
return send_from_directory(
- submission_abspath, f_name, as_attachment=True,
- attachment_filename='{}_{}'.format(submission.hash_[:6], f_name),
- mimetype='application/octet-stream'
+ submission_abspath,
+ f_name,
+ as_attachment=True,
+ attachment_filename="{}_{}".format(submission.hash_[:6], f_name),
+ mimetype="application/octet-stream",
)
# Importing selected files into sandbox
@@ -880,51 +934,59 @@ def view_model(submission_hash, f_name):
import_form.selected_f_names.choices = choices
if import_form.validate_on_submit():
sandbox_submission = get_submission_by_name(
- db.session, event.name, flask_login.current_user.name,
- event.ramp_sandbox_name
+ db.session,
+ event.name,
+ flask_login.current_user.name,
+ event.ramp_sandbox_name,
)
for filename in import_form.selected_f_names.data:
logger.info(
- '{} is importing {}/{}/{}/{}'
- .format(flask_login.current_user.name, event, team,
- submission, filename)
+ "{} is importing {}/{}/{}/{}".format(
+ flask_login.current_user.name,
+ event,
+ team,
+ submission,
+ filename,
+ )
)
workflow_element = WorkflowElement.query.filter_by(
- name=filename.split('.')[0], workflow=event.workflow).one()
+ name=filename.split(".")[0], workflow=event.workflow
+ ).one()
# TODO: deal with different extensions of the same file
src = os.path.join(submission.path, filename)
dst = os.path.join(sandbox_submission.path, filename)
shutil.copy2(src, dst) # copying also metadata
- logger.info('Copying {} to {}'.format(src, dst))
+ logger.info("Copying {} to {}".format(src, dst))
submission_file = SubmissionFile.query.filter_by(
- submission=submission,
- workflow_element=workflow_element).one()
- if app.config['TRACK_USER_INTERACTION']:
+ submission=submission, workflow_element=workflow_element
+ ).one()
+ if app.config["TRACK_USER_INTERACTION"]:
add_user_interaction(
db.session,
- interaction='copy',
+ interaction="copy",
user=flask_login.current_user,
event=event,
submission=submission,
- submission_file=submission_file
+ submission_file=submission_file,
)
- return redirect('/events/{}/sandbox'.format(event.name))
+ return redirect("/events/{}/sandbox".format(event.name))
with open(os.path.join(submission.path, f_name)) as f:
code = f.read()
admin = is_admin(db.session, event.name, flask_login.current_user.name)
return render_template(
- 'submission.html',
+ "submission.html",
event=event,
code=code,
submission=submission,
f_name=f_name,
import_form=import_form,
- admin=admin)
+ admin=admin,
+ )
@mod.route("//error.txt")
@@ -943,27 +1005,27 @@ def view_submission_error(submission_hash):
submission_hash : str
The hash of the submission.
"""
- submission = (Submission.query.filter_by(hash_=submission_hash)
- .one_or_none())
+ submission = Submission.query.filter_by(hash_=submission_hash).one_or_none()
if submission is None:
- error_str = ('Missing submission {}: {}'
- .format(flask_login.current_user.name, submission_hash))
+ error_str = "Missing submission {}: {}".format(
+ flask_login.current_user.name, submission_hash
+ )
return redirect_to_user(error_str)
event = submission.event_team.event
team = submission.event_team.team
# TODO: check if event == submission.event_team.event
- if app.config['TRACK_USER_INTERACTION']:
+ if app.config["TRACK_USER_INTERACTION"]:
add_user_interaction(
db.session,
- interaction='looking at error',
+ interaction="looking at error",
user=flask_login.current_user,
event=event,
- submission=submission
+ submission=submission,
)
return render_template(
- 'submission_error.html', submission=submission, team=team, event=event
+ "submission_error.html", submission=submission, team=team, event=event
)
@@ -977,26 +1039,25 @@ def toggle_competition(submission_hash):
submission_hash : str
The submission hash of the current submission.
"""
- submission = (Submission.query.filter_by(hash_=submission_hash)
- .one_or_none())
+ submission = Submission.query.filter_by(hash_=submission_hash).one_or_none()
if submission is None:
- error_str = 'Missing submission: {}'.format(submission_hash)
+ error_str = "Missing submission: {}".format(submission_hash)
return redirect_to_user(error_str)
access_code = is_accessible_code(
- db.session, submission.event_team.event.name,
- flask_login.current_user.name, submission.id
+ db.session,
+ submission.event_team.event.name,
+ flask_login.current_user.name,
+ submission.id,
)
if not access_code:
- error_str = 'Missing submission: {}'.format(submission_hash)
+ error_str = "Missing submission: {}".format(submission_hash)
return redirect_to_user(error_str)
submission.is_in_competition = not submission.is_in_competition
db.session.commit()
update_leaderboards(db.session, submission.event_team.event.name)
- return redirect(
- '/{}/{}'.format(submission_hash, submission.files[0].f_name)
- )
+ return redirect("/{}/{}".format(submission_hash, submission.files[0].f_name))
@mod.route("/download/")
@@ -1009,22 +1070,23 @@ def download_submission(submission_hash):
submission_hash : str
The submission hash of the current submission.
"""
- submission = (Submission.query.filter_by(hash_=submission_hash)
- .one_or_none())
+ submission = Submission.query.filter_by(hash_=submission_hash).one_or_none()
if submission is None:
- error_str = 'Missing submission: {}'.format(submission_hash)
+ error_str = "Missing submission: {}".format(submission_hash)
return redirect_to_user(error_str)
access_code = is_accessible_code(
- db.session, submission.event_team.event.name,
- flask_login.current_user.name, submission.id
+ db.session,
+ submission.event_team.event.name,
+ flask_login.current_user.name,
+ submission.id,
)
if not access_code:
- error_str = 'Unauthorized access: {}'.format(submission_hash)
+ error_str = "Unauthorized access: {}".format(submission_hash)
return redirect_to_user(error_str)
file_in_memory = io.BytesIO()
- with zipfile.ZipFile(file_in_memory, 'w') as zf:
+ with zipfile.ZipFile(file_in_memory, "w") as zf:
for ff in submission.files:
data = zipfile.ZipInfo(ff.f_name)
data.date_time = time.localtime(time.time())[:6]
diff --git a/ramp-frontend/ramp_frontend/views/redirect.py b/ramp-frontend/ramp_frontend/views/redirect.py
index 91b733ca9..2f9ca4563 100644
--- a/ramp-frontend/ramp_frontend/views/redirect.py
+++ b/ramp-frontend/ramp_frontend/views/redirect.py
@@ -4,10 +4,10 @@
from flask import redirect
from flask import url_for
-logger = logging.getLogger('RAMP-FRONTEND')
+logger = logging.getLogger("RAMP-FRONTEND")
-def redirect_to_user(message_str, is_error=True, category='message'):
+def redirect_to_user(message_str, is_error=True, category="message"):
"""Redirect the page to the problem landing page.
Parameters
@@ -25,7 +25,7 @@ def redirect_to_user(message_str, is_error=True, category='message'):
logger.error(message_str)
else:
logger.info(message_str)
- return redirect(url_for('ramp.problems'))
+ return redirect(url_for("ramp.problems"))
def redirect_to_sandbox(event, message_str, is_error=True, category=None):
@@ -45,11 +45,10 @@ def redirect_to_sandbox(event, message_str, is_error=True, category=None):
logger.error(message_str)
else:
logger.info(message_str)
- return redirect('/events/{}/sandbox'.format(event.name))
+ return redirect("/events/{}/sandbox".format(event.name))
-def redirect_to_credit(submission_hash, message_str, is_error=True,
- category=None):
+def redirect_to_credit(submission_hash, message_str, is_error=True, category=None):
"""Redirect the page to the credit landing page.
Parameters
@@ -68,4 +67,4 @@ def redirect_to_credit(submission_hash, message_str, is_error=True,
logger.error(message_str)
else:
logger.info(message_str)
- return redirect('/credit/{}'.format(submission_hash))
+ return redirect("/credit/{}".format(submission_hash))
diff --git a/ramp-frontend/ramp_frontend/views/visualization.py b/ramp-frontend/ramp_frontend/views/visualization.py
index 021cf5c87..3f19ed8c4 100644
--- a/ramp-frontend/ramp_frontend/views/visualization.py
+++ b/ramp-frontend/ramp_frontend/views/visualization.py
@@ -22,25 +22,28 @@ def make_step_df(pareto_df, is_lower_the_better):
pareto_df = pareto_df.set_index(1 + 2 * np.arange(n_pareto))
for i in range(2, 2 * n_pareto, 2):
pareto_df.loc[i] = pareto_df.loc[i - 1]
- pareto_df.loc[i, 'x'] = pareto_df.loc[i + 1, 'x']
+ pareto_df.loc[i, "x"] = pareto_df.loc[i + 1, "x"]
pareto_df.loc[2 * n_pareto] = pareto_df.loc[2 * n_pareto - 1]
- pareto_df.loc[2 * n_pareto, 'x'] = max(pareto_df['x'])
+ pareto_df.loc[2 * n_pareto, "x"] = max(pareto_df["x"])
pareto_df.loc[0] = pareto_df.loc[1]
if is_lower_the_better:
- pareto_df.loc[0, 'y'] = max(pareto_df['y'])
+ pareto_df.loc[0, "y"] = max(pareto_df["y"])
else:
- pareto_df.loc[0, 'y'] = min(pareto_df['y'])
+ pareto_df.loc[0, "y"] = min(pareto_df["y"])
return pareto_df.sort_index()
def color_gradient(rgb, factor_array):
"""Rescale rgb by factor_array."""
from skimage.color import gray2rgb, rgb2gray
+
colors = np.array(
- [(255 - rgb[0], 255 - rgb[2], 255 - rgb[2]) for _ in factor_array])
+ [(255 - rgb[0], 255 - rgb[2], 255 - rgb[2]) for _ in factor_array]
+ )
colors = rgb2gray(colors)
- colors = gray2rgb(255 - np.array([color * factor for color, factor
- in zip(colors, factor_array)]))[:, :, 0]
+ colors = gray2rgb(
+ 255 - np.array([color * factor for color, factor in zip(colors, factor_array)])
+ )[:, :, 0]
return colors
@@ -66,21 +69,20 @@ def add_pareto(df, col, worst, is_lower_the_better):
The dataframe amended with the new column col + ' pareto'
"""
df_ = df.copy()
- df_.loc[:, col + ' pareto'] = pd.Series(np.zeros(df.shape[0]),
- index=df_.index)
+ df_.loc[:, col + " pareto"] = pd.Series(np.zeros(df.shape[0]), index=df_.index)
best_score = worst
if is_lower_the_better:
for i, row in df.iterrows():
score = row[col]
if score < best_score:
best_score = score
- df_.loc[i, col + ' pareto'] = 1
+ df_.loc[i, col + " pareto"] = 1
else:
for i, row in df.iterrows():
score = row[col]
if score > best_score:
best_score = score
- df_.loc[i, col + ' pareto'] = 1
+ df_.loc[i, col + " pareto"] = 1
return df_
@@ -93,22 +95,30 @@ def score_plot(session, event):
submissions = [
get_submission_by_id(session, sub_id)
for sub_id, _, _ in submissions
- if get_submission_by_id(session, sub_id).is_public_leaderboard and
- get_submission_by_id(session, sub_id).is_valid]
+ if get_submission_by_id(session, sub_id).is_public_leaderboard
+ and get_submission_by_id(session, sub_id).is_valid
+ ]
score_names = [score_type.name for score_type in event.score_types]
- scoress = np.array([
- [score.valid_score_cv_bag
- for score in submission.ordered_scores(score_names)]
- for submission in submissions
- ]).T
+ scoress = np.array(
+ [
+ [
+ score.valid_score_cv_bag
+ for score in submission.ordered_scores(score_names)
+ ]
+ for submission in submissions
+ ]
+ ).T
score_plot_df = pd.DataFrame()
- score_plot_df['submitted at (UTC)'] = [
- submission.submission_timestamp for submission in submissions]
- score_plot_df['contributivity'] = [
- submission.contributivity for submission in submissions]
- score_plot_df['historical contributivity'] = [
- submission.historical_contributivity for submission in submissions]
+ score_plot_df["submitted at (UTC)"] = [
+ submission.submission_timestamp for submission in submissions
+ ]
+ score_plot_df["contributivity"] = [
+ submission.contributivity for submission in submissions
+ ]
+ score_plot_df["historical contributivity"] = [
+ submission.historical_contributivity for submission in submissions
+ ]
for score_name in score_names: # to make sure the column is created
score_plot_df[score_name] = 0
for score_name, scores in zip(score_names, scoress):
@@ -116,86 +126,101 @@ def score_plot(session, event):
score_name = event.official_score_name
score_plot_df = score_plot_df[
- score_plot_df['submitted at (UTC)'] > event.opening_timestamp]
- score_plot_df = score_plot_df.sort_values('submitted at (UTC)')
+ score_plot_df["submitted at (UTC)"] > event.opening_timestamp
+ ]
+ score_plot_df = score_plot_df.sort_values("submitted at (UTC)")
score_plot_df = add_pareto(
- score_plot_df, score_name, event.official_score_type.worst,
- event.official_score_type.is_lower_the_better)
+ score_plot_df,
+ score_name,
+ event.official_score_type.worst,
+ event.official_score_type.is_lower_the_better,
+ )
- is_open = (score_plot_df['submitted at (UTC)'] >
- event.public_opening_timestamp).values
+ is_open = (
+ score_plot_df["submitted at (UTC)"] > event.public_opening_timestamp
+ ).values
- max_contributivity = max(
- 0.0000001, max(score_plot_df['contributivity'].values))
- max_historical_contributivity = max(0.0000001, max(
- score_plot_df['historical contributivity'].values))
+ max_contributivity = max(0.0000001, max(score_plot_df["contributivity"].values))
+ max_historical_contributivity = max(
+ 0.0000001, max(score_plot_df["historical contributivity"].values)
+ )
fill_color_1 = (176, 23, 31)
fill_color_2 = (16, 78, 139)
fill_colors_1 = color_gradient(
- fill_color_1, score_plot_df['contributivity'].values /
- max_contributivity)
+ fill_color_1,
+ score_plot_df["contributivity"].values / max_contributivity,
+ )
fill_colors_2 = color_gradient(
- fill_color_2, score_plot_df['historical contributivity'].values /
- max_historical_contributivity)
+ fill_color_2,
+ score_plot_df["historical contributivity"].values
+ / max_historical_contributivity,
+ )
fill_colors = np.minimum(fill_colors_1, fill_colors_2).astype(int)
fill_colors = ["#%02x%02x%02x" % (c[0], c[1], c[2]) for c in fill_colors]
- score_plot_df['x'] = score_plot_df['submitted at (UTC)']
- score_plot_df['y'] = score_plot_df[score_name]
- score_plot_df['line_color'] = 'royalblue'
- score_plot_df['circle_size'] = 8
- score_plot_df['line_color'] = 'royalblue'
- score_plot_df.loc[is_open, 'line_color'] = 'coral'
- score_plot_df['fill_color'] = fill_colors
- score_plot_df['fill_alpha'] = 0.5
- score_plot_df['line_width'] = 0
- score_plot_df['label'] = 'closed phase'
- score_plot_df.loc[is_open, 'label'] = 'open phase'
+ score_plot_df["x"] = score_plot_df["submitted at (UTC)"]
+ score_plot_df["y"] = score_plot_df[score_name]
+ score_plot_df["line_color"] = "royalblue"
+ score_plot_df["circle_size"] = 8
+ score_plot_df["line_color"] = "royalblue"
+ score_plot_df.loc[is_open, "line_color"] = "coral"
+ score_plot_df["fill_color"] = fill_colors
+ score_plot_df["fill_alpha"] = 0.5
+ score_plot_df["line_width"] = 0
+ score_plot_df["label"] = "closed phase"
+ score_plot_df.loc[is_open, "label"] = "open phase"
source = ColumnDataSource(score_plot_df)
- pareto_df = score_plot_df[
- score_plot_df[score_name + ' pareto'] == 1].copy()
+ pareto_df = score_plot_df[score_plot_df[score_name + " pareto"] == 1].copy()
pareto_df = pareto_df.append(pareto_df.iloc[-1])
- pareto_df.iloc[-1, pareto_df.columns.get_loc('x')] = (
- max(score_plot_df['x'])
- )
- pareto_df = make_step_df(
- pareto_df, event.official_score_type.is_lower_the_better)
+ pareto_df.iloc[-1, pareto_df.columns.get_loc("x")] = max(score_plot_df["x"])
+ pareto_df = make_step_df(pareto_df, event.official_score_type.is_lower_the_better)
source_pareto = ColumnDataSource(pareto_df)
- tools = ['pan,wheel_zoom,box_zoom,reset,save,tap']
- p = figure(plot_width=900, plot_height=600, tools=tools, title='Scores')
+ tools = ["pan,wheel_zoom,box_zoom,reset,save,tap"]
+ p = figure(plot_width=900, plot_height=600, tools=tools, title="Scores")
p.circle(
- 'x', 'y', size='circle_size', line_color='line_color',
- fill_color='fill_color', fill_alpha='fill_alpha', line_width=1,
- source=source, legend='label'
+ "x",
+ "y",
+ size="circle_size",
+ line_color="line_color",
+ fill_color="fill_color",
+ fill_alpha="fill_alpha",
+ line_width=1,
+ source=source,
+ legend="label",
)
p.line(
- 'x', 'y', line_width=3, line_color='goldenrod', source=source_pareto,
- legend='best score', alpha=0.9
+ "x",
+ "y",
+ line_width=3,
+ line_color="goldenrod",
+ source=source_pareto,
+ legend="best score",
+ alpha=0.9,
)
p.xaxis.formatter = DatetimeTickFormatter(
- hours=['%d %B %Y'],
- days=['%d %B %Y'],
- months=['%d %B %Y'],
- years=['%d %B %Y'],
+ hours=["%d %B %Y"],
+ days=["%d %B %Y"],
+ months=["%d %B %Y"],
+ years=["%d %B %Y"],
)
p.xaxis.major_label_orientation = np.pi / 4
if event.official_score_type.is_lower_the_better:
- p.yaxis.axis_label = score_name + ' (the lower the better)'
- p.legend.location = 'top_right'
+ p.yaxis.axis_label = score_name + " (the lower the better)"
+ p.legend.location = "top_right"
else:
- p.yaxis.axis_label = score_name + ' (the greater the better)'
- p.legend.location = 'bottom_right'
- p.xaxis.axis_label = 'submission timestamp (UTC)'
- p.xaxis.axis_label_text_font_size = '14pt'
- p.yaxis.axis_label_text_font_size = '14pt'
- p.legend.label_text_font_size = '14pt'
- p.title.text_font_size = '16pt'
- p.xaxis.major_label_text_font_size = '10pt'
- p.yaxis.major_label_text_font_size = '10pt'
+ p.yaxis.axis_label = score_name + " (the greater the better)"
+ p.legend.location = "bottom_right"
+ p.xaxis.axis_label = "submission timestamp (UTC)"
+ p.xaxis.axis_label_text_font_size = "14pt"
+ p.yaxis.axis_label_text_font_size = "14pt"
+ p.legend.label_text_font_size = "14pt"
+ p.title.text_font_size = "16pt"
+ p.xaxis.major_label_text_font_size = "10pt"
+ p.yaxis.major_label_text_font_size = "10pt"
return p
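
For readers skimming the reformatting, add_pareto above is essentially a running-best flag over a score column. A simplified restatement on a toy DataFrame (the function name and data are illustrative only, not the module's API):

import pandas as pd

def mark_running_best(df, col, worst, is_lower_the_better):
    """Flag rows that improve on the best score seen so far."""
    out = df.copy()
    out[col + " pareto"] = 0
    best = worst
    for i, row in df.iterrows():
        improved = row[col] < best if is_lower_the_better else row[col] > best
        if improved:
            best = row[col]
            out.loc[i, col + " pareto"] = 1
    return out

scores = pd.DataFrame({"acc": [0.61, 0.58, 0.72, 0.70, 0.75]})
print(mark_running_best(scores, "acc", worst=0.0, is_lower_the_better=False))
# rows 0, 2 and 4 are flagged: each one beats every earlier submission
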
diff --git a/ramp-frontend/setup.py b/ramp-frontend/setup.py
index 753bd4663..7ce872ed2 100755
--- a/ramp-frontend/setup.py
+++ b/ramp-frontend/setup.py
@@ -5,54 +5,65 @@
from setuptools import find_packages, setup
# get __version__ from _version.py
-ver_file = os.path.join('ramp_frontend', '_version.py')
+ver_file = os.path.join("ramp_frontend", "_version.py")
with open(ver_file) as f:
exec(f.read())
-DISTNAME = 'ramp-frontend'
+DISTNAME = "ramp-frontend"
DESCRIPTION = "Website for RAMP"
-with codecs.open('README.rst', encoding='utf-8-sig') as f:
+with codecs.open("README.rst", encoding="utf-8-sig") as f:
LONG_DESCRIPTION = f.read()
-MAINTAINER = 'A. Boucaud, B. Kegl, G. Lemaitre, J. Van den Bossche'
-MAINTAINER_EMAIL = 'boucaud.alexandre@gmail.com, guillaume.lemaitre@inria.fr'
-URL = 'https://github.com/paris-saclay-cds/ramp-board'
-LICENSE = 'BSD (3-clause)'
-DOWNLOAD_URL = 'https://github.com/paris-saclay-cds/ramp-board'
+MAINTAINER = "A. Boucaud, B. Kegl, G. Lemaitre, J. Van den Bossche"
+MAINTAINER_EMAIL = "boucaud.alexandre@gmail.com, guillaume.lemaitre@inria.fr"
+URL = "https://github.com/paris-saclay-cds/ramp-board"
+LICENSE = "BSD (3-clause)"
+DOWNLOAD_URL = "https://github.com/paris-saclay-cds/ramp-board"
VERSION = __version__ # noqa
-CLASSIFIERS = ['Intended Audience :: Science/Research',
- 'Intended Audience :: Developers',
- 'License :: OSI Approved',
- 'Programming Language :: Python',
- 'Topic :: Software Development',
- 'Topic :: Scientific/Engineering',
- 'Operating System :: Microsoft :: Windows',
- 'Operating System :: POSIX',
- 'Operating System :: Unix',
- 'Operating System :: MacOS',
- 'Programming Language :: Python :: 3.6',
- 'Programming Language :: Python :: 3.7',
- 'Programming Language :: Python :: 3.8']
-INSTALL_REQUIRES = ['bokeh', 'click', 'Flask', 'Flask-Login', 'Flask-Mail',
- 'Flask-SQLAlchemy', 'Flask-WTF', 'itsdangerous', 'numpy',
- 'pandas']
+CLASSIFIERS = [
+ "Intended Audience :: Science/Research",
+ "Intended Audience :: Developers",
+ "License :: OSI Approved",
+ "Programming Language :: Python",
+ "Topic :: Software Development",
+ "Topic :: Scientific/Engineering",
+ "Operating System :: Microsoft :: Windows",
+ "Operating System :: POSIX",
+ "Operating System :: Unix",
+ "Operating System :: MacOS",
+ "Programming Language :: Python :: 3.6",
+ "Programming Language :: Python :: 3.7",
+ "Programming Language :: Python :: 3.8",
+]
+INSTALL_REQUIRES = [
+ "bokeh",
+ "click",
+ "Flask",
+ "Flask-Login",
+ "Flask-Mail",
+ "Flask-SQLAlchemy",
+ "Flask-WTF",
+ "itsdangerous",
+ "numpy",
+ "pandas",
+]
EXTRAS_REQUIRE = {
- 'tests': ['pytest', 'pytest-cov'],
- 'docs': ['sphinx', 'sphinx_rtd_theme', 'numpydoc']
+ "tests": ["pytest", "pytest-cov"],
+ "docs": ["sphinx", "sphinx_rtd_theme", "numpydoc"],
}
PACKAGE_DATA = {
- 'ramp_frontend': [
- os.path.join('templates', '*'),
- os.path.join('static', 'css', 'style.css'),
- os.path.join('static', 'css', 'themes', 'flat-blue.css'),
- os.path.join('static', 'img', '*'),
- os.path.join('static', 'img', 'backdrop', '*'),
- os.path.join('static', 'img', 'partners', '*'),
- os.path.join('static', 'img', 'powered_by', '*'),
- os.path.join('static', 'js', '*'),
- os.path.join('static', 'lib', 'css', '*'),
- os.path.join('static', 'lib', 'fonts', '*'),
- os.path.join('static', 'lib', 'img', '*'),
- os.path.join('static', 'lib', 'js', '*'),
+ "ramp_frontend": [
+ os.path.join("templates", "*"),
+ os.path.join("static", "css", "style.css"),
+ os.path.join("static", "css", "themes", "flat-blue.css"),
+ os.path.join("static", "img", "*"),
+ os.path.join("static", "img", "backdrop", "*"),
+ os.path.join("static", "img", "partners", "*"),
+ os.path.join("static", "img", "powered_by", "*"),
+ os.path.join("static", "js", "*"),
+ os.path.join("static", "lib", "css", "*"),
+ os.path.join("static", "lib", "fonts", "*"),
+ os.path.join("static", "lib", "img", "*"),
+ os.path.join("static", "lib", "js", "*"),
]
}
@@ -73,7 +84,5 @@
install_requires=INSTALL_REQUIRES,
extras_require=EXTRAS_REQUIRE,
python_requires=">=3.7",
- entry_points={
- 'console_scripts': ['ramp-frontend = ramp_frontend.cli:start']
- }
+ entry_points={"console_scripts": ["ramp-frontend = ramp_frontend.cli:start"]},
)
diff --git a/ramp-utils/ramp_utils/__init__.py b/ramp-utils/ramp_utils/__init__.py
index 998fdc545..84edfbfc8 100644
--- a/ramp-utils/ramp_utils/__init__.py
+++ b/ramp-utils/ramp_utils/__init__.py
@@ -7,10 +7,10 @@
from ._version import __version__
__all__ = [
- 'generate_flask_config',
- 'generate_ramp_config',
- 'generate_worker_config',
- 'import_module_from_source',
- 'read_config',
- '__version__'
+ "generate_flask_config",
+ "generate_ramp_config",
+ "generate_worker_config",
+ "import_module_from_source",
+ "read_config",
+ "__version__",
]
diff --git a/ramp-utils/ramp_utils/_version.py b/ramp-utils/ramp_utils/_version.py
index 3fdc7fd91..dcd7d05fa 100644
--- a/ramp-utils/ramp_utils/_version.py
+++ b/ramp-utils/ramp_utils/_version.py
@@ -21,4 +21,4 @@
# 'X.Y.dev0' is the canonical version of 'X.Y.dev'
#
-__version__ = '0.9.0.dev0'
+__version__ = "0.9.0.dev0"
diff --git a/ramp-utils/ramp_utils/cli.py b/ramp-utils/ramp_utils/cli.py
index 184d8ee10..832410184 100644
--- a/ramp-utils/ramp_utils/cli.py
+++ b/ramp-utils/ramp_utils/cli.py
@@ -14,7 +14,7 @@
HERE = os.path.dirname(__file__)
-CONTEXT_SETTINGS = dict(help_option_names=['-h', '--help'])
+CONTEXT_SETTINGS = dict(help_option_names=["-h", "--help"])
@click.group(context_settings=CONTEXT_SETTINGS)
@@ -29,18 +29,24 @@ def main():
@main.command()
-@click.option("--deployment-dir", default='.', show_default=True,
- help='The directory where to create a config file.')
-@click.option('--force', is_flag=True,
- help='Whether or not to potentially overwrite the '
- 'repositories, problem and event in the database.')
+@click.option(
+ "--deployment-dir",
+ default=".",
+ show_default=True,
+ help="The directory where to create a config file.",
+)
+@click.option(
+ "--force",
+ is_flag=True,
+ help="Whether or not to potentially overwrite the "
+ "repositories, problem and event in the database.",
+)
def init(deployment_dir, force):
"""Initialize the deployment directory with a template config.yml"""
- template = os.path.join(HERE, 'template', 'database_config_template.yml')
- destination = os.path.join(deployment_dir, 'config.yml')
+ template = os.path.join(HERE, "template", "database_config_template.yml")
+ destination = os.path.join(deployment_dir, "config.yml")
if os.path.isfile(destination) and not force:
- click.echo(
- "Config file already exists. Specify --force to overwrite it.")
+ click.echo("Config file already exists. Specify --force to overwrite it.")
return
shutil.copy(template, destination)
click.echo("Created {}".format(destination))
@@ -48,17 +54,24 @@ def init(deployment_dir, force):
@main.command()
-@click.option("--name", help='The name of the event.', required=True)
-@click.option("--deployment-dir", default='.', show_default=True,
- help='The directory where to create a config file.')
-@click.option('--force', is_flag=True,
- help='Whether or not to potentially overwrite the '
- 'repositories, problem and event in the database.')
+@click.option("--name", help="The name of the event.", required=True)
+@click.option(
+ "--deployment-dir",
+ default=".",
+ show_default=True,
+ help="The directory where to create a config file.",
+)
+@click.option(
+ "--force",
+ is_flag=True,
+ help="Whether or not to potentially overwrite the "
+ "repositories, problem and event in the database.",
+)
def init_event(name, deployment_dir, force):
"""Initialize the event directory with a template config.yml"""
# create directories
- events_dir = os.path.join(deployment_dir, 'events')
+ events_dir = os.path.join(deployment_dir, "events")
if not os.path.isdir(events_dir):
os.mkdir(events_dir)
@@ -68,18 +81,18 @@ def init_event(name, deployment_dir, force):
shutil.rmtree(event_dir)
else:
click.echo(
- "{} already exists. Specify --force to overwrite it.".format(
- event_dir))
+ "{} already exists. Specify --force to overwrite it.".format(event_dir)
+ )
return
os.mkdir(event_dir)
# copy + edit config template
- template = os.path.join(HERE, 'template', 'ramp_config_template.yml')
- destination = os.path.join(event_dir, 'config.yml')
- with open(destination, 'w') as dest:
- with open(template, 'r') as src:
+ template = os.path.join(HERE, "template", "ramp_config_template.yml")
+ destination = os.path.join(event_dir, "config.yml")
+ with open(destination, "w") as dest:
+ with open(template, "r") as src:
content = src.read()
- content = content.replace('', name)
+ content = content.replace("", name)
dest.write(content)
click.echo("Created {}".format(destination))
@@ -87,18 +100,30 @@ def init_event(name, deployment_dir, force):
@main.command()
-@click.option("--config", default='config.yml', show_default=True,
- help='Configuration file in YAML format containing the database '
- 'information')
-@click.option("--event-config", default='config.yml', show_default=True,
- help='Configuration file in YAML format containing the RAMP '
- 'information')
-@click.option("--cloning/--no-cloning", default=True, show_default=True,
- help='Whether or not to clone the RAMP kit and data '
- 'repositories.')
-@click.option('--force', is_flag=True,
- help='Whether or not to potentially overwrite the '
- 'repositories, problem and event in the database.')
+@click.option(
+ "--config",
+ default="config.yml",
+ show_default=True,
+ help="Configuration file in YAML format containing the database " "information",
+)
+@click.option(
+ "--event-config",
+ default="config.yml",
+ show_default=True,
+ help="Configuration file in YAML format containing the RAMP " "information",
+)
+@click.option(
+ "--cloning/--no-cloning",
+ default=True,
+ show_default=True,
+ help="Whether or not to clone the RAMP kit and data " "repositories.",
+)
+@click.option(
+ "--force",
+ is_flag=True,
+ help="Whether or not to potentially overwrite the "
+ "repositories, problem and event in the database.",
+)
def deploy_event(config, event_config, cloning, force):
"""Deploy event (add problem and event to the database, optionally clone
kit and data)
@@ -107,33 +132,44 @@ def deploy_event(config, event_config, cloning, force):
@main.command()
-@click.option("--config", default='config.yml', show_default=True,
- help='Configuration file in YAML format containing the database '
- 'information')
-@click.option("--event-config", default='config.yml', show_default=True,
- help='Configuration file in YAML format containing the RAMP '
- 'information')
+@click.option(
+ "--config",
+ default="config.yml",
+ show_default=True,
+ help="Configuration file in YAML format containing the database " "information",
+)
+@click.option(
+ "--event-config",
+ default="config.yml",
+ show_default=True,
+ help="Configuration file in YAML format containing the RAMP " "information",
+)
def create_conda_env(config, event_config):
"""Create the conda environment for a specific event"""
conda_env_name = read_config(event_config)["worker"]["conda_env"]
ramp_config = generate_ramp_config(event_config, database_config=config)
- path_environment_file = os.path.join(
- ramp_config["ramp_kit_dir"], "environment.yml"
- )
- subprocess.run(
- ["conda", "create", "--name", conda_env_name, "--yes"]
- )
+ path_environment_file = os.path.join(ramp_config["ramp_kit_dir"], "environment.yml")
+ subprocess.run(["conda", "create", "--name", conda_env_name, "--yes"])
subprocess.run(
- ["conda", "env", "update",
- "--name", conda_env_name,
- "--file", path_environment_file]
+ [
+ "conda",
+ "env",
+ "update",
+ "--name",
+ conda_env_name,
+ "--file",
+ path_environment_file,
+ ]
)
@main.command()
-@click.option("--event-config", default='config.yml', show_default=True,
- help='Configuration file in YAML format containing the RAMP '
- 'information')
+@click.option(
+ "--event-config",
+ default="config.yml",
+ show_default=True,
+ help="Configuration file in YAML format containing the RAMP " "information",
+)
def update_conda_env(event_config):
"""Update the conda environment for a specific event"""
conda_env = read_config(event_config, filter_section="worker")["conda_env"]
@@ -148,24 +184,27 @@ def update_conda_env(event_config):
raise ValueError(stderr.decode("utf-8"))
conda_info = json.loads(stdout)
- if conda_env == 'base':
- python_bin_path = os.path.join(conda_info['envs'][0], 'bin')
+ if conda_env == "base":
+ python_bin_path = os.path.join(conda_info["envs"][0], "bin")
else:
- envs_path = conda_info['envs'][1:]
+ envs_path = conda_info["envs"][1:]
if not envs_path:
- raise ValueError('Only the conda base environment exist. You '
- 'need to create the "{}" conda environment '
- 'to use it.'.format(conda_env))
+ raise ValueError(
+ "Only the conda base environment exist. You "
+ 'need to create the "{}" conda environment '
+ "to use it.".format(conda_env)
+ )
is_env_found = False
for env in envs_path:
if conda_env == os.path.split(env)[-1]:
is_env_found = True
- python_bin_path = os.path.join(env, 'bin')
+ python_bin_path = os.path.join(env, "bin")
break
if not is_env_found:
- raise ValueError('The specified conda environment {} does not '
- 'exist. You need to create it.'
- .format(conda_env))
+ raise ValueError(
+ "The specified conda environment {} does not "
+ "exist. You need to create it.".format(conda_env)
+ )
# update the conda packages
subprocess.run(["conda", "update", "--name", conda_env, "--all", "--yes"])
@@ -187,7 +226,7 @@ def update_conda_env(event_config):
# update the pip packages
subprocess.run(
- [os.path.join(python_bin_path, 'pip'), "install", "-U"] + pip_packages
+ [os.path.join(python_bin_path, "pip"), "install", "-U"] + pip_packages
)
@@ -195,5 +234,5 @@ def start():
main()
-if __name__ == '__main__':
+if __name__ == "__main__":
start()
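
The click commands in this file can be exercised without a shell via click's CliRunner, mirroring the tests further down in this diff; the directory and YAML paths below are placeholders:

from click.testing import CliRunner
from ramp_utils.cli import main

runner = CliRunner()
runner.invoke(main, ["init", "--deployment-dir", "/tmp/ramp-deployment"])
runner.invoke(
    main,
    ["init-event", "--name", "iris_test", "--deployment-dir", "/tmp/ramp-deployment"],
)
runner.invoke(
    main,
    [
        "deploy-event",
        "--config", "config.yml",
        "--event-config", "events/iris_test/config.yml",
        "--no-cloning",
    ],
)
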
diff --git a/ramp-utils/ramp_utils/config_parser.py b/ramp-utils/ramp_utils/config_parser.py
index 52f17fd52..cc0f2b082 100644
--- a/ramp-utils/ramp_utils/config_parser.py
+++ b/ramp-utils/ramp_utils/config_parser.py
@@ -2,10 +2,16 @@
import yaml
REQUIRED_KEYS = {
- 'sqlalchemy': {'drivername', 'username', 'password', 'host', 'port',
- 'database'},
- 'ramp': {'problem_name', 'event_name', 'event_title', 'event_is_public'},
- 'worker': {'worker_type'}
+ "sqlalchemy": {
+ "drivername",
+ "username",
+ "password",
+ "host",
+ "port",
+ "database",
+ },
+ "ramp": {"problem_name", "event_name", "event_title", "event_is_public"},
+ "worker": {"worker_type"},
}
@@ -31,7 +37,7 @@ def read_config(config_file, filter_section=None, check_requirements=True):
config : dict
Configuration parsed as a dictionary.
"""
- with open(config_file, 'r') as f:
+ with open(config_file, "r") as f:
config = yaml.safe_load(f)
    # if a single string is given, we will later unpack and remove the first layer
@@ -45,25 +51,28 @@ def read_config(config_file, filter_section=None, check_requirements=True):
if sec not in config:
raise ValueError(
'The section "{}" is not in the "{}" file. Got these '
- 'sections instead {}.'
- .format(sec, os.path.basename(config_file),
- list(config.keys()))
+ "sections instead {}.".format(
+ sec, os.path.basename(config_file), list(config.keys())
+ )
)
- config = {key: value
- for key, value in config.items()
- if filter_section is None or key in filter_section}
+ config = {
+ key: value
+ for key, value in config.items()
+ if filter_section is None or key in filter_section
+ }
if check_requirements:
for section_name, required_field in REQUIRED_KEYS.items():
if section_name in config:
- missing_parameters = required_field.difference(
- config[section_name])
+ missing_parameters = required_field.difference(config[section_name])
if missing_parameters:
raise ValueError(
'The section "{}" in the "{}" file is missing the '
- 'required parameters {}.'
- .format(section_name, os.path.basename(config_file),
- missing_parameters)
+ "required parameters {}.".format(
+ section_name,
+ os.path.basename(config_file),
+ missing_parameters,
+ )
)
if unpack:
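
A quick usage sketch of read_config with the filter_section behaviour shown above (the YAML path is a placeholder; the comments assume a config shaped like the test fixture later in this diff):

from ramp_utils import read_config

config = read_config("config.yml")                            # all sections
ramp_only = read_config("config.yml", filter_section="ramp")  # single string: layer unpacked
subset = read_config(
    "config.yml",
    filter_section=["ramp", "sqlalchemy"],
    check_requirements=False,                                 # skip REQUIRED_KEYS validation
)
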
diff --git a/ramp-utils/ramp_utils/datasets.py b/ramp-utils/ramp_utils/datasets.py
index e0bfa3621..193c8423f 100644
--- a/ramp-utils/ramp_utils/datasets.py
+++ b/ramp-utils/ramp_utils/datasets.py
@@ -4,10 +4,7 @@
import os
from urllib import request
-OSFRemoteMetaData = namedtuple(
- "OSFRemoteMetaData",
- ["filename", "id", "revision"]
-)
+OSFRemoteMetaData = namedtuple("OSFRemoteMetaData", ["filename", "id", "revision"])
def _sha256(path):
@@ -42,16 +39,13 @@ def fetch_from_osf(path_data, metadata, token=None):
if not os.path.exists(path_data):
os.makedirs(path_data)
for file_info in metadata:
- file_info_url = (
- f"https://api.osf.io/v2/files/{file_info.id}/"
- )
+ file_info_url = f"https://api.osf.io/v2/files/{file_info.id}/"
req = request.Request(file_info_url)
if token is not None:
req.add_header("Authorization", f"Bearer {token}")
response = request.urlopen(req)
info = json.loads(response.read())
- original_checksum = \
- info["data"]["attributes"]["extra"]["hashes"]["sha256"]
+ original_checksum = info["data"]["attributes"]["extra"]["hashes"]["sha256"]
filename = os.path.join(path_data, file_info.filename)
if os.path.exists(filename):
if _sha256(filename) == original_checksum:
@@ -59,8 +53,7 @@ def fetch_from_osf(path_data, metadata, token=None):
continue
osf_url = (
- f"https://osf.io/download/"
- f"{file_info.id}/?revision={file_info.revision}"
+ f"https://osf.io/download/" f"{file_info.id}/?revision={file_info.revision}"
)
req = request.Request(osf_url)
if token is not None:
@@ -70,5 +63,6 @@ def fetch_from_osf(path_data, metadata, token=None):
raise RuntimeError(response.read())
with open(filename, "wb") as fid:
fid.write(response.read())
- assert _sha256(filename) == original_checksum, \
- f"{filename} was corrupted during download"
+ assert (
+ _sha256(filename) == original_checksum
+ ), f"{filename} was corrupted during download"
diff --git a/ramp-utils/ramp_utils/deploy.py b/ramp-utils/ramp_utils/deploy.py
index 9a135ce3b..8c7aab8cb 100644
--- a/ramp-utils/ramp_utils/deploy.py
+++ b/ramp-utils/ramp_utils/deploy.py
@@ -32,70 +32,79 @@ def deploy_ramp_event(config, event_config, setup_ramp_repo=True, force=False):
Whether or not to potentially overwrite the repositories, problem and
event in the database.
"""
- database_config = read_config(config, filter_section='sqlalchemy')
+ database_config = read_config(config, filter_section="sqlalchemy")
ramp_config = generate_ramp_config(event_config, config)
with session_scope(database_config) as session:
setup_files_extension_type(session)
if setup_ramp_repo:
- setup_ramp_kit_ramp_data(
- ramp_config, ramp_config['problem_name'], force
- )
+ setup_ramp_kit_ramp_data(ramp_config, ramp_config["problem_name"], force)
else:
# we do not clone the repository but we need to convert the
# notebook to html
current_directory = os.getcwd()
- problem_kit_path = ramp_config['ramp_kit_dir']
+ problem_kit_path = ramp_config["ramp_kit_dir"]
os.chdir(problem_kit_path)
- subprocess.check_output(["jupyter", "nbconvert", "--to", "html",
- "{}_starting_kit.ipynb"
- .format(ramp_config['problem_name'])])
+ subprocess.check_output(
+ [
+ "jupyter",
+ "nbconvert",
+ "--to",
+ "html",
+ "{}_starting_kit.ipynb".format(ramp_config["problem_name"]),
+ ]
+ )
            # delete this line since it triggers an error in the front-end
            # (the page tries to open and execute "custom.css")
_delete_line_from_file(
- "{}_starting_kit.html".format(ramp_config['problem_name']),
- ' \n'
+ "{}_starting_kit.html".format(ramp_config["problem_name"]),
+ ' \n',
)
os.chdir(current_directory)
# check if the repository exists
- problem = get_problem(session, ramp_config['problem_name'])
+ problem = get_problem(session, ramp_config["problem_name"])
if problem is None:
- add_problem(session, ramp_config['problem_name'],
- ramp_config['ramp_kit_dir'],
- ramp_config['ramp_data_dir'])
+ add_problem(
+ session,
+ ramp_config["problem_name"],
+ ramp_config["ramp_kit_dir"],
+ ramp_config["ramp_data_dir"],
+ )
else:
- if ((ramp_config['ramp_kit_dir'] != problem.path_ramp_kit or
- ramp_config['ramp_data_dir'] != problem.path_ramp_data) and
- not force):
+ if (
+ ramp_config["ramp_kit_dir"] != problem.path_ramp_kit
+ or ramp_config["ramp_data_dir"] != problem.path_ramp_data
+ ) and not force:
raise ValueError(
- 'The RAMP problem already exists in the database. The path'
- ' to the kit or to the data is different. You need to set'
+ "The RAMP problem already exists in the database. The path"
+ " to the kit or to the data is different. You need to set"
' "force=True" if you want to overwrite these parameters.'
)
if setup_ramp_repo:
setup_ramp_kit_ramp_data(
- ramp_config, ramp_config['problem_name'], force
+ ramp_config, ramp_config["problem_name"], force
)
if force:
- add_problem(session, ramp_config['problem_name'],
- ramp_config['ramp_kit_dir'],
- ramp_config['ramp_data_dir'],
- force)
+ add_problem(
+ session,
+ ramp_config["problem_name"],
+ ramp_config["ramp_kit_dir"],
+ ramp_config["ramp_data_dir"],
+ force,
+ )
- if not os.path.exists(ramp_config['ramp_submissions_dir']):
- os.makedirs(ramp_config['ramp_submissions_dir'])
+ if not os.path.exists(ramp_config["ramp_submissions_dir"]):
+ os.makedirs(ramp_config["ramp_submissions_dir"])
# create a folder in the ramp-kit directory to store the archive
- archive_dir = os.path.abspath(os.path.join(
- ramp_config['ramp_kit_dir'], 'events_archived'
- ))
+ archive_dir = os.path.abspath(
+ os.path.join(ramp_config["ramp_kit_dir"], "events_archived")
+ )
if not os.path.exists(archive_dir):
os.makedirs(archive_dir)
- zip_filename = os.path.join(
- archive_dir, ramp_config["event_name"] + ".zip"
- )
- with zipfile.ZipFile(zip_filename, 'w', zipfile.ZIP_DEFLATED) as zipf:
- for root, dirs, files in os.walk(ramp_config['ramp_kit_dir']):
+ zip_filename = os.path.join(archive_dir, ramp_config["event_name"] + ".zip")
+ with zipfile.ZipFile(zip_filename, "w", zipfile.ZIP_DEFLATED) as zipf:
+ for root, dirs, files in os.walk(ramp_config["ramp_kit_dir"]):
if archive_dir not in os.path.abspath(root):
for f in files:
path_file = os.path.join(root, f)
@@ -103,13 +112,16 @@ def deploy_ramp_event(config, event_config, setup_ramp_repo=True, force=False):
path_file,
os.path.relpath(
path_file, start=ramp_config["ramp_kit_dir"]
- )
+ ),
)
- add_event(session, ramp_config['problem_name'],
- ramp_config['event_name'],
- ramp_config['event_title'],
- ramp_config['sandbox_name'],
- ramp_config['ramp_submissions_dir'],
- ramp_config['event_is_public'],
- force)
+ add_event(
+ session,
+ ramp_config["problem_name"],
+ ramp_config["event_name"],
+ ramp_config["event_title"],
+ ramp_config["sandbox_name"],
+ ramp_config["ramp_submissions_dir"],
+ ramp_config["event_is_public"],
+ force,
+ )
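
A typical call of deploy_ramp_event, mirroring how the tests below drive it; both YAML paths are placeholders:

from ramp_utils.deploy import deploy_ramp_event

deploy_ramp_event(
    config="config.yml",                         # database (sqlalchemy) configuration
    event_config="events/iris_test/config.yml",  # ramp/worker configuration
    setup_ramp_repo=True,                        # clone the kit and data repositories
    force=False,                                 # refuse to overwrite an existing problem/event
)
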
diff --git a/ramp-utils/ramp_utils/frontend.py b/ramp-utils/ramp_utils/frontend.py
index 999397ae7..17fae2f7e 100644
--- a/ramp-utils/ramp_utils/frontend.py
+++ b/ramp-utils/ramp_utils/frontend.py
@@ -2,17 +2,17 @@
DEFAULT_CONFIG = {
- 'WTF_CSRF_ENABLED': True,
- 'LOG_FILENAME': 'None',
- 'MAX_CONTENT_LENGTH': 1073741824,
- 'SQLALCHEMY_TRACK_MODIFICATIONS': False,
- 'TRACK_USER_INTERACTION': False,
- 'TRACK_CREDITS': False,
- 'DOMAIN_NAME': 'localhost',
- 'LOGIN_INSTRUCTIONS': None,
- 'SIGN_UP_INSTRUCTIONS': None,
- 'SIGN_UP_ASK_SOCIAL_MEDIA': False,
- 'PRIVACY_POLICY_PAGE': None
+ "WTF_CSRF_ENABLED": True,
+ "LOG_FILENAME": "None",
+ "MAX_CONTENT_LENGTH": 1073741824,
+ "SQLALCHEMY_TRACK_MODIFICATIONS": False,
+ "TRACK_USER_INTERACTION": False,
+ "TRACK_CREDITS": False,
+ "DOMAIN_NAME": "localhost",
+ "LOGIN_INSTRUCTIONS": None,
+ "SIGN_UP_INSTRUCTIONS": None,
+ "SIGN_UP_ASK_SOCIAL_MEDIA": False,
+ "PRIVACY_POLICY_PAGE": None,
}
@@ -22,8 +22,8 @@ def _read_if_html_path(txt: str) -> str:
If the input is a path to a valid HTML file, read it.
Otherwise return the input
"""
- if txt and txt.endswith('.html'):
- with open(txt, 'rt') as fh:
+ if txt and txt.endswith(".html"):
+ with open(txt, "rt") as fh:
txt = fh.read()
return txt
@@ -42,21 +42,26 @@ def generate_flask_config(config):
The configuration for the RAMP worker.
"""
if isinstance(config, str):
- config = read_config(config, filter_section=['flask', 'sqlalchemy'])
+ config = read_config(config, filter_section=["flask", "sqlalchemy"])
flask_config = DEFAULT_CONFIG.copy()
- user_flask_config = {
- key.upper(): value for key, value in config['flask'].items()}
+ user_flask_config = {key.upper(): value for key, value in config["flask"].items()}
flask_config.update(user_flask_config)
- for key in ['LOGIN_INSTRUCTIONS', 'SIGN_UP_INSTRUCTIONS',
- 'PRIVACY_POLICY_PAGE']:
+ for key in [
+ "LOGIN_INSTRUCTIONS",
+ "SIGN_UP_INSTRUCTIONS",
+ "PRIVACY_POLICY_PAGE",
+ ]:
flask_config[key] = _read_if_html_path(flask_config[key])
- database_config = config['sqlalchemy']
- flask_config['SQLALCHEMY_DATABASE_URI'] = \
- ('{}://{}:{}@{}:{}/{}'
- .format(database_config['drivername'], database_config['username'],
- database_config['password'], database_config['host'],
- database_config['port'], database_config['database']))
+ database_config = config["sqlalchemy"]
+ flask_config["SQLALCHEMY_DATABASE_URI"] = "{}://{}:{}@{}:{}/{}".format(
+ database_config["drivername"],
+ database_config["username"],
+ database_config["password"],
+ database_config["host"],
+ database_config["port"],
+ database_config["database"],
+ )
return flask_config
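
A small sketch of generate_flask_config fed with an in-memory dict instead of a YAML path; the credentials are dummies matching the test expectations further down:

from ramp_utils import generate_flask_config

config = {
    "flask": {"secret_key": "abcdefghijkl"},
    "sqlalchemy": {
        "drivername": "postgresql",
        "username": "mrramp",
        "password": "mrramp",
        "host": "localhost",
        "port": 5432,
        "database": "databoard_test",
    },
}
flask_config = generate_flask_config(config)
# flask_config["SQLALCHEMY_DATABASE_URI"] ==
#   "postgresql://mrramp:mrramp@localhost:5432/databoard_test"
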
diff --git a/ramp-utils/ramp_utils/ramp.py b/ramp-utils/ramp_utils/ramp.py
index 63aab27eb..344b804f3 100644
--- a/ramp-utils/ramp_utils/ramp.py
+++ b/ramp-utils/ramp_utils/ramp.py
@@ -2,8 +2,14 @@
from .config_parser import read_config
-MANDATORY_DICT_PARAMS = ('kit_dir', 'data_dir', 'submissions_dir',
- 'sandbox_dir', 'predictions_dir', 'logs_dir')
+MANDATORY_DICT_PARAMS = (
+ "kit_dir",
+ "data_dir",
+ "submissions_dir",
+ "sandbox_dir",
+ "predictions_dir",
+ "logs_dir",
+)
def _create_defaults(config, key, path_config):
@@ -11,22 +17,16 @@ def _create_defaults(config, key, path_config):
already.
"""
default_mapping = {
- 'kit_dir': os.path.join(
- path_config, 'ramp-kits', config['problem_name']
+ "kit_dir": os.path.join(path_config, "ramp-kits", config["problem_name"]),
+ "data_dir": os.path.join(path_config, "ramp-data", config["problem_name"]),
+ "submissions_dir": os.path.join(
+ path_config, "events", config["event_name"], "submissions"
),
- 'data_dir': os.path.join(
- path_config, 'ramp-data', config['problem_name']
+ "predictions_dir": os.path.join(
+ path_config, "events", config["event_name"], "predictions"
),
- 'submissions_dir': os.path.join(
- path_config, 'events', config['event_name'], 'submissions'
- ),
- 'predictions_dir': os.path.join(
- path_config, 'events', config['event_name'], 'predictions'
- ),
- 'logs_dir': os.path.join(
- path_config, 'events', config['event_name'], 'logs'
- ),
- 'sandbox_dir': 'starting_kit'
+ "logs_dir": os.path.join(path_config, "events", config["event_name"], "logs"),
+ "sandbox_dir": "starting_kit",
}
if key not in config:
return default_mapping[key]
@@ -52,70 +52,59 @@ def generate_ramp_config(event_config, database_config=None):
The configuration for the RAMP worker.
"""
if isinstance(event_config, str):
- if (database_config is None or
- not isinstance(database_config, str)):
+ if database_config is None or not isinstance(database_config, str):
raise ValueError(
'When "event_config" corresponds to the filename of the '
- 'configuration, you need to provide the filename of the '
+ "configuration, you need to provide the filename of the "
'database as well, by assigning "database_config".'
)
event_config = read_config(event_config)
- config = event_config['ramp']
+ config = event_config["ramp"]
- path_config = os.path.dirname(
- os.path.abspath(database_config)
- )
+ path_config = os.path.dirname(os.path.abspath(database_config))
else:
- if 'ramp' in event_config.keys():
- config = event_config['ramp']
+ if "ramp" in event_config.keys():
+ config = event_config["ramp"]
else:
config = event_config
if not all([key in config.keys() for key in MANDATORY_DICT_PARAMS]):
raise ValueError(
'When "event_config" is a dictionary, you need to provide all '
- 'following keys: {}'.format(MANDATORY_DICT_PARAMS)
+ "following keys: {}".format(MANDATORY_DICT_PARAMS)
)
- path_config = ''
+ path_config = ""
ramp_config = {}
# mandatory parameters
- ramp_config['problem_name'] = config['problem_name']
- ramp_config['event_name'] = config['event_name']
- ramp_config['event_title'] = config['event_title']
- ramp_config['event_is_public'] = config['event_is_public']
+ ramp_config["problem_name"] = config["problem_name"]
+ ramp_config["event_name"] = config["event_name"]
+ ramp_config["event_title"] = config["event_title"]
+ ramp_config["event_is_public"] = config["event_is_public"]
# parameters which can be built by default if given a string
- ramp_config['ramp_kit_dir'] = _create_defaults(
- config, 'kit_dir', path_config
- )
- ramp_config['ramp_data_dir'] = _create_defaults(
- config, 'data_dir', path_config
+ ramp_config["ramp_kit_dir"] = _create_defaults(config, "kit_dir", path_config)
+ ramp_config["ramp_data_dir"] = _create_defaults(config, "data_dir", path_config)
+ ramp_config["ramp_submissions_dir"] = _create_defaults(
+ config, "submissions_dir", path_config
)
- ramp_config['ramp_submissions_dir'] = _create_defaults(
- config, 'submissions_dir', path_config
- )
- ramp_config['sandbox_name'] = _create_defaults(
- config, 'sandbox_dir', ''
- )
- ramp_config['ramp_predictions_dir'] = _create_defaults(
- config, 'predictions_dir', path_config
- )
- ramp_config['ramp_logs_dir'] = _create_defaults(
- config, 'logs_dir', path_config
+ ramp_config["sandbox_name"] = _create_defaults(config, "sandbox_dir", "")
+ ramp_config["ramp_predictions_dir"] = _create_defaults(
+ config, "predictions_dir", path_config
)
+ ramp_config["ramp_logs_dir"] = _create_defaults(config, "logs_dir", path_config)
# parameters inferred from the previous one
- ramp_config['ramp_sandbox_dir'] = os.path.join(
- ramp_config['ramp_kit_dir'], 'submissions', ramp_config['sandbox_name']
+ ramp_config["ramp_sandbox_dir"] = os.path.join(
+ ramp_config["ramp_kit_dir"], "submissions", ramp_config["sandbox_name"]
)
- ramp_config['ramp_kit_submissions_dir'] = os.path.join(
- ramp_config['ramp_kit_dir'], 'submissions'
+ ramp_config["ramp_kit_submissions_dir"] = os.path.join(
+ ramp_config["ramp_kit_dir"], "submissions"
)
# parameters only used with DaskWorker
- if event_config.get('worker', {}).get('worker_type', None) == 'dask':
- ramp_config['dask_scheduler'] = ramp_config.get(
- 'worker', {}
- ).get('dask_scheduler', None)
+ if event_config.get("worker", {}).get("worker_type", None) == "dask":
+ ramp_config["dask_scheduler"] = ramp_config.get("worker", {}).get(
+ "dask_scheduler", None
+ )
return ramp_config
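
Usage sketch for generate_ramp_config in the string-path branch above; both paths are placeholders, and the default kit/data/event directories are resolved next to the database config file:

from ramp_utils import generate_ramp_config

ramp_config = generate_ramp_config(
    "events/iris_test/config.yml",  # event configuration (ramp section)
    database_config="config.yml",   # required when event_config is a path
)
print(ramp_config["ramp_kit_dir"], ramp_config["ramp_sandbox_dir"])
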
diff --git a/ramp-utils/ramp_utils/ramp_cli.py b/ramp-utils/ramp_utils/ramp_cli.py
index 4564a0f5d..acdbd15d5 100644
--- a/ramp-utils/ramp_utils/ramp_cli.py
+++ b/ramp-utils/ramp_utils/ramp_cli.py
@@ -11,15 +11,15 @@
class RAMPParser(argparse.ArgumentParser):
-
@property # type: ignore
def epilog(self):
"""Add subcommands to epilog on request
Avoids searching PATH for subcommands unless help output is requested.
"""
- return 'Available subcommands:\n - {}'.format(
- '\n - '.join(list_subcommands()))
+ return "Available subcommands:\n - {}".format(
+ "\n - ".join(list_subcommands())
+ )
@epilog.setter
def epilog(self, x):
@@ -31,13 +31,15 @@ def ramp_parser():
parser = RAMPParser(
description="RAMP: collaborative data science challenges",
        # use raw formatting to preserve newlines in the epilog
- formatter_class=argparse.RawDescriptionHelpFormatter)
+ formatter_class=argparse.RawDescriptionHelpFormatter,
+ )
group = parser.add_mutually_exclusive_group(required=True)
# don't use argparse's version action because it prints to stderr on py2
# group.add_argument('--version', action='store_true',
# help="show the ramp command's version and exit")
- group.add_argument('subcommand', type=str, nargs='?',
- help='the subcommand to launch')
+ group.add_argument(
+ "subcommand", type=str, nargs="?", help="the subcommand to launch"
+ )
return parser
@@ -58,19 +60,18 @@ def list_subcommands():
except OSError:
continue
for name in names:
- if name.startswith('ramp-'):
- if sys.platform.startswith('win'):
+ if name.startswith("ramp-"):
+ if sys.platform.startswith("win"):
# remove file-extension on Windows
name = os.path.splitext(name)[0]
- subcommand_tuples.add(tuple(name.split('-')[1:]))
+ subcommand_tuples.add(tuple(name.split("-")[1:]))
# build a set of subcommand strings, excluding subcommands whose parents
# are defined
subcommands = set()
# Only include `jupyter-foo-bar` if `jupyter-foo` is not already present
for sub_tup in subcommand_tuples:
- if not any(sub_tup[:i] in subcommand_tuples
- for i in range(1, len(sub_tup))):
- subcommands.add('-'.join(sub_tup))
+ if not any(sub_tup[:i] in subcommand_tuples for i in range(1, len(sub_tup))):
+ subcommands.add("-".join(sub_tup))
return sorted(subcommands)
@@ -80,7 +81,7 @@ def _execvp(cmd, argv):
Python provides execvp on Windows, but its behavior is problematic
(Python bug#9148).
"""
- if sys.platform.startswith('win'):
+ if sys.platform.startswith("win"):
# PATH is ignored when shell=False,
# so rely on shutil.which
try:
@@ -89,11 +90,12 @@ def _execvp(cmd, argv):
from .utils.shutil_which import which
cmd_path = which(cmd)
if cmd_path is None:
- raise OSError('%r not found' % cmd, errno.ENOENT)
+ raise OSError("%r not found" % cmd, errno.ENOENT)
p = Popen([cmd_path] + argv[1:])
# Don't raise KeyboardInterrupt in the parent process.
# Set this after spawning, to avoid subprocess inheriting handler.
import signal
+
signal.signal(signal.SIGINT, signal.SIG_IGN)
p.wait()
sys.exit(p.returncode)
@@ -113,21 +115,22 @@ def _path_with_self():
    # include realpath, if `ramp` is a symlink
scripts.append(os.path.realpath(scripts[0]))
- path_list = (os.environ.get('PATH') or os.defpath).split(os.pathsep)
+ path_list = (os.environ.get("PATH") or os.defpath).split(os.pathsep)
for script in scripts:
bindir = os.path.dirname(script)
- if (os.path.isdir(bindir)
- and os.access(script, os.X_OK)): # only if it's a script
+ if os.path.isdir(bindir) and os.access(
+ script, os.X_OK
+ ): # only if it's a script
# ensure executable's dir is on PATH
# avoids missing subcommands when ramp is run via absolute path
path_list.insert(0, bindir)
- os.environ['PATH'] = os.pathsep.join(path_list)
+ os.environ["PATH"] = os.pathsep.join(path_list)
return path_list
def main():
_path_with_self() # ensure executable is on PATH
- if len(sys.argv) > 1 and not sys.argv[1].startswith('-'):
+ if len(sys.argv) > 1 and not sys.argv[1].startswith("-"):
# Don't parse if a subcommand is given
# Avoids argparse gobbling up args passed to subcommand, such as `-h`.
subcommand = sys.argv[1]
@@ -144,12 +147,12 @@ def main():
parser.print_usage(file=sys.stderr)
sys.exit("subcommand is required")
- command = 'ramp-' + subcommand
+ command = "ramp-" + subcommand
try:
_execvp(command, sys.argv[1:])
except OSError as e:
sys.exit("Error executing ramp command %r: %s" % (subcommand, e))
-if __name__ == '__main__':
+if __name__ == "__main__":
main()
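
The dispatch idea behind main() above, reduced to its core and written as an illustrative sketch rather than the module's actual API: map `ramp foo ...` onto an executable named `ramp-foo` found on PATH.

import os
import shutil
import sys

def dispatch(argv):
    # e.g. dispatch(["ramp", "database", "-h"]) execs "ramp-database -h"
    if len(argv) > 1 and not argv[1].startswith("-"):
        command = "ramp-" + argv[1]
        if shutil.which(command) is None:
            sys.exit("Error executing ramp command %r: not found" % argv[1])
        os.execvp(command, [command] + argv[2:])
    sys.exit("subcommand is required")
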
diff --git a/ramp-utils/ramp_utils/testing.py b/ramp-utils/ramp_utils/testing.py
index 934cefd19..5182c5e90 100644
--- a/ramp-utils/ramp_utils/testing.py
+++ b/ramp-utils/ramp_utils/testing.py
@@ -11,7 +11,7 @@ def database_config_template():
filename : str
The database configuration filename.
"""
- return os.path.join(HERE, 'template', 'database_config.yml')
+ return os.path.join(HERE, "template", "database_config.yml")
def ramp_config_template():
@@ -22,7 +22,7 @@ def ramp_config_template():
filename : str
The RAMP configuration filename.
"""
- return os.path.join(HERE, 'template', 'ramp_config.yml')
+ return os.path.join(HERE, "template", "ramp_config.yml")
def ramp_aws_config_template():
@@ -33,4 +33,4 @@ def ramp_aws_config_template():
filename : str
The RAMP configuration on AWS filename.
"""
- return os.path.join(HERE, 'template', 'ramp_config_aws.yml')
+ return os.path.join(HERE, "template", "ramp_config_aws.yml")
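
For orientation, these three helpers only resolve the bundled YAML templates; a minimal usage sketch (the printed paths depend on where ramp-utils is installed):

from ramp_utils.testing import (
    database_config_template,
    ramp_aws_config_template,
    ramp_config_template,
)

print(database_config_template())  # .../ramp_utils/template/database_config.yml
print(ramp_config_template())      # .../ramp_utils/template/ramp_config.yml
print(ramp_aws_config_template())  # .../ramp_utils/template/ramp_config_aws.yml
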
diff --git a/ramp-utils/ramp_utils/tests/test_cli.py b/ramp-utils/ramp_utils/tests/test_cli.py
index 076b56d58..45f56572d 100644
--- a/ramp-utils/ramp_utils/tests/test_cli.py
+++ b/ramp-utils/ramp_utils/tests/test_cli.py
@@ -20,14 +20,14 @@
def deployment_dir(database_connection):
ramp_config = read_config(ramp_config_template())
return os.path.commonpath(
- [ramp_config['ramp']['kit_dir'], ramp_config['ramp']['data_dir']]
+ [ramp_config["ramp"]["kit_dir"], ramp_config["ramp"]["data_dir"]]
)
def setup_function(function):
ramp_config = read_config(ramp_config_template())
function.deployment_dir = os.path.commonpath(
- [ramp_config['ramp']['kit_dir'], ramp_config['ramp']['data_dir']]
+ [ramp_config["ramp"]["kit_dir"], ramp_config["ramp"]["data_dir"]]
)
@@ -36,7 +36,7 @@ def teardown_function(function):
    # FIXME: we are recreating the deployment directory but it should be
    # replaced by creating a temporary folder.
shutil.rmtree(function.deployment_dir, ignore_errors=True)
- db, _ = setup_db(database_config['sqlalchemy'])
+ db, _ = setup_db(database_config["sqlalchemy"])
Model.metadata.drop_all(db)
@@ -44,11 +44,9 @@ def test_setup_init(deployment_dir):
try:
os.mkdir(deployment_dir)
runner = CliRunner()
- result = runner.invoke(main, ['init',
- '--deployment-dir', deployment_dir])
+ result = runner.invoke(main, ["init", "--deployment-dir", deployment_dir])
assert result.exit_code == 0, result.output
- result = runner.invoke(main, ['init',
- '--deployment-dir', deployment_dir])
+ result = runner.invoke(main, ["init", "--deployment-dir", deployment_dir])
assert result.exit_code == 0, result.output
finally:
shutil.rmtree(deployment_dir, ignore_errors=True)
@@ -58,18 +56,39 @@ def test_setup_init_event(deployment_dir):
try:
os.mkdir(deployment_dir)
runner = CliRunner()
- result = runner.invoke(main, ['init-event',
- '--name', 'iris_test',
- '--deployment-dir', deployment_dir])
+ result = runner.invoke(
+ main,
+ [
+ "init-event",
+ "--name",
+ "iris_test",
+ "--deployment-dir",
+ deployment_dir,
+ ],
+ )
assert result.exit_code == 0, result.output
- result = runner.invoke(main, ['init-event',
- '--name', 'iris_test',
- '--deployment-dir', deployment_dir])
+ result = runner.invoke(
+ main,
+ [
+ "init-event",
+ "--name",
+ "iris_test",
+ "--deployment-dir",
+ deployment_dir,
+ ],
+ )
assert result.exit_code == 0, result.output
- result = runner.invoke(main, ['init-event',
- '--name', 'iris_test',
- '--deployment-dir', deployment_dir,
- '--force'])
+ result = runner.invoke(
+ main,
+ [
+ "init-event",
+ "--name",
+ "iris_test",
+ "--deployment-dir",
+ deployment_dir,
+ "--force",
+ ],
+ )
assert result.exit_code == 0, result.output
finally:
shutil.rmtree(deployment_dir, ignore_errors=True)
@@ -77,23 +96,37 @@ def test_setup_init_event(deployment_dir):
def test_deploy_ramp_event():
runner = CliRunner()
- result = runner.invoke(main, ['deploy-event',
- '--config', database_config_template(),
- '--event-config', ramp_config_template()])
+ result = runner.invoke(
+ main,
+ [
+ "deploy-event",
+ "--config",
+ database_config_template(),
+ "--event-config",
+ ramp_config_template(),
+ ],
+ )
assert result.exit_code == 0, result.output
- result = runner.invoke(main, ['deploy-event',
- '--config', database_config_template(),
- '--event-config', ramp_config_template(),
- '--force'])
+ result = runner.invoke(
+ main,
+ [
+ "deploy-event",
+ "--config",
+ database_config_template(),
+ "--event-config",
+ ramp_config_template(),
+ "--force",
+ ],
+ )
assert result.exit_code == 0, result.output
@pytest.mark.parametrize(
- 'subcommand', [None, 'database', 'frontend', 'launch', 'setup']
+ "subcommand", [None, "database", "frontend", "launch", "setup"]
)
def test_ramp_cli(subcommand):
- cmd = ['ramp']
+ cmd = ["ramp"]
if subcommand is not None:
cmd += [subcommand]
- cmd += ['-h']
+ cmd += ["-h"]
subprocess.check_output(cmd, env=os.environ.copy())
diff --git a/ramp-utils/ramp_utils/tests/test_config_parser.py b/ramp-utils/ramp_utils/tests/test_config_parser.py
index 3e9a8609e..98d09f3c4 100644
--- a/ramp-utils/ramp_utils/tests/test_config_parser.py
+++ b/ramp-utils/ramp_utils/tests/test_config_parser.py
@@ -7,33 +7,47 @@
@pytest.fixture
def simple_config(database_connection):
- data = {'sqlalchemy': {'username': 'mrramp', 'password': 'mrramp'},
- 'ramp': {'event_name': 'iris_test'}}
- with tempfile.NamedTemporaryFile(mode='w', suffix='.yml') as config_file:
+ data = {
+ "sqlalchemy": {"username": "mrramp", "password": "mrramp"},
+ "ramp": {"event_name": "iris_test"},
+ }
+ with tempfile.NamedTemporaryFile(mode="w", suffix=".yml") as config_file:
yaml.dump(data, config_file, default_flow_style=False)
yield config_file.name
@pytest.mark.parametrize(
"filter_section, expected_config",
- [(None, {'sqlalchemy': {'username': 'mrramp', 'password': 'mrramp'},
- 'ramp': {'event_name': 'iris_test'}}),
- (['ramp'], {'ramp': {'event_name': 'iris_test'}}),
- ('ramp', {'event_name': 'iris_test'})]
+ [
+ (
+ None,
+ {
+ "sqlalchemy": {"username": "mrramp", "password": "mrramp"},
+ "ramp": {"event_name": "iris_test"},
+ },
+ ),
+ (["ramp"], {"ramp": {"event_name": "iris_test"}}),
+ ("ramp", {"event_name": "iris_test"}),
+ ],
)
def test_read_config_filtering(simple_config, filter_section, expected_config):
- config = read_config(simple_config, filter_section=filter_section,
- check_requirements=False)
+ config = read_config(
+ simple_config, filter_section=filter_section, check_requirements=False
+ )
assert config == expected_config
@pytest.mark.parametrize(
"filter_section, check_requirements, err_msg",
- [('unknown', False, 'The section "unknown" is not in'),
- (None, True, 'The section "sqlalchemy" in the')]
+ [
+ ("unknown", False, 'The section "unknown" is not in'),
+ (None, True, 'The section "sqlalchemy" in the'),
+ ],
)
-def test_read_config_error(simple_config, filter_section, check_requirements,
- err_msg):
+def test_read_config_error(simple_config, filter_section, check_requirements, err_msg):
with pytest.raises(ValueError, match=err_msg):
- read_config(simple_config, filter_section=filter_section,
- check_requirements=check_requirements)
+ read_config(
+ simple_config,
+ filter_section=filter_section,
+ check_requirements=check_requirements,
+ )
diff --git a/ramp-utils/ramp_utils/tests/test_deploy.py b/ramp-utils/ramp_utils/tests/test_deploy.py
index b87902fc9..04e609bbd 100644
--- a/ramp-utils/ramp_utils/tests/test_deploy.py
+++ b/ramp-utils/ramp_utils/tests/test_deploy.py
@@ -34,10 +34,10 @@ def session_scope_function(database_connection):
    # FIXME: we are recreating the deployment directory but it should be
    # replaced by creating a temporary folder.
deployment_dir = os.path.commonpath(
- [ramp_config['ramp']['kit_dir'], ramp_config['ramp']['data_dir']]
+ [ramp_config["ramp"]["kit_dir"], ramp_config["ramp"]["data_dir"]]
)
shutil.rmtree(deployment_dir, ignore_errors=True)
- db, _ = setup_db(database_config['sqlalchemy'])
+ db, _ = setup_db(database_config["sqlalchemy"])
Model.metadata.drop_all(db)
@@ -46,43 +46,47 @@ def test_deploy_ramp_event_options(session_scope_function):
ramp_config = generate_ramp_config(read_config(ramp_config_template()))
deploy_ramp_event(database_config_template(), ramp_config_template())
# deploy again by forcing the deployment
- deploy_ramp_event(
- database_config_template(), ramp_config_template(), force=True
- )
+ deploy_ramp_event(database_config_template(), ramp_config_template(), force=True)
    # do not deploy the kit, to trigger the error in the problem when we don't
    # force the deployment
- msg_err = 'The RAMP problem already exists in the database.'
+ msg_err = "The RAMP problem already exists in the database."
with pytest.raises(ValueError, match=msg_err):
- with session_scope(database_config['sqlalchemy']) as session:
+ with session_scope(database_config["sqlalchemy"]) as session:
# if one of the ramp-kit or ramp-data folders changed
- problem = get_problem(session, 'iris')
- problem.path_ramp_kit = problem.path_ramp_kit + '_xxx'
+ problem = get_problem(session, "iris")
+ problem.path_ramp_kit = problem.path_ramp_kit + "_xxx"
session.commit()
deploy_ramp_event(
- database_config_template(), ramp_config_template(),
- setup_ramp_repo=False, force=False
+ database_config_template(),
+ ramp_config_template(),
+ setup_ramp_repo=False,
+ force=False,
)
- problem = get_problem(session, 'iris')
- problem.path_ramp_kit = ramp_config['ramp_kit_dir']
- problem.path_ramp_data = problem.path_ramp_data + '_xxx'
+ problem = get_problem(session, "iris")
+ problem.path_ramp_kit = ramp_config["ramp_kit_dir"]
+ problem.path_ramp_data = problem.path_ramp_data + "_xxx"
session.commit()
deploy_ramp_event(
- database_config_template(), ramp_config_template(),
- setup_ramp_repo=False, force=False
+ database_config_template(),
+ ramp_config_template(),
+ setup_ramp_repo=False,
+ force=False,
)
- msg_err = 'Attempting to overwrite existing event'
+ msg_err = "Attempting to overwrite existing event"
with pytest.raises(ValueError, match=msg_err):
- with session_scope(database_config['sqlalchemy']) as session:
+ with session_scope(database_config["sqlalchemy"]) as session:
# if the problem is the same, then the event should be overwritten
- problem = get_problem(session, 'iris')
- problem.path_ramp_kit = ramp_config['ramp_kit_dir']
- problem.path_ramp_data = ramp_config['ramp_data_dir']
+ problem = get_problem(session, "iris")
+ problem.path_ramp_kit = ramp_config["ramp_kit_dir"]
+ problem.path_ramp_data = ramp_config["ramp_data_dir"]
session.commit()
deploy_ramp_event(
- database_config_template(), ramp_config_template(),
- setup_ramp_repo=False, force=False
+ database_config_template(),
+ ramp_config_template(),
+ setup_ramp_repo=False,
+ force=False,
)
@@ -96,30 +100,37 @@ def test_deploy_ramp_event(session_scope_function):
# check that we created the archive
assert os.path.isfile(
os.path.join(
- ramp_config['ramp_kit_dir'], 'events_archived',
- ramp_config['event_name'] + '.zip'
+ ramp_config["ramp_kit_dir"],
+ "events_archived",
+ ramp_config["event_name"] + ".zip",
)
)
# simulate that we add users who sign up for the event and submit the
# starting kit
- with session_scope(database_config['sqlalchemy']) as session:
+ with session_scope(database_config["sqlalchemy"]) as session:
add_users(session)
- sign_up_team(session, ramp_config['event_name'], 'test_user')
- submit_starting_kits(session, ramp_config['event_name'], 'test_user',
- ramp_config['ramp_kit_submissions_dir'])
+ sign_up_team(session, ramp_config["event_name"], "test_user")
+ submit_starting_kits(
+ session,
+ ramp_config["event_name"],
+ "test_user",
+ ramp_config["ramp_kit_submissions_dir"],
+ )
# run the dispatcher on the events that are in the database
- dispatcher = Dispatcher(config=database_config,
- event_config=event_config,
- worker=CondaEnvWorker,
- n_workers=-1,
- hunger_policy='exit')
+ dispatcher = Dispatcher(
+ config=database_config,
+ event_config=event_config,
+ worker=CondaEnvWorker,
+ n_workers=-1,
+ hunger_policy="exit",
+ )
dispatcher.launch()
# the iris kit contains a submission which should fail for a user
- with session_scope(database_config['sqlalchemy']) as session:
+ with session_scope(database_config["sqlalchemy"]) as session:
submission = get_submissions(
- session, event_config['ramp']['event_name'], 'training_error'
+ session, event_config["ramp"]["event_name"], "training_error"
)
assert len(submission) == 1
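
For context, a minimal sketch of the deploy-then-dispatch flow exercised by the test above, using the same template configurations and the same Dispatcher arguments; the import paths are assumptions taken from the wider code base rather than stated in this diff.

from ramp_utils import read_config
from ramp_utils.testing import database_config_template, ramp_config_template
from ramp_utils.deploy import deploy_ramp_event
from ramp_engine.dispatcher import Dispatcher
from ramp_engine.local import CondaEnvWorker

database_config = read_config(database_config_template())
event_config = read_config(ramp_config_template())

# create the problem/event entries and populate the kit and data folders
deploy_ramp_event(database_config_template(), ramp_config_template())

# train and score every pending submission, then exit once the queue is empty
dispatcher = Dispatcher(
    config=database_config,
    event_config=event_config,
    worker=CondaEnvWorker,
    n_workers=-1,
    hunger_policy="exit",
)
dispatcher.launch()
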
diff --git a/ramp-utils/ramp_utils/tests/test_frontend.py b/ramp-utils/ramp_utils/tests/test_frontend.py
index a0707656c..1f90ade3d 100644
--- a/ramp-utils/ramp_utils/tests/test_frontend.py
+++ b/ramp-utils/ramp_utils/tests/test_frontend.py
@@ -9,44 +9,44 @@
@pytest.mark.parametrize(
"config",
- [database_config_template(),
- read_config(database_config_template())]
+ [database_config_template(), read_config(database_config_template())],
)
def test_generate_flask_config(config):
flask_config = generate_flask_config(config)
expected_config = {
- 'SECRET_KEY': 'abcdefghijkl',
- 'WTF_CSRF_ENABLED': True,
- 'LOGIN_INSTRUCTIONS': None,
- 'LOG_FILENAME': 'None',
- 'MAX_CONTENT_LENGTH': 1073741824,
- 'PRIVACY_POLICY_PAGE': None,
- 'DEBUG': True,
- 'TESTING': False,
- 'MAIL_SERVER': 'localhost',
- 'MAIL_PORT': 8025,
- 'MAIL_DEFAULT_SENDER': ['RAMP admin', 'rampmailer@localhost.com'],
- 'SIGN_UP_ASK_SOCIAL_MEDIA': False,
- 'SIGN_UP_INSTRUCTIONS': None,
- 'SQLALCHEMY_TRACK_MODIFICATIONS': False,
- 'SQLALCHEMY_DATABASE_URI': ('postgresql://mrramp:mrramp@localhost:5432'
- '/databoard_test'),
- 'TRACK_USER_INTERACTION': True,
- 'TRACK_CREDITS': True,
- 'DOMAIN_NAME': 'localhost'
- }
+ "SECRET_KEY": "abcdefghijkl",
+ "WTF_CSRF_ENABLED": True,
+ "LOGIN_INSTRUCTIONS": None,
+ "LOG_FILENAME": "None",
+ "MAX_CONTENT_LENGTH": 1073741824,
+ "PRIVACY_POLICY_PAGE": None,
+ "DEBUG": True,
+ "TESTING": False,
+ "MAIL_SERVER": "localhost",
+ "MAIL_PORT": 8025,
+ "MAIL_DEFAULT_SENDER": ["RAMP admin", "rampmailer@localhost.com"],
+ "SIGN_UP_ASK_SOCIAL_MEDIA": False,
+ "SIGN_UP_INSTRUCTIONS": None,
+ "SQLALCHEMY_TRACK_MODIFICATIONS": False,
+ "SQLALCHEMY_DATABASE_URI": (
+ "postgresql://mrramp:mrramp@localhost:5432" "/databoard_test"
+ ),
+ "TRACK_USER_INTERACTION": True,
+ "TRACK_CREDITS": True,
+ "DOMAIN_NAME": "localhost",
+ }
assert flask_config == expected_config
def test_read_if_html_path(tmpdir):
- msg = 'some arbitrary text'
+ msg = "some arbitrary text"
assert _read_if_html_path(msg) == msg
- msg = 'an_ivalid_path.html'
+ msg = "an_ivalid_path.html"
with pytest.raises(FileNotFoundError):
_read_if_html_path(msg)
- msg = str(tmpdir / 'some_file.html')
- with open(msg, 'wt') as fh:
- fh.write('Privacy Policy')
- assert _read_if_html_path(msg) == 'Privacy Policy'
+ msg = str(tmpdir / "some_file.html")
+ with open(msg, "wt") as fh:
+ fh.write("Privacy Policy")
+ assert _read_if_html_path(msg) == "Privacy Policy"
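
A small sketch of how the mapping returned by generate_flask_config could be applied to a Flask application. Only the expected keys and values come from the test above; the module path of generate_flask_config and the Flask usage are assumptions.

from flask import Flask

from ramp_utils.frontend import generate_flask_config  # assumed module path
from ramp_utils.testing import database_config_template

app = Flask(__name__)
# the function accepts either a YAML path or an already-parsed dict,
# as the parametrized test above shows
app.config.update(generate_flask_config(database_config_template()))
assert app.config["MAIL_PORT"] == 8025
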
diff --git a/ramp-utils/ramp_utils/tests/test_ramp.py b/ramp-utils/ramp_utils/tests/test_ramp.py
index c5fe8031b..93daa8e82 100644
--- a/ramp-utils/ramp_utils/tests/test_ramp.py
+++ b/ramp-utils/ramp_utils/tests/test_ramp.py
@@ -12,17 +12,23 @@
def _get_event_config(version):
- return os.path.join(
- HERE, 'data', 'ramp_config_{}.yml'.format(version)
- )
+ return os.path.join(HERE, "data", "ramp_config_{}.yml".format(version))
@pytest.mark.parametrize(
"event_config, database_config, err_msg",
- [(_get_event_config('absolute'), None,
- "you need to provide the filename of the database as well"),
- (read_config(_get_event_config('missing')), None,
- "you need to provide all following keys")]
+ [
+ (
+ _get_event_config("absolute"),
+ None,
+ "you need to provide the filename of the database as well",
+ ),
+ (
+ read_config(_get_event_config("missing")),
+ None,
+ "you need to provide all following keys",
+ ),
+ ],
)
def test_generate_ramp_config_error(event_config, database_config, err_msg):
with pytest.raises(ValueError, match=err_msg):
@@ -31,70 +37,63 @@ def test_generate_ramp_config_error(event_config, database_config, err_msg):
@pytest.mark.parametrize(
"event_config, database_config",
- [(ramp_config_template(), database_config_template()),
- (read_config(ramp_config_template()), None),
- (read_config(ramp_config_template(), filter_section='ramp'), None)]
+ [
+ (ramp_config_template(), database_config_template()),
+ (read_config(ramp_config_template()), None),
+ (read_config(ramp_config_template(), filter_section="ramp"), None),
+ ],
)
def test_generate_ramp_config(event_config, database_config):
ramp_config = generate_ramp_config(event_config, database_config)
expected_config = {
- 'problem_name': 'iris',
- 'event_name': 'iris_test',
- 'event_title': 'Iris event',
- 'event_is_public': True,
- 'sandbox_name': 'starting_kit',
- 'ramp_kit_dir': os.path.join(
- '/tmp/databoard_test', 'ramp-kits', 'iris'
- ),
- 'ramp_data_dir': os.path.join(
- '/tmp/databoard_test', 'ramp-data', 'iris'
- ),
- 'ramp_kit_submissions_dir': os.path.join(
- '/tmp/databoard_test', 'ramp-kits', 'iris', 'submissions'
- ),
- 'ramp_submissions_dir': os.path.join(
- '/tmp/databoard_test', 'submissions'
- ),
- 'ramp_sandbox_dir': os.path.join(
- '/tmp/databoard_test', 'ramp-kits', 'iris', 'submissions',
- 'starting_kit'
+ "problem_name": "iris",
+ "event_name": "iris_test",
+ "event_title": "Iris event",
+ "event_is_public": True,
+ "sandbox_name": "starting_kit",
+ "ramp_kit_dir": os.path.join("/tmp/databoard_test", "ramp-kits", "iris"),
+ "ramp_data_dir": os.path.join("/tmp/databoard_test", "ramp-data", "iris"),
+ "ramp_kit_submissions_dir": os.path.join(
+ "/tmp/databoard_test", "ramp-kits", "iris", "submissions"
),
- 'ramp_logs_dir': os.path.join(
- '/tmp/databoard_test', 'log'
+ "ramp_submissions_dir": os.path.join("/tmp/databoard_test", "submissions"),
+ "ramp_sandbox_dir": os.path.join(
+ "/tmp/databoard_test",
+ "ramp-kits",
+ "iris",
+ "submissions",
+ "starting_kit",
),
- 'ramp_predictions_dir': os.path.join(
- '/tmp/databoard_test', 'preds'
- )
+ "ramp_logs_dir": os.path.join("/tmp/databoard_test", "log"),
+ "ramp_predictions_dir": os.path.join("/tmp/databoard_test", "preds"),
}
assert ramp_config == expected_config
def test_generate_ramp_config_short():
ramp_config = generate_ramp_config(
- _get_event_config('short'), database_config_template()
+ _get_event_config("short"), database_config_template()
)
expected_config = {
- 'problem_name': 'iris',
- 'event_name': 'iris_test',
- 'event_title': 'Iris event',
- 'ramp_kit_dir': os.path.join('template', 'ramp-kits', 'iris'),
- 'ramp_data_dir': os.path.join('template', 'ramp-data', 'iris'),
- 'ramp_submissions_dir': os.path.join(
- 'template', 'events', 'iris_test', 'submissions'
+ "problem_name": "iris",
+ "event_name": "iris_test",
+ "event_title": "Iris event",
+ "ramp_kit_dir": os.path.join("template", "ramp-kits", "iris"),
+ "ramp_data_dir": os.path.join("template", "ramp-data", "iris"),
+ "ramp_submissions_dir": os.path.join(
+ "template", "events", "iris_test", "submissions"
),
- 'sandbox_name': 'starting_kit',
- 'ramp_predictions_dir': os.path.join(
- 'template', 'events', 'iris_test', 'predictions'
+ "sandbox_name": "starting_kit",
+ "ramp_predictions_dir": os.path.join(
+ "template", "events", "iris_test", "predictions"
),
- 'ramp_logs_dir': os.path.join(
- 'template', 'events', 'iris_test', 'logs'
+ "ramp_logs_dir": os.path.join("template", "events", "iris_test", "logs"),
+ "ramp_sandbox_dir": os.path.join(
+ "template", "ramp-kits", "iris", "submissions", "starting_kit"
),
- 'ramp_sandbox_dir': os.path.join(
- 'template', 'ramp-kits', 'iris', 'submissions', 'starting_kit'
+ "ramp_kit_submissions_dir": os.path.join(
+ "template", "ramp-kits", "iris", "submissions"
),
- 'ramp_kit_submissions_dir': os.path.join(
- 'template', 'ramp-kits', 'iris', 'submissions'
- )
}
for key in expected_config:
assert expected_config[key] in ramp_config[key]
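
A short sketch of the two input forms accepted by generate_ramp_config, mirroring the parametrization above; the import paths are assumptions.

from ramp_utils import read_config, generate_ramp_config
from ramp_utils.testing import database_config_template, ramp_config_template

# from a YAML path: the database configuration file must also be given
config = generate_ramp_config(ramp_config_template(), database_config_template())

# from an already-parsed dict: no separate database file is needed
config = generate_ramp_config(read_config(ramp_config_template()))
print(config["ramp_kit_dir"])  # e.g. /tmp/databoard_test/ramp-kits/iris
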
diff --git a/ramp-utils/ramp_utils/tests/test_testing.py b/ramp-utils/ramp_utils/tests/test_testing.py
index 829b3f232..c84242d87 100644
--- a/ramp-utils/ramp_utils/tests/test_testing.py
+++ b/ramp-utils/ramp_utils/tests/test_testing.py
@@ -10,8 +10,10 @@
@pytest.mark.parametrize(
"config_func, partial_path",
- [(database_config_template, join('template', 'database_config.yml')),
- (ramp_config_template, join('template', 'ramp_config.yml'))]
+ [
+ (database_config_template, join("template", "database_config.yml")),
+ (ramp_config_template, join("template", "ramp_config.yml")),
+ ],
)
def test_path_configuration(config_func, partial_path):
path = config_func()
diff --git a/ramp-utils/ramp_utils/tests/test_utils.py b/ramp-utils/ramp_utils/tests/test_utils.py
index d2f48baf6..06b735136 100644
--- a/ramp-utils/ramp_utils/tests/test_utils.py
+++ b/ramp-utils/ramp_utils/tests/test_utils.py
@@ -6,7 +6,5 @@
def test_import_module_from_source():
module_path = os.path.dirname(__file__)
# import the local_module.py which consists of a single function.
- mod = import_module_from_source(
- os.path.join(module_path, 'local_module.py'), 'mod'
- )
- assert hasattr(mod, 'func_local_module')
+ mod = import_module_from_source(os.path.join(module_path, "local_module.py"), "mod")
+ assert hasattr(mod, "func_local_module")
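
A sketch of import_module_from_source outside the test: it loads an arbitrary Python file as a module under a chosen name, without the file being on sys.path. The submission path and attribute name below are purely hypothetical.

from ramp_utils.utils import import_module_from_source

# the path is illustrative; any .py file can be loaded this way
mod = import_module_from_source(
    "submissions/starting_kit/estimator.py", "estimator"
)
print(hasattr(mod, "get_estimator"))  # hypothetical attribute name
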
diff --git a/ramp-utils/ramp_utils/tests/test_worker.py b/ramp-utils/ramp_utils/tests/test_worker.py
index 4bcb6ae98..3e1fcefd1 100644
--- a/ramp-utils/ramp_utils/tests/test_worker.py
+++ b/ramp-utils/ramp_utils/tests/test_worker.py
@@ -14,13 +14,13 @@ def test_generate_worker_config():
ramp_config_template(), database_config_template()
)
expected_config = {
- 'worker_type': 'conda',
- 'conda_env': 'ramp-iris',
- 'kit_dir': os.path.join('/tmp/databoard_test', 'ramp-kits', 'iris'),
- 'data_dir': os.path.join('/tmp/databoard_test', 'ramp-data', 'iris'),
- 'submissions_dir': os.path.join('/tmp/databoard_test', 'submissions'),
- 'predictions_dir': os.path.join('/tmp/databoard_test', 'preds'),
- 'logs_dir': os.path.join('/tmp/databoard_test', 'log')
+ "worker_type": "conda",
+ "conda_env": "ramp-iris",
+ "kit_dir": os.path.join("/tmp/databoard_test", "ramp-kits", "iris"),
+ "data_dir": os.path.join("/tmp/databoard_test", "ramp-data", "iris"),
+ "submissions_dir": os.path.join("/tmp/databoard_test", "submissions"),
+ "predictions_dir": os.path.join("/tmp/databoard_test", "preds"),
+ "logs_dir": os.path.join("/tmp/databoard_test", "log"),
}
assert worker_config == expected_config
@@ -28,8 +28,8 @@ def test_generate_worker_config():
def test_generate_worker_config_missing_params():
ramp_config = read_config(ramp_config_template())
# rename one of the keys to make the generation fail
- ramp_config['worker']['env'] = ramp_config['worker']['conda_env']
- del ramp_config['worker']['conda_env']
+ ramp_config["worker"]["env"] = ramp_config["worker"]["conda_env"]
+ del ramp_config["worker"]["conda_env"]
err_msg = "The conda worker is missing the parameter"
with pytest.raises(ValueError, match=err_msg):
generate_worker_config(ramp_config)
diff --git a/ramp-utils/ramp_utils/worker.py b/ramp-utils/ramp_utils/worker.py
index a8ee6230e..4308b1f38 100644
--- a/ramp-utils/ramp_utils/worker.py
+++ b/ramp-utils/ramp_utils/worker.py
@@ -2,11 +2,20 @@
from .ramp import generate_ramp_config
REQUIRED_KEYS = {
- 'conda': {'conda_env'},
- 'aws': {'access_key_id', 'secret_access_key', 'region_name',
- 'ami_image_name', 'ami_user_name', 'instance_type',
- 'key_name', 'security_group', 'key_path', 'remote_ramp_kit_folder',
- 'memory_profiling'}
+ "conda": {"conda_env"},
+ "aws": {
+ "access_key_id",
+ "secret_access_key",
+ "region_name",
+ "ami_image_name",
+ "ami_user_name",
+ "instance_type",
+ "key_name",
+ "security_group",
+ "key_path",
+ "remote_ramp_kit_folder",
+ "memory_profiling",
+ },
}
@@ -31,31 +40,31 @@ def generate_worker_config(event_config, database_config=None):
"""
if isinstance(event_config, str):
ramp_config = generate_ramp_config(event_config, database_config)
- event_config = read_config(
- event_config, filter_section=['ramp', 'worker'])
+ event_config = read_config(event_config, filter_section=["ramp", "worker"])
else:
ramp_config = generate_ramp_config(event_config)
# copy the specific information for the given worker configuration
- worker_config = event_config['worker'].copy()
+ worker_config = event_config["worker"].copy()
# define the directory of the ramp-kit for the event
- worker_config['kit_dir'] = ramp_config['ramp_kit_dir']
+ worker_config["kit_dir"] = ramp_config["ramp_kit_dir"]
# define the directory of the ramp-data for the event
- worker_config['data_dir'] = ramp_config['ramp_data_dir']
+ worker_config["data_dir"] = ramp_config["ramp_data_dir"]
# define the directory of the submissions
- worker_config['submissions_dir'] = ramp_config['ramp_submissions_dir']
+ worker_config["submissions_dir"] = ramp_config["ramp_submissions_dir"]
# define the directory of the predictions
- worker_config['predictions_dir'] = ramp_config['ramp_predictions_dir']
+ worker_config["predictions_dir"] = ramp_config["ramp_predictions_dir"]
# define the directory of the logs
- worker_config['logs_dir'] = ramp_config['ramp_logs_dir']
+ worker_config["logs_dir"] = ramp_config["ramp_logs_dir"]
- if worker_config['worker_type'] in REQUIRED_KEYS.keys():
- required_fields = REQUIRED_KEYS[worker_config['worker_type']]
+ if worker_config["worker_type"] in REQUIRED_KEYS.keys():
+ required_fields = REQUIRED_KEYS[worker_config["worker_type"]]
missing_parameters = required_fields.difference(worker_config)
if missing_parameters:
raise ValueError(
- 'The {} worker is missing the parameter(s): {}'
- .format(worker_config['worker_type'], missing_parameters)
+ "The {} worker is missing the parameter(s): {}".format(
+ worker_config["worker_type"], missing_parameters
+ )
)
return worker_config
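
A sketch of the validation path above: removing a key that REQUIRED_KEYS lists for the conda worker makes generate_worker_config raise the error checked in test_generate_worker_config_missing_params. The import paths are assumptions.

from ramp_utils import read_config, generate_worker_config
from ramp_utils.testing import ramp_config_template

event_config = read_config(ramp_config_template())
del event_config["worker"]["conda_env"]  # drop a key required for "conda"
try:
    generate_worker_config(event_config)
except ValueError as exc:
    print(exc)  # The conda worker is missing the parameter(s): {'conda_env'}
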
diff --git a/ramp-utils/setup.py b/ramp-utils/setup.py
index 4cf3d3d47..0b0fa843b 100755
--- a/ramp-utils/setup.py
+++ b/ramp-utils/setup.py
@@ -5,46 +5,50 @@
from setuptools import find_packages, setup
# get __version__ from _version.py
-ver_file = os.path.join('ramp_utils', '_version.py')
+ver_file = os.path.join("ramp_utils", "_version.py")
with open(ver_file) as f:
exec(f.read())
-DISTNAME = 'ramp-utils'
+DISTNAME = "ramp-utils"
DESCRIPTION = "Utilities shared across the RAMP bundle"
-with codecs.open('README.rst', encoding='utf-8-sig') as f:
+with codecs.open("README.rst", encoding="utf-8-sig") as f:
LONG_DESCRIPTION = f.read()
-MAINTAINER = 'A. Boucaud, B. Kegl, G. Lemaitre, J. Van den Bossche'
-MAINTAINER_EMAIL = 'boucaud.alexandre@gmail.com, guillaume.lemaitre@inria.fr'
-URL = 'https://github.com/paris-saclay-cds/ramp-board'
-LICENSE = 'BSD (3-clause)'
-DOWNLOAD_URL = 'https://github.com/paris-saclay-cds/ramp-board'
+MAINTAINER = "A. Boucaud, B. Kegl, G. Lemaitre, J. Van den Bossche"
+MAINTAINER_EMAIL = "boucaud.alexandre@gmail.com, guillaume.lemaitre@inria.fr"
+URL = "https://github.com/paris-saclay-cds/ramp-board"
+LICENSE = "BSD (3-clause)"
+DOWNLOAD_URL = "https://github.com/paris-saclay-cds/ramp-board"
VERSION = __version__ # noqa
-CLASSIFIERS = ['Intended Audience :: Science/Research',
- 'Intended Audience :: Developers',
- 'License :: OSI Approved',
- 'Programming Language :: Python',
- 'Topic :: Software Development',
- 'Topic :: Scientific/Engineering',
- 'Operating System :: Microsoft :: Windows',
- 'Operating System :: POSIX',
- 'Operating System :: Unix',
- 'Operating System :: MacOS',
- 'Programming Language :: Python :: 3.6',
- 'Programming Language :: Python :: 3.7',
- 'Programming Language :: Python :: 3.8']
-INSTALL_REQUIRES = ['click', 'pandas', 'pyyaml']
+CLASSIFIERS = [
+ "Intended Audience :: Science/Research",
+ "Intended Audience :: Developers",
+ "License :: OSI Approved",
+ "Programming Language :: Python",
+ "Topic :: Software Development",
+ "Topic :: Scientific/Engineering",
+ "Operating System :: Microsoft :: Windows",
+ "Operating System :: POSIX",
+ "Operating System :: Unix",
+ "Operating System :: MacOS",
+ "Programming Language :: Python :: 3.6",
+ "Programming Language :: Python :: 3.7",
+ "Programming Language :: Python :: 3.8",
+]
+INSTALL_REQUIRES = ["click", "pandas", "pyyaml"]
EXTRAS_REQUIRE = {
- 'tests': ['pytest', 'pytest-cov'],
- 'docs': ['sphinx', 'sphinx_rtd_theme', 'numpydoc']
+ "tests": ["pytest", "pytest-cov"],
+ "docs": ["sphinx", "sphinx_rtd_theme", "numpydoc"],
}
PACKAGE_DATA = {
- 'ramp_utils': [os.path.join('tests', 'data', 'ramp_config_absolute.yml'),
- os.path.join('tests', 'data', 'ramp_config_missing.yml'),
- os.path.join('tests', 'data', 'ramp_config_short.yml'),
- os.path.join('template', 'database_config.yml'),
- os.path.join('template', 'ramp_config.yml'),
- os.path.join('template', 'ramp_config_template.yml'),
- os.path.join('template', 'database_config_template.yml')]
+ "ramp_utils": [
+ os.path.join("tests", "data", "ramp_config_absolute.yml"),
+ os.path.join("tests", "data", "ramp_config_missing.yml"),
+ os.path.join("tests", "data", "ramp_config_short.yml"),
+ os.path.join("template", "database_config.yml"),
+ os.path.join("template", "ramp_config.yml"),
+ os.path.join("template", "ramp_config_template.yml"),
+ os.path.join("template", "database_config_template.yml"),
+ ]
}
setup(
@@ -65,9 +69,9 @@
extras_require=EXTRAS_REQUIRE,
python_requires=">=3.7",
entry_points={
- 'console_scripts': [
- 'ramp = ramp_utils.ramp_cli:main',
- 'ramp-setup = ramp_utils.cli:start',
+ "console_scripts": [
+ "ramp = ramp_utils.ramp_cli:main",
+ "ramp-setup = ramp_utils.cli:start",
]
- }
+ },
)