Only drop the support for Python 3.7
Signed-off-by: Simon Zhao <[email protected]>
SimonYansenZhao committed Sep 14, 2023
1 parent 659ac17 commit edfa101
Showing 7 changed files with 16 additions and 15 deletions.
2 changes: 1 addition & 1 deletion .github/workflows/sarplus.yml
@@ -39,7 +39,7 @@ jobs:
     runs-on: ubuntu-22.04
     strategy:
       matrix:
-        python-version: ["3.8", "3.9", "3.10", "3.11"]
+        python-version: ["3.8", "3.9"]
     steps:
       - uses: actions/checkout@v3

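Read with the commit title: the parent commit evidently dropped Python 3.7 and added 3.10/3.11 in a single step, and this change narrows the matrix back to 3.8 and 3.9, so that relative to the original matrix only 3.7 support is removed.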
@@ -581,9 +581,9 @@ def __init__(
     ):
         self._build_bias = build_bias

-        if args is None or (nest.is_nested(args) and not args):
+        if args is None or (nest.is_sequence(args) and not args):
             raise ValueError("`args` must be specified")
-        if not nest.is_nested(args):
+        if not nest.is_sequence(args):
            args = [args]
            self._is_sequence = False
        else:
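The switch back to nest.is_sequence keeps this cell implementation working on older TensorFlow releases; is_nested is simply the newer name for the same structure check. A minimal sketch of what the predicate does, assuming nest here is tensorflow.python.util.nest as in the code above:

    # `is_sequence` is the older spelling of the structure check that newer
    # TensorFlow releases expose as `is_nested`.
    from tensorflow.python.util import nest

    print(nest.is_sequence([1, 2, 3]))  # True: a list is a nested structure
    print(nest.is_sequence("abc"))      # False: strings count as atoms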
4 changes: 2 additions & 2 deletions recommenders/models/rlrmc/RLRMCdataset.py
@@ -68,8 +68,8 @@ def _data_processing(self, train, validation=None, test=None, mean_center=True):
         """
         # Data processing and reindexing code is adopted from https://github.com/Microsoft/Recommenders/blob/main/recommenders/models/ncf/dataset.py
         # If validation dataset is None
-        df = train if validation is None else pd.concat([train, validation])
-        df = df if test is None else pd.concat([df, test])
+        df = train if validation is None else train.append(validation)
+        df = df if test is None else df.append(test)

         # Reindex user and item index
         if self.user_idx is None:
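Moving from pd.concat back to DataFrame.append keeps the dataset code compatible with the older pandas releases this commit targets: append was deprecated in pandas 1.4 and removed in 2.0 in favor of concat. A small sketch of the two spellings, with made-up frames:

    import pandas as pd

    train = pd.DataFrame({"user": [1], "item": [10]})
    validation = pd.DataFrame({"user": [2], "item": [20]})

    # Older pandas (the form this commit restores):
    df = train.append(validation)

    # Newer pandas equivalent of the same concatenation:
    df = pd.concat([train, validation])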
2 changes: 1 addition & 1 deletion recommenders/models/tfidf/tfidf_utils.py
@@ -115,7 +115,7 @@ def clean_dataframe(self, df, cols_to_clean, new_col_name="cleaned_text"):
         return df

     def tokenize_text(
-        self, df_clean, text_col="cleaned_text", ngram_range=(1, 3), min_df=1
+        self, df_clean, text_col="cleaned_text", ngram_range=(1, 3), min_df=0
     ):
         """Tokenize the input text.
         For more details on the TfidfVectorizer, see https://scikit-learn.org/stable/modules/generated/sklearn.feature_extraction.text.TfidfVectorizer.html
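In TfidfVectorizer, an integer min_df is the minimum number of documents a term must appear in to enter the vocabulary, so 0 and 1 admit the same terms in practice: every extracted term occurs in at least one document. A quick sketch with made-up documents, assuming scikit-learn is installed:

    from sklearn.feature_extraction.text import TfidfVectorizer

    docs = ["the quick brown fox", "the lazy dog"]

    # min_df=0 and min_df=1 keep the same vocabulary here, since no term
    # can appear in fewer than one document.
    vectorizer = TfidfVectorizer(ngram_range=(1, 3), min_df=0)
    tfidf = vectorizer.fit_transform(docs)
    print(sorted(vectorizer.vocabulary_))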
1 change: 1 addition & 0 deletions tests/ci/azureml_tests/submit_groupwise_azureml_pytest.py
@@ -37,6 +37,7 @@
 """
 import argparse
 import logging
+import glob

 from azureml.core.authentication import AzureCliAuthentication
 from azureml.core import Workspace
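How the newly imported glob module is used is not visible in this hunk; presumably the script collects files by pattern elsewhere. For reference, a typical (hypothetical) use looks like:

    import glob

    # Hypothetical: gather every pytest module under tests/unit.
    test_files = glob.glob("tests/unit/**/test_*.py", recursive=True)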
@@ -6,7 +6,7 @@
 import pytest
 from unittest.mock import Mock
 from sklearn.preprocessing import minmax_scale
-from pandas.testing import assert_frame_equal
+from pandas.util.testing import assert_frame_equal

 from recommenders.utils.constants import (
     DEFAULT_USER_COL,
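pandas.util.testing is the older home of the pandas test helpers; newer releases moved them to pandas.testing and later removed the old path. Reverting the import keeps the tests importable on the older pandas this commit targets. A minimal sketch, assuming a pandas version where the old path still exists:

    import pandas as pd
    from pandas.util.testing import assert_frame_equal  # old location

    # Newer pandas exposes the same helper as:
    #     from pandas.testing import assert_frame_equal
    df = pd.DataFrame({"a": [1.0, 2.0]})
    assert_frame_equal(df, df.copy())  # passes: frames are identical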
16 changes: 8 additions & 8 deletions tests/unit/recommenders/evaluation/test_spark_evaluation.py
@@ -4,7 +4,7 @@
 import numpy as np
 import pandas as pd
 import pytest
-from pandas.testing import assert_frame_equal
+from pandas.util.testing import assert_frame_equal

 from recommenders.evaluation.python_evaluation import (
     precision_at_k,
@@ -441,7 +441,7 @@ def test_item_novelty(spark_diversity_data, target_metrics):
     )
     actual = evaluator.historical_item_novelty().toPandas()
     assert_frame_equal(
-        target_metrics["item_novelty"], actual, check_exact=False, atol=0.0001,
+        target_metrics["item_novelty"], actual, check_exact=False, check_less_precise=4
     )
     assert np.all(actual["item_novelty"].values >= 0)
     # Test that novelty is zero when data includes only one item
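check_less_precise=4 is the older assert_frame_equal way to compare floats approximately, to roughly four decimal digits; newer pandas deprecated it in favor of explicit tolerances (check_exact=False with atol/rtol), which is the form being reverted here and in every hunk below. A small sketch, assuming a pandas version that still accepts the keyword:

    from pandas import DataFrame
    from pandas.util.testing import assert_frame_equal

    expected = DataFrame({"x": [0.12345]})
    actual = DataFrame({"x": [0.12349]})

    # Older pandas: equal to ~4 decimal digits, so this passes.
    assert_frame_equal(expected, actual, check_less_precise=4)
    # Newer pandas spells the same intent with an explicit tolerance:
    #     assert_frame_equal(expected, actual, check_exact=False, atol=1e-4)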
@@ -482,7 +482,7 @@ def test_user_diversity(spark_diversity_data, target_metrics):
         target_metrics["user_diversity"],
         actual,
         check_exact=False,
-        atol=0.0001,
+        check_less_precise=4,
     )


@@ -510,7 +510,7 @@ def test_user_item_serendipity(spark_diversity_data, target_metrics):
         target_metrics["user_item_serendipity"],
         actual,
         check_exact=False,
-        atol=0.0001,
+        check_less_precise=4,
     )


@@ -529,7 +529,7 @@ def test_user_serendipity(spark_diversity_data, target_metrics):
         target_metrics["user_serendipity"],
         actual,
         check_exact=False,
-        atol=0.0001,
+        check_less_precise=4,
     )


@@ -562,7 +562,7 @@ def test_user_diversity_item_feature_vector(spark_diversity_data, target_metrics):
         target_metrics["user_diversity_item_feature_vector"],
         actual,
         check_exact=False,
-        atol=0.0001,
+        check_less_precise=4,
     )


@@ -599,7 +599,7 @@ def test_user_item_serendipity_item_feature_vector(
         target_metrics["user_item_serendipity_item_feature_vector"],
         actual,
         check_exact=False,
-        atol=0.0001,
+        check_less_precise=4,
     )


@@ -620,7 +620,7 @@ def test_user_serendipity_item_feature_vector(spark_diversity_data, target_metrics):
         target_metrics["user_serendipity_item_feature_vector"],
         actual,
         check_exact=False,
-        atol=0.0001,
+        check_less_precise=4,
     )


