Commit

Merge pull request #181 from aidotse/johanos1-patch-2
Update run_tests.yml
johanos1 authored Dec 17, 2024
2 parents 1f3e747 + 3d68c7f commit ba802d8
Showing 6 changed files with 59 additions and 50 deletions.
56 changes: 20 additions & 36 deletions .github/workflows/run_tests.yml
@@ -1,5 +1,3 @@
# This workflow will install Python dependencies, run tests and lint with a single version of Python
# For more information see: https://help.github.com/actions/language-and-framework-guides/using-python-with-github-actions
name: Running tests

on:
@@ -10,22 +8,8 @@ on:
workflow_dispatch:

jobs:
code-checks:
code-checks-and-tests:
runs-on: ubuntu-latest
steps:
- name: Checkout code
uses: actions/checkout@v4

- name: Ruff Linting
uses: chartboost/ruff-action@v1
with:
args: check leakpro --exclude examples,leakpro/tests

build:
runs-on: ubuntu-latest
permissions:
pull-requests: write
contents: write # Ensure write permission for contents
steps:
- name: Checkout code
uses: actions/checkout@v4
@@ -34,33 +18,33 @@ jobs:
uses: actions/setup-python@v4
with:
python-version: 3.9

- name: Install micromamba
run: |
curl -L https://micromamba.snakepit.net/api/micromamba/linux-64/latest | tar -xvj -C /usr/local/bin/ --strip-components=1 bin/micromamba

- name: Install dependencies with micromamba
- name: Install dependencies
run: |
micromamba create --file environment.yml --name leakpro --root-prefix /home/runner/micromamba-root
python -m pip install --upgrade pip
pip install -r requirements.txt
- name: Set PYTHONPATH
run: echo "PYTHONPATH=$(pwd)" >> $GITHUB_ENV
- name: Ruff Linting
run: |
pip install ruff
ruff check leakpro --exclude examples,leakpro/tests
- name: Install pytest and pytest-cov
shell: bash -l {0}
run: |
micromamba activate leakpro
micromamba install pytest pytest-cov
pip install pytest pytest-cov pytest-mock coverage-badge
- name: Run tests with pytest
shell: bash -l {0}
run: |
micromamba activate leakpro
pytest --junitxml=pytest.xml --cov-report=term-missing:skip-covered --cov=leakpro leakpro/tests/ | tee pytest-coverage.txt
cat ./pytest-coverage.txt
pytest --cov=leakpro --cov-report=term-missing:skip-covered --cov-report=xml --cov-report=html leakpro/tests/
cat ./coverage.xml
- name: Create Coverage Badge
run: |
mkdir -p badges
coverage-badge -o badges/coverage.svg -f
- name: Upload test coverage report
uses: actions/upload-artifact@v4
- name: Deploy Coverage Badge to GitHub Pages
uses: JamesIves/github-pages-deploy-action@v4
with:
name: pytest-coverage.txt
path: pytest-coverage.txt
branch: gh-pages
folder: badges
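
The rewritten workflow drops the micromamba setup in favour of a plain `pip install -r requirements.txt`, folds linting and testing into a single `code-checks-and-tests` job, and publishes a coverage badge to the `gh-pages` branch via `JamesIves/github-pages-deploy-action`. Below is a minimal sketch of reproducing the test and badge steps locally, assuming the packages from `requirements.txt` plus pytest, pytest-cov, pytest-mock, and coverage-badge are installed; the script is illustrative and not part of the commit.

```python
# Hypothetical local reproduction of the new CI steps (not part of this commit).
import os
import subprocess

# Mirror the "Set PYTHONPATH" step so leakpro is importable from the repo root.
env = {**os.environ, "PYTHONPATH": os.getcwd()}

# "Run tests with pytest": same flags as the workflow's run step.
subprocess.run(
    ["pytest", "--cov=leakpro", "--cov-report=term-missing:skip-covered",
     "--cov-report=xml", "--cov-report=html", "leakpro/tests/"],
    check=True, env=env,
)

# "Create Coverage Badge": write the SVG that the workflow deploys to gh-pages.
os.makedirs("badges", exist_ok=True)
subprocess.run(["coverage-badge", "-o", "badges/coverage.svg", "-f"], check=True)
```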
1 change: 1 addition & 0 deletions README.md
@@ -6,6 +6,7 @@
![Open Issues](https://img.shields.io/github/issues/aidotse/LeakPro)
![Open PRs](https://img.shields.io/github/issues-pr/aidotse/LeakPro)
![Downloads](https://img.shields.io/github/downloads/aidotse/LeakPro/total)
![Coverage](https://github.com/aidotse/LeakPro/blob/gh-pages/coverage.svg)

## To install
0. **Clone repository**
1 change: 1 addition & 0 deletions environment.yml
@@ -19,6 +19,7 @@ dependencies:
# Deep Learning
- pytorch
- torchvision
- torchmetrics

# Utilities
- dotmap
29 changes: 16 additions & 13 deletions leakpro/attacks/mia_attacks/lira.py
@@ -134,21 +134,21 @@ def prepare_attack(self:Self)->None:
mask = (num_shadow_models_seen_points > 0) & (num_shadow_models_seen_points < self.num_shadow_models)

# Filter the audit data
self.audit_dataset["data"] = self.audit_dataset["data"][mask]
self.audit_data_indices = self.audit_dataset["data"][mask]
self.in_indices_masks = self.in_indices_masks[mask, :]

# Filter IN and OUT members
self.in_members = np.arange(np.sum(mask[self.audit_dataset["in_members"]]))
num_out_members = np.sum(mask[self.audit_dataset["out_members"]])
self.out_members = np.arange(len(self.in_members), len(self.in_members) + num_out_members)

assert len(self.audit_dataset["data"]) == len(self.in_members) + len(self.out_members)
assert len(self.audit_data_indices) == len(self.in_members) + len(self.out_members)

if len(self.audit_dataset["data"]) == 0:
if len(self.audit_data_indices) == 0:
raise ValueError("No points in the audit dataset are used for the shadow models")

else:
self.audit_dataset["data"] = self.audit_dataset["data"]
self.audit_data_indices = self.audit_dataset["data"]
self.in_members = self.audit_dataset["in_members"]
self.out_members = self.audit_dataset["out_members"]

@@ -160,21 +160,24 @@ def prepare_attack(self:Self)->None:
logger.info("This is not an offline attack!")

logger.info(f"Calculating the logits for all {self.num_shadow_models} shadow models")
self.shadow_models_logits = np.swapaxes(self.signal(self.shadow_models, self.handler, self.audit_dataset["data"],
self.eval_batch_size), 0, 1)
self.shadow_models_logits = np.swapaxes(self.signal(self.shadow_models,
self.handler,
self.audit_data_indices,
self.eval_batch_size), 0, 1)

# Calculate logits for the target model
logger.info("Calculating the logits for the target model")
self.target_logits = np.swapaxes(self.signal([self.target_model], self.handler, self.audit_dataset["data"],
self.eval_batch_size), 0, 1).squeeze()
self.target_logits = np.swapaxes(self.signal([self.target_model],
self.handler,
self.audit_data_indices,
self.eval_batch_size), 0, 1).squeeze()

# Using Memorization boosting
if self.memorization:

# Prepare for memorization
org_audit_data_length = self.audit_dataset["data"].size
self.audit_dataset["data"] = self.audit_dataset["data"][mask] if self.online else self.audit_dataset["data"]
audit_data_labels = self.handler.get_labels(self.audit_dataset["data"])
org_audit_data_length = self.audit_data_indices.size
audit_data_labels = self.handler.get_labels(self.audit_data_indices)

logger.info("Running memorization")
memorization = Memorization(
@@ -185,7 +188,7 @@ def prepare_attack(self:Self)->None:
self.in_indices_masks,
self.shadow_models,
self.target_model,
self.audit_dataset["data"],
self.audit_data_indices,
audit_data_labels,
org_audit_data_length,
self.handler,
@@ -318,5 +321,5 @@ def run_attack(self:Self) -> MIAResult:
true_labels=true_labels,
predictions_proba=None,
signal_values=signal_values,
audit_indices=self.audit_dataset["data"],
audit_indices=self.audit_data_indices,
)
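
The lira.py change stops overwriting `self.audit_dataset["data"]` in place and instead stores the filtered view in a new `self.audit_data_indices` attribute, so the original audit dataset is preserved while the online attack discards points seen by none or all of the shadow models. A small standalone sketch of that masking step, using hypothetical shapes and names rather than the actual LeakPro objects:

```python
import numpy as np

# Hypothetical data: in_indices_masks[i, j] is True if shadow model j
# was trained on audit point i (illustrative only, not the LeakPro API).
num_shadow_models = 4
in_indices_masks = np.random.rand(10, num_shadow_models) > 0.5
audit_data = np.arange(10)  # indices into the population dataset

# Keep only points seen by some, but not all, shadow models.
num_seen = in_indices_masks.sum(axis=1)
mask = (num_seen > 0) & (num_seen < num_shadow_models)

# The commit stores the filtered indices separately instead of mutating
# audit_dataset["data"], so the original index array stays intact.
audit_data_indices = audit_data[mask]
in_indices_masks = in_indices_masks[mask, :]
```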
2 changes: 1 addition & 1 deletion leakpro/tests/mia_attacks/attacks/test_lira.py
@@ -124,7 +124,7 @@ def test_lira_online_attack(image_handler:ImageInputHandler):
assert any(isnan(x) for x in lira_obj.in_member_signals) == False
assert any(isnan(x) for x in lira_obj.out_member_signals) == False

def test_lira_online_attack(image_handler:ImageInputHandler):
def test_lira_offline_attack(image_handler:ImageInputHandler):
# Set up for testing
audit_config = get_audit_config()
lira_params = audit_config.attack_list.lira
20 changes: 20 additions & 0 deletions requirements.txt
@@ -0,0 +1,20 @@
numpy
pandas
scipy
scikit-learn
matplotlib
seaborn
pillow
torch
torchmetrics
torchvision
dotmap
loguru
jinja2
tqdm
pyyaml
numba
pydantic
joblib
pytest
pytest-mock
