Skip to content
Merged
Show file tree
Hide file tree
Changes from 15 commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
16 changes: 13 additions & 3 deletions frontend/summary/dataPivot/DataPivot.js
Original file line number Diff line number Diff line change
Expand Up @@ -44,12 +44,22 @@ class DataPivot {
}

static get_object(pk, callback) {
const url = `/summary/api/data_pivot/${pk}/`;
const url = `/summary/api/data_pivot/${pk}/`,
handleError = err => {
$("#loading_div").hide();
handleVisualError(err, $("#dp_display"));
};

fetch(url, h.fetchGet)
.then(d => d.json())
.then(d => {
fetch(d.data_url, h.fetchGet)
.then(resp => {
if (!resp.ok) {
throw Error(`Invalid server response: ${resp.status}`);
}
return resp;
})
.then(d => d.text())
.then(data => d3.tsvParse(data))
.then(data => {
Expand All @@ -58,9 +68,9 @@ class DataPivot {
callback(dp);
}
})
.catch(err => handleVisualError(err, null));
.catch(handleError);
})
.catch(err => handleVisualError(err, null));
.catch(handleError);
}

static displayAsModal(id) {
Expand Down
57 changes: 15 additions & 42 deletions hawc/apps/animal/exports.py
Original file line number Diff line number Diff line change
Expand Up @@ -13,7 +13,7 @@
from ..common.exports import Exporter, ModelExport
from ..common.helper import FlatFileExporter, cleanHTML
from ..common.models import sql_display, sql_format, str_m2m
from ..materialized.models import FinalRiskOfBiasScore
from ..materialized.exports import get_final_score_df
from ..study.exports import StudyExport
from . import constants, models

Expand Down Expand Up @@ -688,21 +688,18 @@ def _func(group_df: pd.DataFrame) -> pd.Series:
.reset_index(drop=True)
)

def handle_treatment_period(self, df: pd.DataFrame):
txt = df["experiment-type_display"].str.lower()
txt_index = txt.str.find("(")
txt_updated = (
txt.to_frame(name="txt")
.join(txt_index.to_frame(name="txt_index"))
.apply(
lambda x: x["txt"] if x["txt_index"] < 0 else x["txt"][: x["txt_index"]],
axis="columns",
result_type="reduce",
)
).astype(str)
df["treatment period"] = (
txt_updated + " (" + df["dosing_regime-duration_exposure_text"]
).where(df["dosing_regime-duration_exposure_text"].str.len() > 0) + ")"
def handle_treatment_period(self, df: pd.DataFrame) -> pd.DataFrame:
    """Add a "treatment period" column built from experiment type and exposure duration.

    The experiment type display text is lowercased; any trailing parenthetical is
    stripped, and the dosing-regime exposure text (when present) is appended in
    parentheses.
    """

    def _format_period(row):
        period = row["experiment-type_display"].lower()
        paren = period.find("(")
        if paren >= 0:  # TODO - remove extra space after confirming changes
            period = period[:paren]

        duration = row["dosing_regime-duration_exposure_text"]
        if duration:
            period = f"{period} ({duration})"

        return period

    df["treatment period"] = df.apply(_format_period, axis=1, result_type="expand")
    return df

def handle_dose_groups(self, df: pd.DataFrame) -> pd.DataFrame:
Expand Down Expand Up @@ -810,19 +807,7 @@ def build_df(self) -> pd.DataFrame:
return df
if obj := self.queryset.first():
endpoint_ids = list(df["endpoint-id"].unique())
rob_headers, rob_data = FinalRiskOfBiasScore.get_dp_export(
obj.assessment_id,
endpoint_ids,
"animal",
)
rob_df = pd.DataFrame(
data=[
[rob_data[(endpoint_id, metric_id)] for metric_id in rob_headers.keys()]
for endpoint_id in endpoint_ids
],
columns=list(rob_headers.values()),
index=endpoint_ids,
)
rob_df = get_final_score_df(obj.assessment_id, endpoint_ids, "animal")
df = df.join(rob_df, on="endpoint-id")

df["route"] = df["dosing_regime-route_of_exposure_display"].str.lower()
Expand Down Expand Up @@ -1116,19 +1101,7 @@ def build_df(self) -> pd.DataFrame:
return df
if obj := self.queryset.first():
endpoint_ids = list(df["endpoint-id"].unique())
rob_headers, rob_data = FinalRiskOfBiasScore.get_dp_export(
obj.assessment_id,
endpoint_ids,
"animal",
)
rob_df = pd.DataFrame(
data=[
[rob_data[(endpoint_id, metric_id)] for metric_id in rob_headers.keys()]
for endpoint_id in endpoint_ids
],
columns=list(rob_headers.values()),
index=endpoint_ids,
)
rob_df = get_final_score_df(obj.assessment_id, endpoint_ids, "animal")
df = df.join(rob_df, on="endpoint-id")

df["route"] = df["dosing_regime-route_of_exposure_display"].str.lower()
Expand Down
13 changes: 13 additions & 0 deletions hawc/apps/common/helper.py
Original file line number Diff line number Diff line change
Expand Up @@ -587,3 +587,16 @@ def get_current_request() -> HttpRequest:
def get_current_user():
"""Returns the current request user"""
return get_current_request().user


def unique_text_list(items: list[str]) -> list[str]:
    """Return a list of unique items in a text list.

    The first occurrence of a value is kept unchanged; each later duplicate is
    suffixed with its occurrence count, e.g. ["a", "a"] -> ["a", "a (2)"].
    """
    seen: dict[str, int] = {}
    result = []
    for item in items:
        count = seen.get(item, 0) + 1
        seen[item] = count
        result.append(item if count == 1 else f"{item} ({count})")
    return result
55 changes: 37 additions & 18 deletions hawc/apps/epi/exports.py
Original file line number Diff line number Diff line change
Expand Up @@ -2,11 +2,12 @@

import pandas as pd
from django.db.models import Case, Q, When
from scipy.stats import t

from ..common.exports import Exporter, ModelExport
from ..common.helper import FlatFileExporter
from ..common.models import sql_display, sql_format, str_m2m
from ..materialized.models import FinalRiskOfBiasScore
from ..materialized.exports import get_final_score_df
from ..study.exports import StudyExport
from . import constants, models

Expand Down Expand Up @@ -460,6 +461,39 @@ def build_modules(self) -> list[ModelExport]:


class OutcomeDataPivot(FlatFileExporter):
def _add_ci(self, df: pd.DataFrame) -> pd.DataFrame:
# if CI are not reported, calculate from mean/variance estimates. This code is identical
# to `GroupResult.getConfidenceIntervals`, but applied to this data frame
def _calc_cis(row):
if (
row["result_group-lower_ci"] is None
and row["result_group-upper_ci"] is None
and row["result_group-n"] is not None
and row["result_group-estimate"] is not None
and row["result_group-variance"] is not None
and row["result_group-n"] > 0
):
n = row["result_group-n"]
est = row["result_group-estimate"]
var = row["result_group-variance"]
z = t.ppf(0.975, max(n - 1, 1))
change = None

if row["result-variance_type"] == "SD":
change = z * var / math.sqrt(n)
elif row["result-variance_type"] in ("SE", "SEM"):
change = z * var

if change is not None:
return est - change, est + change

return row["result_group-lower_ci"], row["result_group-upper_ci"]

df[["result_group-lower_ci", "result_group-upper_ci"]] = df.apply(
_calc_cis, axis=1, result_type="expand"
)
return df

def _add_percent_control(self, df: pd.DataFrame) -> pd.DataFrame:
def _get_stdev(x: pd.Series):
return models.GroupResult.stdev(
Expand Down Expand Up @@ -515,19 +549,7 @@ def build_df(self) -> pd.DataFrame:
df = EpiDataPivotExporter().get_df(self.queryset.order_by("id", "results__results"))
if obj := self.queryset.first():
outcome_ids = list(df["outcome-id"].unique())
rob_headers, rob_data = FinalRiskOfBiasScore.get_dp_export(
obj.assessment_id,
outcome_ids,
"epi",
)
rob_df = pd.DataFrame(
data=[
[rob_data[(outcome_id, metric_id)] for metric_id in rob_headers.keys()]
for outcome_id in outcome_ids
],
columns=list(rob_headers.values()),
index=outcome_ids,
)
rob_df = get_final_score_df(obj.assessment_id, outcome_ids, "epi")
df = df.join(rob_df, on="outcome-id")

df["Reference/Exposure group"] = (
Expand Down Expand Up @@ -557,6 +579,7 @@ def build_df(self) -> pd.DataFrame:
)
df = df.drop(columns="result_group-p_value_qualifier")

df = self._add_ci(df)
df = self._add_percent_control(df)

df = df.rename(
Expand All @@ -578,10 +601,6 @@ def build_df(self) -> pd.DataFrame:
"outcome-diagnostic": "diagnostic",
"outcome-age_of_measurement": "age of outcome measurement",
"outcome-effects": "tags",
}
)
df = df.rename(
columns={
"cs-id": "comparison set id",
"cs-name": "comparison set name",
"exposure-id": "exposure id",
Expand Down
63 changes: 16 additions & 47 deletions hawc/apps/invitro/exports.py
Original file line number Diff line number Diff line change
Expand Up @@ -6,7 +6,7 @@
from ..common.exports import Exporter, ModelExport
from ..common.helper import FlatFileExporter, df_move_column
from ..common.models import sql_display, str_m2m
from ..materialized.models import FinalRiskOfBiasScore
from ..materialized.exports import get_final_score_df
from ..study.exports import StudyExport
from . import constants, models

Expand Down Expand Up @@ -36,6 +36,17 @@ def assessment_categories(assessment_id: int) -> pd.DataFrame:
return df2


def handle_categories(df: pd.DataFrame, assessment_id: int) -> pd.DataFrame:
    """Merge assessment category columns into the frame and drop the raw category-id column.

    The nullable Int64 cast lets rows without a category join cleanly; category
    columns are repositioned next to where the id column sat when present.
    """
    categories = assessment_categories(assessment_id)
    df["iv_endpoint-category_id"] = df["iv_endpoint-category_id"].astype("Int64")
    merged = df.merge(categories, left_on="iv_endpoint-category_id", right_index=True, how="left")
    if "Category 1" in merged.columns:
        merged = df_move_column(
            merged, "Category 1", "iv_endpoint-category_id", n_cols=categories.shape[1]
        )
    return merged.drop(columns=["iv_endpoint-category_id"])


class IVChemicalExport(ModelExport):
def get_value_map(self):
return {
Expand Down Expand Up @@ -310,15 +321,6 @@ def _func(group_df: pd.DataFrame) -> pd.DataFrame:
.reset_index(drop=True)
)

def handle_categories(self, df: pd.DataFrame) -> pd.DataFrame:
category_df = assessment_categories(self.kwargs["assessment"].id)
df2 = df.merge(category_df, left_on="iv_endpoint-category_id", right_index=True, how="left")
if "Category 1" in df2.columns:
df2 = df_move_column(
df2, "Category 1", "iv_endpoint-category_id", n_cols=category_df.shape[1]
)
return df2

def build_df(self) -> pd.DataFrame:
df = InvitroExporter().get_df(
self.queryset.select_related(
Expand All @@ -329,26 +331,14 @@ def build_df(self) -> pd.DataFrame:
)
if obj := self.queryset.first():
study_ids = list(df["study-id"].unique())
rob_headers, rob_data = FinalRiskOfBiasScore.get_dp_export(
obj.assessment_id,
study_ids,
"invitro",
)
rob_df = pd.DataFrame(
data=[
[rob_data[(study_id, metric_id)] for metric_id in rob_headers.keys()]
for study_id in study_ids
],
columns=list(rob_headers.values()),
index=study_ids,
)
rob_df = get_final_score_df(obj.assessment_id, study_ids, "invitro")
df = df.join(rob_df, on="study-id")

df["key"] = df["iv_endpoint-id"]

df = self.handle_dose_groups(df)
df = self.handle_benchmarks(df)
df = self.handle_categories(df)
df = handle_categories(df, self.kwargs["assessment"].id)

df = df.rename(
columns={
Expand Down Expand Up @@ -489,15 +479,6 @@ def __func(row: pd.Series) -> pd.Series:
.reset_index(drop=True)
)

def handle_categories(self, df: pd.DataFrame) -> pd.DataFrame:
category_df = assessment_categories(self.kwargs["assessment"].id)
df2 = df.merge(category_df, left_on="iv_endpoint-category_id", right_index=True, how="left")
if "Category 1" in df2.columns:
df2 = df_move_column(
df2, "Category 1", "iv_endpoint-category_id", n_cols=category_df.shape[1]
)
return df2

def build_df(self) -> pd.DataFrame:
df = InvitroGroupExporter().get_df(
self.queryset.select_related(
Expand All @@ -509,27 +490,15 @@ def build_df(self) -> pd.DataFrame:
)
if obj := self.queryset.first():
study_ids = list(df["study-id"].unique())
rob_headers, rob_data = FinalRiskOfBiasScore.get_dp_export(
obj.assessment_id,
study_ids,
"invitro",
)
rob_df = pd.DataFrame(
data=[
[rob_data[(study_id, metric_id)] for metric_id in rob_headers.keys()]
for study_id in study_ids
],
columns=list(rob_headers.values()),
index=study_ids,
)
rob_df = get_final_score_df(obj.assessment_id, study_ids, "invitro")
df = df.join(rob_df, on="study-id")

df["key"] = df["iv_endpoint_group-id"]
df = df.drop(columns=["iv_endpoint_group-id"])

df = self.handle_stdev(df)
df = self.handle_dose_groups(df)
df = self.handle_categories(df)
df = handle_categories(df, self.kwargs["assessment"].id)

df["iv_endpoint_group-difference_control"] = df["iv_endpoint_group-difference_control"].map(
models.IVEndpointGroup.DIFFERENCE_CONTROL_SYMBOLS
Expand Down
13 changes: 13 additions & 0 deletions hawc/apps/materialized/exports.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,13 @@
import pandas as pd

from ..common.helper import unique_text_list
from .models import FinalRiskOfBiasScore


def get_final_score_df(assessment_id: int, ids: list[int], model: str) -> pd.DataFrame:
    """Return final risk-of-bias scores as a DataFrame, one row per id.

    Columns are the metric display names, de-duplicated via `unique_text_list`
    so repeated metric names become valid, distinct column headers.
    """
    headers, scores = FinalRiskOfBiasScore.get_dp_export(assessment_id, ids, model)
    metric_ids = list(headers.keys())
    rows = [[scores[(item_id, metric_id)] for metric_id in metric_ids] for item_id in ids]
    return pd.DataFrame(
        data=rows,
        columns=unique_text_list(list(headers.values())),
        index=ids,
    )
1 change: 1 addition & 0 deletions hawc/main/urls.py
Original file line number Diff line number Diff line change
Expand Up @@ -74,6 +74,7 @@
path("epi-meta/api/", include(hawc.apps.epimeta.urls.router.urls)),
path("epidemiology/api/", include(hawc.apps.epiv2.urls.router.urls)),
path("in-vitro/api/", include(hawc.apps.invitro.urls.router.urls)),
path("udf/api/", include(hawc.apps.udf.urls.router.urls)),
path("lit/api/", include(hawc.apps.lit.urls.router.urls)),
path("mgmt/api/", include(hawc.apps.mgmt.urls.router.urls)),
path("rob/api/", include(hawc.apps.riskofbias.urls.router.urls)),
Expand Down
1 change: 0 additions & 1 deletion tests/conftest.py
Original file line number Diff line number Diff line change
Expand Up @@ -98,7 +98,6 @@ def vcr_cassette_dir(request):


@pytest.fixture
@pytest.mark.django_db
def pm_user():
return get_user_model().objects.get(email="[email protected]")

Expand Down
Loading