diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml
index 1379b99c2..37616dd38 100644
--- a/.github/workflows/test.yml
+++ b/.github/workflows/test.yml
@@ -7,6 +7,7 @@ on:
     paths:
       - ".github/workflows/*"
       - "pymc_experimental/**"
+      - "tests/**"
       - "setup.py"
       - "pyproject.toml"
       - "buildosx"
@@ -20,7 +21,7 @@ jobs:
         os: [ubuntu-latest]
         python-version: ["3.10"]
         test-subset:
-          - pymc_experimental/tests
+          - tests
       fail-fast: false
     runs-on: ${{ matrix.os }}
     env:
@@ -58,7 +59,7 @@ jobs:
         os: [windows-latest]
         python-version: ["3.12"]
         test-subset:
-          - pymc_experimental/tests
+          - tests
       fail-fast: false
     runs-on: ${{ matrix.os }}
     env:
diff --git a/codecov.yml b/codecov.yml
index 6e7041ca8..0a251b891 100644
--- a/codecov.yml
+++ b/codecov.yml
@@ -22,7 +22,7 @@ coverage:
       base: auto

 ignore:
-  - "pymc_experimental/tests/*"
+  - "tests/*"

 comment:
   layout: "reach, diff, flags, files"
diff --git a/notebooks/SARMA Example.ipynb b/notebooks/SARMA Example.ipynb
index 738afee30..33502eec4 100644
--- a/notebooks/SARMA Example.ipynb
+++ b/notebooks/SARMA Example.ipynb
@@ -2221,7 +2221,7 @@
    ],
    "source": [
     "airpass = pd.read_csv(\n",
-    "    \"../pymc_experimental/tests/statespace/test_data/airpass.csv\",\n",
+    "    \"../tests/statespace/test_data/airpass.csv\",\n",
     "    parse_dates=True,\n",
     "    date_format=\"%Y %b\",\n",
     "    index_col=0,\n",
diff --git a/notebooks/discrete_markov_chain.ipynb b/notebooks/discrete_markov_chain.ipynb
index 8204b3f14..a629c7673 100644
--- a/notebooks/discrete_markov_chain.ipynb
+++ b/notebooks/discrete_markov_chain.ipynb
@@ -2,6 +2,7 @@
  "cells": [
   {
    "cell_type": "markdown",
+   "id": "e1e16d35",
    "metadata": {},
    "source": [
     "# Discrete Markov Chain Distribution"
@@ -10,6 +11,7 @@
   {
    "cell_type": "code",
    "execution_count": 1,
+   "id": "cc15e4cd",
    "metadata": {},
    "outputs": [],
    "source": [
@@ -20,8 +22,21 @@
   {
    "cell_type": "code",
    "execution_count": 2,
+   "id": "7c983797",
    "metadata": {},
-   "outputs": [],
+   "outputs": [
+    {
+     "ename": "ModuleNotFoundError",
+     "evalue": "No module named 'pymc_experimental'",
+     "output_type": "error",
+     "traceback": [
+      "\u001b[0;31m---------------------------------------------------------------------------\u001b[0m",
+      "\u001b[0;31mModuleNotFoundError\u001b[0m Traceback (most recent call last)",
+      "Cell \u001b[0;32mIn[2], line 11\u001b[0m\n\u001b[1;32m 7\u001b[0m \u001b[38;5;28;01mimport\u001b[39;00m \u001b[38;5;21;01mmatplotlib\u001b[39;00m\u001b[38;5;21;01m.\u001b[39;00m\u001b[38;5;21;01mpyplot\u001b[39;00m \u001b[38;5;28;01mas\u001b[39;00m \u001b[38;5;21;01mplt\u001b[39;00m\n\u001b[1;32m 9\u001b[0m \u001b[38;5;28;01mfrom\u001b[39;00m \u001b[38;5;21;01mmatplotlib\u001b[39;00m \u001b[38;5;28;01mimport\u001b[39;00m ticker \u001b[38;5;28;01mas\u001b[39;00m mtick\n\u001b[0;32m---> 11\u001b[0m \u001b[38;5;28;01mfrom\u001b[39;00m \u001b[38;5;21;01mpymc_experimental\u001b[39;00m\u001b[38;5;21;01m.\u001b[39;00m\u001b[38;5;21;01mdistributions\u001b[39;00m\u001b[38;5;21;01m.\u001b[39;00m\u001b[38;5;21;01mtimeseries\u001b[39;00m \u001b[38;5;28;01mimport\u001b[39;00m DiscreteMarkovChain\n",
+      "\u001b[0;31mModuleNotFoundError\u001b[0m: No module named 'pymc_experimental'"
+     ]
+    }
+   ],
    "source": [
     "import arviz as az\n",
     "import numpy as np\n",
@@ -38,6 +53,7 @@
   },
   {
    "cell_type": "markdown",
+   "id": "a36d5e56",
    "metadata": {},
    "source": [
     "## Demonstration of API "
@@ -45,18 +61,10 @@
   },
   {
    "cell_type": "code",
-   "execution_count": 3,
+   "execution_count": null,
+   "id": "05d16797",
    "metadata": {},
-   "outputs": [
-    {
-     "name": "stderr",
-     "output_type": "stream",
-     "text": [
-      "/home/ricardo/Documents/Projects/pymc-experimental/pymc_experimental/distributions/timeseries.py:159: UserWarning: Initial distribution not specified, defaulting to `Categorical.dist(p=pt.full((k_states, ), 1/k_states), shape=...)`. You can specify an init_dist manually to suppress this warning.\n",
-      "  warnings.warn(\n"
-     ]
-    }
-   ],
+   "outputs": [],
    "source": [
     "with pm.Model() as model:\n",
     "    logit_P = pm.Normal(\"logit_P\", sigma=0.1, size=(3, 3))\n",
@@ -66,6 +74,7 @@
   {
    "cell_type": "code",
    "execution_count": 4,
+   "id": "911fc178",
    "metadata": {},
    "outputs": [
     {
@@ -131,6 +140,7 @@
   },
   {
    "cell_type": "markdown",
+   "id": "5d74be4c",
    "metadata": {},
    "source": [
     "Dims of the output are `(batch, time)`."
@@ -139,6 +149,7 @@
   {
    "cell_type": "code",
    "execution_count": 5,
+   "id": "88acfab3",
    "metadata": {},
    "outputs": [
     {
@@ -167,6 +178,7 @@
   },
   {
    "cell_type": "markdown",
+   "id": "2d67e342",
    "metadata": {},
    "source": [
     "## Parameter Recovery\n",
@@ -177,6 +189,7 @@
   {
    "cell_type": "code",
    "execution_count": 6,
+   "id": "33081e17",
    "metadata": {},
    "outputs": [],
    "source": [
@@ -201,6 +214,7 @@
   {
    "cell_type": "code",
    "execution_count": 7,
+   "id": "f92d551a",
    "metadata": {},
    "outputs": [],
    "source": [
@@ -210,6 +224,7 @@
   {
    "cell_type": "code",
    "execution_count": 8,
+   "id": "31751f8c",
    "metadata": {
     "scrolled": false
    },
@@ -287,6 +302,7 @@
   {
    "cell_type": "code",
    "execution_count": 9,
+   "id": "87bdb1e0",
    "metadata": {},
    "outputs": [
     {
@@ -306,6 +322,7 @@
   },
   {
    "cell_type": "markdown",
+   "id": "610b3596",
    "metadata": {},
    "source": [
     "## Hidden markov model\n",
@@ -318,6 +335,7 @@
   {
    "cell_type": "code",
    "execution_count": 10,
+   "id": "dc1f9e25",
    "metadata": {},
    "outputs": [
     {
@@ -348,6 +366,7 @@
   {
    "cell_type": "code",
    "execution_count": 11,
+   "id": "67f7e824",
    "metadata": {},
    "outputs": [
     {
@@ -524,6 +543,7 @@
   {
    "cell_type": "code",
    "execution_count": 12,
+   "id": "1b5d3444",
    "metadata": {
     "scrolled": false
    },
@@ -594,6 +614,7 @@
   {
    "cell_type": "code",
    "execution_count": 13,
+   "id": "04de50a9",
    "metadata": {},
    "outputs": [
     {
@@ -758,6 +779,7 @@
   },
   {
    "cell_type": "markdown",
+   "id": "44c1f25b",
    "metadata": {},
    "source": [
     "It's quite difficult to sample this model, so I use a very high `target_accept` and a very large number of draws. It seems to be hard to get a sufficient number of effective samples for rare rates without a lot of draws. "
@@ -766,6 +788,7 @@
   {
    "cell_type": "code",
    "execution_count": 14,
+   "id": "e382b4d4",
    "metadata": {},
    "outputs": [
     {
@@ -848,6 +871,7 @@
   },
   {
    "cell_type": "markdown",
+   "id": "f778d068",
    "metadata": {},
    "source": [
     "### Post-Estimation Diagnostics\n",
@@ -858,6 +882,7 @@
   {
    "cell_type": "code",
    "execution_count": 15,
+   "id": "c45b726c",
    "metadata": {},
    "outputs": [
     {
@@ -887,6 +912,7 @@
   },
   {
    "cell_type": "markdown",
+   "id": "b057e84c",
    "metadata": {},
    "source": [
     "...but the trace plots look great! "
@@ -895,6 +921,7 @@
   {
    "cell_type": "code",
    "execution_count": 16,
+   "id": "34bb9eb9",
    "metadata": {},
    "outputs": [
     {
@@ -915,6 +942,7 @@
   },
   {
    "cell_type": "markdown",
+   "id": "65bb1c27",
    "metadata": {},
    "source": [
     "Even after 20,000 draws, we only have about 500 samples for the transition probabilities to the more rare state 1."
@@ -923,6 +951,7 @@
   {
    "cell_type": "code",
    "execution_count": 17,
+   "id": "2d050b4e",
    "metadata": {},
    "outputs": [
     {
@@ -1133,6 +1162,7 @@
   },
   {
    "cell_type": "markdown",
+   "id": "aa017801",
    "metadata": {},
    "source": [
     "## Comparison with Statsmodels"
@@ -1141,6 +1171,7 @@
   {
    "cell_type": "code",
    "execution_count": 18,
+   "id": "8200aae3",
    "metadata": {},
    "outputs": [],
    "source": [
@@ -1155,6 +1186,7 @@
   {
    "cell_type": "code",
    "execution_count": 19,
+   "id": "febd7972",
    "metadata": {},
    "outputs": [
     {
@@ -1196,6 +1228,7 @@
   },
   {
    "cell_type": "markdown",
+   "id": "5588a4f6",
    "metadata": {},
    "source": [
     "## Posterior Prediction"
@@ -1204,6 +1237,7 @@
   {
    "cell_type": "code",
    "execution_count": 20,
+   "id": "c4399802",
    "metadata": {},
    "outputs": [
     {
@@ -1266,6 +1300,7 @@
   {
    "cell_type": "code",
    "execution_count": 21,
+   "id": "47c8ce2a",
    "metadata": {},
    "outputs": [],
    "source": [
@@ -1279,6 +1314,7 @@
   {
    "cell_type": "code",
    "execution_count": 22,
+   "id": "9c047eb0",
    "metadata": {},
    "outputs": [],
    "source": [
@@ -1288,6 +1324,7 @@
   {
    "cell_type": "code",
    "execution_count": 23,
+   "id": "9a8b3055",
    "metadata": {},
    "outputs": [
     {
@@ -1317,6 +1354,7 @@
   },
   {
    "cell_type": "markdown",
+   "id": "92eb9a8f",
    "metadata": {},
    "source": [
     "## Regime Inference\n",
@@ -1327,6 +1365,7 @@
   {
    "cell_type": "code",
    "execution_count": 24,
+   "id": "4487098b",
    "metadata": {},
    "outputs": [
     {
@@ -1364,6 +1403,7 @@
   {
    "cell_type": "code",
    "execution_count": null,
+   "id": "7f037fb9",
    "metadata": {},
    "outputs": [],
    "source": []
@@ -1372,9 +1412,9 @@
  "metadata": {
   "hide_input": false,
   "kernelspec": {
-   "display_name": "pymc-experimental",
+   "display_name": "Python 3 (ipykernel)",
    "language": "python",
-   "name": "pymc-experimental"
+   "name": "python3"
   },
   "language_info": {
    "codemirror_mode": {
@@ -1386,7 +1426,7 @@
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
-  "version": "3.10.6"
+  "version": "3.11.9"
  },
  "toc": {
   "base_numbering": 1,
diff --git a/pymc_experimental/distributions/timeseries.py b/pymc_experimental/distributions/timeseries.py
index 91da141ac..02fb6b738 100644
--- a/pymc_experimental/distributions/timeseries.py
+++ b/pymc_experimental/distributions/timeseries.py
@@ -249,8 +249,8 @@ def discrete_mc_logp(op, values, P, steps, init_dist, state_rng, **kwargs):

     indexes = [value[..., i : -(n_lags - i) if n_lags != i else None] for i in range(n_lags + 1)]

-    mc_logprob = logp(init_dist, value[..., :n_lags]).sum(axis=-1)
-    mc_logprob += pt.log(P[tuple(indexes)]).sum(axis=-1)
+    mc_logprob = logp(init_dist, value[..., :n_lags])
+    mc_logprob += pt.log(P[tuple(indexes)])

     # We cannot leave any RV in the logp graph, even if just for an assert
     [init_dist_leading_dim] = constant_fold(
diff --git a/pymc_experimental/model/marginal_model.py b/pymc_experimental/model/marginal_model.py
index ead9a362b..da3bccf6f 100644
--- a/pymc_experimental/model/marginal_model.py
+++ b/pymc_experimental/model/marginal_model.py
@@ -3,6 +3,7 @@

 import numpy as np
 import pymc
+import pytensor
 import pytensor.tensor as pt
 from arviz import InferenceData, dict_to_dataset
 from pymc import SymbolicRandomVariable
@@ -324,6 +325,7 @@ def recover_marginals(
         self,
         idata: InferenceData,
         var_names: Sequence[str] | None = None,
+        group: str = "posterior",
         return_samples: bool = True,
         extend_inferencedata: bool = True,
         random_seed: RandomState = None,
@@ -344,6 +346,8 @@
             List of variable names for which to compute posterior log-probabilities and samples. Defaults to all marginalized variables
         return_samples : bool, default True
             If True, also return samples of the marginalized variables
+        group: str, default "posterior"
+            Group from which to find samples. One of "posterior" or "prior"
         extend_inferencedata : bool, default True
             Whether to extend the original InferenceData or return a new one
         random_seed: int, array-like of int or SeedSequence, optional
@@ -385,7 +389,7 @@
         else:
             seeds = [None] * len(vars_to_recover)

-        posterior = idata.posterior
+        posterior = idata[group]

         # Remove Deterministics
         posterior_values = posterior[
@@ -406,7 +410,7 @@ def transform_input(inputs):
         rv_dict = {}
         rv_dims = {}
         for seed, marginalized_rv in zip(seeds, vars_to_recover):
-            supported_dists = (Bernoulli, Categorical, DiscreteUniform)
+            supported_dists = (Bernoulli, Categorical, DiscreteUniform, DiscreteMarkovChain)
             if not isinstance(marginalized_rv.owner.op, supported_dists):
                 raise NotImplementedError(
                     f"RV with distribution {marginalized_rv.owner.op} cannot be recovered. "
@@ -418,6 +422,9 @@ def transform_input(inputs):
             m.unmarginalize([marginalized_rv])
             dependent_vars = find_conditional_dependent_rvs(marginalized_rv, m.basic_RVs)
             joint_logps = m.logp(vars=[marginalized_rv] + dependent_vars, sum=False)
+            joint_logps = [
+                pytensor.printing.Print(f"{x.name}")(x) for i, x in enumerate(joint_logps)
+            ]

             marginalized_value = m.rvs_to_values[marginalized_rv]
             other_values = [v for v in m.value_vars if v is not marginalized_value]
diff --git a/pymc_experimental/tests/__init__.py b/tests/__init__.py
similarity index 100%
rename from pymc_experimental/tests/__init__.py
rename to tests/__init__.py
diff --git a/pymc_experimental/tests/distributions/__init__.py b/tests/distributions/__init__.py
similarity index 100%
rename from pymc_experimental/tests/distributions/__init__.py
rename to tests/distributions/__init__.py
diff --git a/pymc_experimental/tests/distributions/test_continuous.py b/tests/distributions/test_continuous.py
similarity index 100%
rename from pymc_experimental/tests/distributions/test_continuous.py
rename to tests/distributions/test_continuous.py
diff --git a/pymc_experimental/tests/distributions/test_discrete.py b/tests/distributions/test_discrete.py
similarity index 100%
rename from pymc_experimental/tests/distributions/test_discrete.py
rename to tests/distributions/test_discrete.py
diff --git a/pymc_experimental/tests/distributions/test_discrete_markov_chain.py b/tests/distributions/test_discrete_markov_chain.py
similarity index 100%
rename from pymc_experimental/tests/distributions/test_discrete_markov_chain.py
rename to tests/distributions/test_discrete_markov_chain.py
diff --git a/pymc_experimental/tests/distributions/test_multivariate.py b/tests/distributions/test_multivariate.py
similarity index 100%
rename from pymc_experimental/tests/distributions/test_multivariate.py
rename to tests/distributions/test_multivariate.py
diff --git a/pymc_experimental/tests/model/__init__.py b/tests/model/__init__.py
similarity index 100%
rename from pymc_experimental/tests/model/__init__.py
rename to tests/model/__init__.py
diff --git a/pymc_experimental/tests/model/test_marginal_model.py b/tests/model/test_marginal_model.py
similarity index 96%
rename from pymc_experimental/tests/model/test_marginal_model.py
rename to tests/model/test_marginal_model.py
index 31e38615a..447ae0a79 100644
--- a/pymc_experimental/tests/model/test_marginal_model.py
+++ b/tests/model/test_marginal_model.py
@@ -22,7 +22,7 @@
     is_conditional_dependent,
     marginalize,
 )
-from pymc_experimental.tests.utils import equal_computations_up_to_root
+from tests.utils import equal_computations_up_to_root


 @pytest.fixture
@@ -707,6 +707,35 @@ def test_marginalized_hmm_normal_emission(batch_chain, batch_emission):
     np.testing.assert_allclose(logp_fn({f"emission": test_value}), expected_logp)


+def test_unmarginalize_hmm_normal_emission():
+    batch_chain = False
+    batch_emission = False
+
+    if batch_chain and not batch_emission:
+        pytest.skip("Redundant implicit combination")
+
+    with MarginalModel() as m:
+        P = [[0, 1], [1, 0]]
+        init_dist = pm.Categorical.dist(p=[1, 0])
+        chain = DiscreteMarkovChain(
+            "chain", P=P, init_dist=init_dist, steps=3, shape=(3, 4) if batch_chain else None
+        )
+        emission = pm.Normal(
+            "emission", mu=chain * 2 - 1, sigma=1e-1, shape=(3, 4) if batch_emission else None
+        )
+
+    m.marginalize([chain])
+
+    # logp_fn = m.compile_logp()
+    #
+    # test_value = np.array([-1, 1, -1, 1])
+    # expected_logp = pm.logp(pm.Normal.dist(0, 1e-1), np.zeros_like(test_value)).sum().eval()
+    # if batch_emission:
+    #     test_value = np.broadcast_to(test_value, (3, 4))
+    #     expected_logp *= 3
+    # np.testing.assert_allclose(logp_fn({f"emission": test_value}), expected_logp)
+
+
 @pytest.mark.parametrize(
     "categorical_emission",
     [False, True],
diff --git a/pymc_experimental/tests/model/test_model_api.py b/tests/model/test_model_api.py
similarity index 100%
rename from pymc_experimental/tests/model/test_model_api.py
rename to tests/model/test_model_api.py
diff --git a/pymc_experimental/tests/statespace/__init__.py b/tests/model/transforms/__init__.py
similarity index 100%
rename from pymc_experimental/tests/statespace/__init__.py
rename to tests/model/transforms/__init__.py
diff --git a/pymc_experimental/tests/model/transforms/test_autoreparam.py b/tests/model/transforms/test_autoreparam.py
similarity index 100%
rename from pymc_experimental/tests/model/transforms/test_autoreparam.py
rename to tests/model/transforms/test_autoreparam.py
diff --git a/pymc_experimental/tests/statespace/utilities/__init__.py b/tests/statespace/__init__.py
similarity index 100%
rename from pymc_experimental/tests/statespace/utilities/__init__.py
rename to tests/statespace/__init__.py
diff --git a/pymc_experimental/tests/statespace/test_SARIMAX.py b/tests/statespace/test_SARIMAX.py
similarity index 98%
rename from pymc_experimental/tests/statespace/test_SARIMAX.py
rename to tests/statespace/test_SARIMAX.py
index fc09a632b..fe9d8435e 100644
--- a/pymc_experimental/tests/statespace/test_SARIMAX.py
+++ b/tests/statespace/test_SARIMAX.py
@@ -17,10 +17,10 @@
     SARIMAX_STATE_STRUCTURES,
     SHORT_NAME_TO_LONG,
 )
-from pymc_experimental.tests.statespace.utilities.shared_fixtures import (  # pylint: disable=unused-import
+from tests.statespace.utilities.shared_fixtures import (  # pylint: disable=unused-import
     rng,
 )
-from pymc_experimental.tests.statespace.utilities.test_helpers import (
+from tests.statespace.utilities.test_helpers import (
     load_nile_test_data,
     make_stationary_params,
     simulate_from_numpy_model,
diff --git a/pymc_experimental/tests/statespace/test_VARMAX.py b/tests/statespace/test_VARMAX.py
similarity index 96%
rename from pymc_experimental/tests/statespace/test_VARMAX.py
rename to tests/statespace/test_VARMAX.py
index 2ca0b3635..43faebe8e 100644
--- a/pymc_experimental/tests/statespace/test_VARMAX.py
+++ b/tests/statespace/test_VARMAX.py
@@ -11,7 +11,7 @@

 from pymc_experimental.statespace import BayesianVARMAX
 from pymc_experimental.statespace.utils.constants import SHORT_NAME_TO_LONG
-from pymc_experimental.tests.statespace.utilities.shared_fixtures import (  # pylint: disable=unused-import
+from tests.statespace.utilities.shared_fixtures import (  # pylint: disable=unused-import
     rng,
 )

@@ -25,7 +25,7 @@
 @pytest.fixture(scope="session")
 def data():
     df = pd.read_csv(
-        "pymc_experimental/tests/statespace/test_data/statsmodels_macrodata_processed.csv",
+        "tests/statespace/test_data/statsmodels_macrodata_processed.csv",
         index_col=0,
         parse_dates=True,
     ).astype(floatX)
diff --git a/pymc_experimental/tests/statespace/test_coord_assignment.py b/tests/statespace/test_coord_assignment.py
similarity index 97%
rename from pymc_experimental/tests/statespace/test_coord_assignment.py
rename to tests/statespace/test_coord_assignment.py
index eadbc85f9..e7531d925 100644
--- a/pymc_experimental/tests/statespace/test_coord_assignment.py
+++ b/tests/statespace/test_coord_assignment.py
@@ -18,9 +18,7 @@
     NO_FREQ_INFO_WARNING,
     NO_TIME_INDEX_WARNING,
 )
-from pymc_experimental.tests.statespace.utilities.test_helpers import (
-    load_nile_test_data,
-)
+from tests.statespace.utilities.test_helpers import load_nile_test_data

 function_names = ["pandas_date_freq", "pandas_date_nofreq", "pandas_nodate", "numpy", "pytensor"]
 expected_warning = [
diff --git a/pymc_experimental/tests/statespace/test_data/airpass.csv b/tests/statespace/test_data/airpass.csv
similarity index 100%
rename from pymc_experimental/tests/statespace/test_data/airpass.csv
rename to tests/statespace/test_data/airpass.csv
diff --git a/pymc_experimental/tests/statespace/test_data/airpassangers.csv b/tests/statespace/test_data/airpassangers.csv
similarity index 100%
rename from pymc_experimental/tests/statespace/test_data/airpassangers.csv
rename to tests/statespace/test_data/airpassangers.csv
diff --git a/pymc_experimental/tests/statespace/test_data/nile.csv b/tests/statespace/test_data/nile.csv
similarity index 100%
rename from pymc_experimental/tests/statespace/test_data/nile.csv
rename to tests/statespace/test_data/nile.csv
diff --git a/pymc_experimental/tests/statespace/test_data/statsmodels_macrodata_processed.csv b/tests/statespace/test_data/statsmodels_macrodata_processed.csv
similarity index 100%
rename from pymc_experimental/tests/statespace/test_data/statsmodels_macrodata_processed.csv
rename to tests/statespace/test_data/statsmodels_macrodata_processed.csv
diff --git a/pymc_experimental/tests/statespace/test_distributions.py b/tests/statespace/test_distributions.py
similarity index 97%
rename from pymc_experimental/tests/statespace/test_distributions.py
rename to tests/statespace/test_distributions.py
index 441b255e9..123265b83 100644
--- a/pymc_experimental/tests/statespace/test_distributions.py
+++ b/tests/statespace/test_distributions.py
@@ -13,10 +13,10 @@
     OBS_STATE_DIM,
     TIME_DIM,
 )
-from pymc_experimental.tests.statespace.utilities.shared_fixtures import (  # pylint: disable=unused-import
+from tests.statespace.utilities.shared_fixtures import (  # pylint: disable=unused-import
     rng,
 )
-from pymc_experimental.tests.statespace.utilities.test_helpers import (
+from tests.statespace.utilities.test_helpers import (
     delete_rvs_from_model,
     fast_eval,
     load_nile_test_data,
diff --git a/pymc_experimental/tests/statespace/test_kalman_filter.py b/tests/statespace/test_kalman_filter.py
similarity index 98%
rename from pymc_experimental/tests/statespace/test_kalman_filter.py
rename to tests/statespace/test_kalman_filter.py
index 0d064cd86..dbc31cda8 100644
--- a/pymc_experimental/tests/statespace/test_kalman_filter.py
+++ b/tests/statespace/test_kalman_filter.py
@@ -13,10 +13,10 @@
     UnivariateFilter,
 )
 from pymc_experimental.statespace.filters.kalman_filter import BaseFilter
-from pymc_experimental.tests.statespace.utilities.shared_fixtures import (  # pylint: disable=unused-import
+from tests.statespace.utilities.shared_fixtures import (  # pylint: disable=unused-import
     rng,
 )
-from pymc_experimental.tests.statespace.utilities.test_helpers import (
+from tests.statespace.utilities.test_helpers import (
     get_expected_shape,
     get_sm_state_from_output_name,
     initialize_filter,
diff --git a/pymc_experimental/tests/statespace/test_representation.py b/tests/statespace/test_representation.py
similarity index 96%
rename from pymc_experimental/tests/statespace/test_representation.py
rename to tests/statespace/test_representation.py
index f6cb06a36..10388d94d 100644
--- a/pymc_experimental/tests/statespace/test_representation.py
+++ b/tests/statespace/test_representation.py
@@ -6,11 +6,8 @@
 from numpy.testing import assert_allclose

 from pymc_experimental.statespace.core.representation import PytensorRepresentation
-from pymc_experimental.tests.statespace.utilities.shared_fixtures import TEST_SEED
-from pymc_experimental.tests.statespace.utilities.test_helpers import (
-    fast_eval,
-    make_test_inputs,
-)
+from tests.statespace.utilities.shared_fixtures import TEST_SEED
+from tests.statespace.utilities.test_helpers import fast_eval, make_test_inputs

 floatX = pytensor.config.floatX
 atol = 1e-12 if floatX == "float64" else 1e-6
diff --git a/pymc_experimental/tests/statespace/test_statespace.py b/tests/statespace/test_statespace.py
similarity index 98%
rename from pymc_experimental/tests/statespace/test_statespace.py
rename to tests/statespace/test_statespace.py
index e60378df3..8b61cb515 100644
--- a/pymc_experimental/tests/statespace/test_statespace.py
+++ b/tests/statespace/test_statespace.py
@@ -13,10 +13,10 @@
     MATRIX_NAMES,
     SMOOTHER_OUTPUT_NAMES,
 )
-from pymc_experimental.tests.statespace.utilities.shared_fixtures import (  # pylint: disable=unused-import
+from tests.statespace.utilities.shared_fixtures import (  # pylint: disable=unused-import
     rng,
 )
-from pymc_experimental.tests.statespace.utilities.test_helpers import (
+from tests.statespace.utilities.test_helpers import (
     fast_eval,
     load_nile_test_data,
     make_test_inputs,
diff --git a/pymc_experimental/tests/statespace/test_structural.py b/tests/statespace/test_structural.py
similarity index 99%
rename from pymc_experimental/tests/statespace/test_structural.py
rename to tests/statespace/test_structural.py
index 63d2c452d..af8a9fe81 100644
--- a/pymc_experimental/tests/statespace/test_structural.py
+++ b/tests/statespace/test_structural.py
@@ -24,10 +24,10 @@
     SHOCK_DIM,
     SHORT_NAME_TO_LONG,
 )
-from pymc_experimental.tests.statespace.utilities.shared_fixtures import (  # pylint: disable=unused-import
+from tests.statespace.utilities.shared_fixtures import (  # pylint: disable=unused-import
     rng,
 )
-from pymc_experimental.tests.statespace.utilities.test_helpers import (
+from tests.statespace.utilities.test_helpers import (
     assert_pattern_repeats,
     simulate_from_numpy_model,
     unpack_symbolic_matrices_with_params,
diff --git a/tests/statespace/utilities/__init__.py b/tests/statespace/utilities/__init__.py
new file mode 100644
index 000000000..e69de29bb
diff --git a/pymc_experimental/tests/statespace/utilities/shared_fixtures.py b/tests/statespace/utilities/shared_fixtures.py
similarity index 100%
rename from pymc_experimental/tests/statespace/utilities/shared_fixtures.py
rename to tests/statespace/utilities/shared_fixtures.py
diff --git a/pymc_experimental/tests/statespace/utilities/statsmodel_local_level.py b/tests/statespace/utilities/statsmodel_local_level.py
similarity index 100%
rename from pymc_experimental/tests/statespace/utilities/statsmodel_local_level.py
rename to tests/statespace/utilities/statsmodel_local_level.py
diff --git a/pymc_experimental/tests/statespace/utilities/test_helpers.py b/tests/statespace/utilities/test_helpers.py
similarity index 97%
rename from pymc_experimental/tests/statespace/utilities/test_helpers.py
rename to tests/statespace/utilities/test_helpers.py
index de5999fb6..3e8c7593d 100644
--- a/pymc_experimental/tests/statespace/utilities/test_helpers.py
+++ b/tests/statespace/utilities/test_helpers.py
@@ -11,9 +11,7 @@
     MATRIX_NAMES,
     SHORT_NAME_TO_LONG,
 )
-from pymc_experimental.tests.statespace.utilities.statsmodel_local_level import (
-    LocalLinearTrend,
-)
+from tests.statespace.utilities.statsmodel_local_level import LocalLinearTrend

 floatX = pytensor.config.floatX

@@ -21,7 +19,7 @@
 def load_nile_test_data():
     from importlib.metadata import version

-    nile = pd.read_csv("pymc_experimental/tests/statespace/test_data/nile.csv", dtype={"x": floatX})
+    nile = pd.read_csv("tests/statespace/test_data/nile.csv", dtype={"x": floatX})
     major, minor, rev = map(int, version("pandas").split("."))
     if major >= 2 and minor >= 2 and rev >= 0:
         freq_str = "YS-JAN"
diff --git a/pymc_experimental/tests/test_blackjax_smc.py b/tests/test_blackjax_smc.py
similarity index 100%
rename from pymc_experimental/tests/test_blackjax_smc.py
rename to tests/test_blackjax_smc.py
diff --git a/pymc_experimental/tests/test_histogram_approximation.py b/tests/test_histogram_approximation.py
similarity index 100%
rename from pymc_experimental/tests/test_histogram_approximation.py
rename to tests/test_histogram_approximation.py
diff --git a/pymc_experimental/tests/test_linearmodel.py b/tests/test_linearmodel.py
similarity index 100%
rename from pymc_experimental/tests/test_linearmodel.py
rename to tests/test_linearmodel.py
diff --git a/pymc_experimental/tests/test_model_builder.py b/tests/test_model_builder.py
similarity index 100%
rename from pymc_experimental/tests/test_model_builder.py
rename to tests/test_model_builder.py
diff --git a/pymc_experimental/tests/test_pathfinder.py b/tests/test_pathfinder.py
similarity index 100%
rename from pymc_experimental/tests/test_pathfinder.py
rename to tests/test_pathfinder.py
diff --git a/pymc_experimental/tests/test_pivoted_cholesky.py b/tests/test_pivoted_cholesky.py
similarity index 100%
rename from pymc_experimental/tests/test_pivoted_cholesky.py
rename to tests/test_pivoted_cholesky.py
diff --git a/pymc_experimental/tests/test_prior_from_trace.py b/tests/test_prior_from_trace.py
similarity index 100%
rename from pymc_experimental/tests/test_prior_from_trace.py
rename to tests/test_prior_from_trace.py
diff --git a/pymc_experimental/tests/test_splines.py b/tests/test_splines.py
similarity index 100%
rename from pymc_experimental/tests/test_splines.py
rename to tests/test_splines.py
diff --git a/pymc_experimental/tests/utils.py b/tests/utils.py
similarity index 100%
rename from pymc_experimental/tests/utils.py
rename to tests/utils.py
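
Usage sketch (illustrative only, not part of the patch): the construction below mirrors the new test_unmarginalize_hmm_normal_emission test and the new `group` argument of MarginalModel.recover_marginals. Because the test's assertions are still commented out, end-to-end recovery of a marginalized DiscreteMarkovChain is not yet verified; the sampling call and argument values here are assumptions for demonstration.

# Sketch based on tests/model/test_marginal_model.py::test_unmarginalize_hmm_normal_emission
# and the `group` parameter added to MarginalModel.recover_marginals in this diff.
import pymc as pm

from pymc_experimental.distributions.timeseries import DiscreteMarkovChain
from pymc_experimental.model.marginal_model import MarginalModel

with MarginalModel() as m:
    # Two-state chain that deterministically alternates 0 -> 1 -> 0 -> 1
    P = [[0, 1], [1, 0]]
    init_dist = pm.Categorical.dist(p=[1, 0])
    chain = DiscreteMarkovChain("chain", P=P, init_dist=init_dist, steps=3)
    # Gaussian emissions centred at -1 / +1 depending on the hidden state
    emission = pm.Normal("emission", mu=chain * 2 - 1, sigma=1e-1)

# DiscreteMarkovChain is now listed among the distributions recover_marginals accepts
m.marginalize([chain])

with m:
    idata = pm.sample(draws=200, tune=200, chains=2)

# `group` selects the InferenceData group the samples are taken from:
# "posterior" (the default) or "prior", per the updated docstring.
idata = m.recover_marginals(idata, var_names=["chain"], group="posterior")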