Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

removing dependency on setting the Keras backend #79

Merged
merged 5 commits into from
Sep 2, 2022
Merged
Show file tree
Hide file tree
Changes from 1 commit
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
2 changes: 0 additions & 2 deletions benchmarking/main.py
Original file line number Diff line number Diff line change
Expand Up @@ -29,8 +29,6 @@

from gpflux.architectures import Config, build_constant_input_dim_deep_gp

tf.keras.backend.set_floatx("float64")

Comment on lines -32 to -33
Copy link
Collaborator Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

didn't try to run this, as it required additional packages

THIS_DIR = Path(__file__).parent
LOGS = THIS_DIR / "tmp"
EXPERIMENT = Experiment("UCI")
Expand Down
5 changes: 1 addition & 4 deletions docs/notebooks/deep_cde.ipynb
Original file line number Diff line number Diff line change
Expand Up @@ -23,10 +23,7 @@
"from tqdm import tqdm\n",
"\n",
"import tensorflow_probability as tfp\n",
"from sklearn.neighbors import KernelDensity\n",
"\n",
"\n",
"tf.keras.backend.set_floatx(\"float64\")"
"from sklearn.neighbors import KernelDensity\n"
],
"outputs": [],
"metadata": {}
Expand Down
1 change: 0 additions & 1 deletion docs/notebooks/efficient_sampling.py
Original file line number Diff line number Diff line change
Expand Up @@ -40,7 +40,6 @@
from gpflux.sampling import KernelWithFeatureDecomposition
from gpflux.models.deep_gp import sample_dgp

tf.keras.backend.set_floatx("float64")

# %% [markdown]
"""
Expand Down
1 change: 0 additions & 1 deletion docs/notebooks/gpflux_features.py
Original file line number Diff line number Diff line change
Expand Up @@ -33,7 +33,6 @@
import pandas as pd
import tensorflow as tf

tf.keras.backend.set_floatx("float64") # we want to carry out GP calculations in 64 bit
tf.get_logger().setLevel("INFO")


Expand Down
1 change: 0 additions & 1 deletion docs/notebooks/gpflux_with_keras_layers.py
Original file line number Diff line number Diff line change
Expand Up @@ -30,7 +30,6 @@

from gpflow.config import default_float

tf.keras.backend.set_floatx("float64")

# %% [markdown]
"""
Expand Down
1 change: 0 additions & 1 deletion docs/notebooks/intro.py
Original file line number Diff line number Diff line change
Expand Up @@ -26,7 +26,6 @@
import pandas as pd
import tensorflow as tf

tf.keras.backend.set_floatx("float64")
hstojic marked this conversation as resolved.
Show resolved Hide resolved
tf.get_logger().setLevel("INFO")

# %% [markdown]
Expand Down
2 changes: 0 additions & 2 deletions docs/notebooks/keras_integration.py
Original file line number Diff line number Diff line change
Expand Up @@ -28,8 +28,6 @@

import matplotlib.pyplot as plt

# %%
tf.keras.backend.set_floatx("float64")

# %%
# %matplotlib inline
Expand Down
7 changes: 5 additions & 2 deletions gpflux/layers/latent_variable_layer.py
Original file line number Diff line number Diff line change
Expand Up @@ -94,7 +94,10 @@ def __init__(
posterior distribution; see :attr:`encoder`.
:param compositor: A layer that combines layer inputs and latent variable
samples into a single tensor; see :attr:`compositor`. If you do not specify a value for
this parameter, the default is ``tf.keras.layers.Concatenate(axis=-1)``.
this parameter, the default is
``tf.keras.layers.Concatenate(axis=-1,dtype=default_float())``. Note that you should
hstojic marked this conversation as resolved.
Show resolved Hide resolved
set ``dtype`` of the layer to GPflow default dtype as in
hstojic marked this conversation as resolved.
Show resolved Hide resolved
:meth:`~gpflow.default_float()`.
:param name: The name of this layer (passed through to `tf.keras.layers.Layer`).
"""

Expand All @@ -103,7 +106,7 @@ def __init__(
self.distribution_class = prior.__class__
self.encoder = encoder
self.compositor = (
compositor if compositor is not None else tf.keras.layers.Concatenate(axis=-1)
compositor if compositor is not None else tf.keras.layers.Concatenate(axis=-1, dtype=default_float())
Copy link
Collaborator Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

this needed to be passed an explicit dtype as well

)

def call(
Expand Down
4 changes: 2 additions & 2 deletions gpflux/models/deep_gp.py
Original file line number Diff line number Diff line change
Expand Up @@ -96,8 +96,8 @@ def __init__(
If you do not specify a value for this parameter explicitly, it is automatically
detected from the :attr:`~gpflux.layers.GPLayer.num_data` attribute in the GP layers.
"""
self.inputs = tf.keras.Input((input_dim,), name="inputs")
self.targets = tf.keras.Input((target_dim,), name="targets")
self.inputs = tf.keras.Input((input_dim,), dtype=tf.float64, name="inputs")
self.targets = tf.keras.Input((target_dim,), dtype=tf.float64, name="targets")
hstojic marked this conversation as resolved.
Show resolved Hide resolved
self.f_layers = f_layers
if isinstance(likelihood, gpflow.likelihoods.Likelihood):
self.likelihood_layer = LikelihoodLayer(likelihood)
Expand Down
2 changes: 0 additions & 2 deletions tests/gpflux/architectures/test_constant_input_dim_deep_gp.py
Original file line number Diff line number Diff line change
Expand Up @@ -5,8 +5,6 @@
from gpflux.architectures import Config, build_constant_input_dim_deep_gp
from gpflux.helpers import make_dataclass_from_class

tf.keras.backend.set_floatx("float64")


class DemoConfig:
num_inducing = 7
Expand Down
1 change: 0 additions & 1 deletion tests/gpflux/layers/test_latent_variable_layer.py
Original file line number Diff line number Diff line change
Expand Up @@ -26,7 +26,6 @@
from gpflux.encoders import DirectlyParameterizedNormalDiag
from gpflux.layers import LatentVariableLayer, LayerWithObservations, TrackableLayer

tf.keras.backend.set_floatx("float64")

############
# Utilities
Expand Down
2 changes: 0 additions & 2 deletions tests/gpflux/models/test_bayesian_model.py
Original file line number Diff line number Diff line change
Expand Up @@ -24,8 +24,6 @@
from gpflux.layers import LatentVariableLayer, LikelihoodLayer
from tests.integration.test_latent_variable_integration import build_gp_layers # noqa: F401

tf.keras.backend.set_floatx("float64")

MAXITER = int(80e3)
PLOTTER_INTERVAL = 60

Expand Down
1 change: 0 additions & 1 deletion tests/gpflux/models/test_deep_gp.py
Original file line number Diff line number Diff line change
Expand Up @@ -133,7 +133,6 @@ def plotter(*args, **kwargs):


def run_demo(maxiter=int(80e3), plotter_interval=60):
tf.keras.backend.set_floatx("float64")
input_dim = 2
num_data = 1000
data = setup_dataset(input_dim, num_data)
Expand Down
2 changes: 0 additions & 2 deletions tests/gpflux/test_losses.py
Original file line number Diff line number Diff line change
Expand Up @@ -7,8 +7,6 @@
from gpflux.layers import LikelihoodLayer
from gpflux.losses import LikelihoodLoss

tf.keras.backend.set_floatx("float64")


def test_likelihood_layer_and_likelihood_loss_give_equal_results():
np.random.seed(123)
Expand Down
3 changes: 0 additions & 3 deletions tests/integration/test_compilation.py
Original file line number Diff line number Diff line change
Expand Up @@ -27,9 +27,6 @@
from gpflux.losses import LikelihoodLoss
from gpflux.models import DeepGP

tf.keras.backend.set_floatx("float64")


#########################################
# Helpers
#########################################
Expand Down
2 changes: 0 additions & 2 deletions tests/integration/test_latent_variable_integration.py
Original file line number Diff line number Diff line change
Expand Up @@ -30,8 +30,6 @@
from gpflux.layers import GPLayer, LatentVariableLayer, LikelihoodLayer
from gpflux.models import DeepGP

tf.keras.backend.set_floatx("float64")

############
# Utilities
############
Expand Down
18 changes: 8 additions & 10 deletions tests/integration/test_svgp_equivalence.py
Original file line number Diff line number Diff line change
Expand Up @@ -28,8 +28,6 @@

import gpflux

tf.keras.backend.set_floatx("float64")


class LogPrior_ELBO_SVGP(gpflow.models.SVGP):
"""
Expand Down Expand Up @@ -264,15 +262,15 @@ def optimization_step():


@pytest.mark.parametrize(
"svgp_fitter, sldgp_fitter",
"svgp_fitter, sldgp_fitter, tol_kw",
[
(fit_adam, fit_adam),
(fit_adam, keras_fit_adam),
(fit_natgrad, fit_natgrad),
(fit_natgrad, keras_fit_natgrad),
(fit_adam, fit_adam, {}),
(fit_adam, keras_fit_adam, {}),
(fit_natgrad, fit_natgrad, {}),
(fit_natgrad, keras_fit_natgrad, dict(atol=1e-7)),
Copy link
Collaborator Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

this test required loosening the tolerance slightly...

],
)
def test_svgp_equivalence_with_sldgp(svgp_fitter, sldgp_fitter, maxiter=20):
def test_svgp_equivalence_with_sldgp(svgp_fitter, sldgp_fitter, tol_kw, maxiter=20):
data = load_data()

svgp = create_gpflow_svgp(*make_kernel_likelihood_iv())
Expand All @@ -281,14 +279,14 @@ def test_svgp_equivalence_with_sldgp(svgp_fitter, sldgp_fitter, maxiter=20):
sldgp = create_gpflux_sldgp(*make_kernel_likelihood_iv(), get_num_data(data))
sldgp_fitter(sldgp, data, maxiter=maxiter)

assert_equivalence(svgp, sldgp, data)
assert_equivalence(svgp, sldgp, data, **tol_kw)


@pytest.mark.parametrize(
"svgp_fitter, keras_fitter, tol_kw",
[
(fit_adam, _keras_fit_adam, {}),
(fit_natgrad, _keras_fit_natgrad, dict(atol=1e-8)),
(fit_natgrad, _keras_fit_natgrad, dict(atol=1e-6)),
Copy link
Collaborator Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

this one as well... it seems there was already an issue with this test, so I assumed it might be acceptable?
However, I'm not sure where the small differences actually arise — perhaps Keras is still using float32 somewhere internally. I'm not sure how to check that exactly — any ideas?

],
)
def test_svgp_equivalence_with_keras_sequential(svgp_fitter, keras_fitter, tol_kw, maxiter=10):
Expand Down