GP Gradient Computation #314

Open · wants to merge 5 commits into master
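This patch adds analytic gradients of the GP prediction with respect to the query input. Each kernel gains a `gradient_input(x, X)` method, exposed through the kernel base class as `grad_input`; each mean function gains a `grad_input(x, gp)` method (zero by default); and `GP`/`MultiGP` gain a `gradient(v)` method that combines the two. Since the posterior mean is `mu(x) = m(x) + k(x)^T alpha`, its gradient is `dmu/dx = dm/dx + (dk/dx)^T alpha`, which is exactly what `GP::gradient` assembles from the new kernel and mean pieces.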
11 changes: 11 additions & 0 deletions src/limbo/kernel/exp.hpp
@@ -110,6 +110,17 @@ namespace limbo {
return grad;
}

// Gradient of k(x, X[i]) with respect to the query point x.
// Row i of the result is dk(x, X[i]) / dx = -(x - X[i]) / l^2 * k(x, X[i]).
Eigen::MatrixXd gradient_input(const Eigen::VectorXd& x, const std::vector<Eigen::VectorXd>& X) const
{
double l_sq = _l * _l;
Eigen::MatrixXd g(X.size(), x.size());
for (size_t i = 0; i < X.size(); i++) {
g.row(i) = -1. / l_sq * (x - X[i]) * this->operator()(x, X[i]);
}

return g;
}

protected:
double _sf2, _l;

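The analytic expression above is easy to sanity-check against central finite differences. A minimal sketch (not part of this PR), assuming limbo's usual `Params` layout with the default `kernel` and `kernel_exp` values:

```cpp
#include <iostream>
#include <vector>
#include <Eigen/Core>
#include <limbo/kernel/exp.hpp>

struct Params {
    struct kernel : public limbo::defaults::kernel {};
    struct kernel_exp : public limbo::defaults::kernel_exp {};
};

int main()
{
    limbo::kernel::Exp<Params> k(2);
    Eigen::VectorXd x = Eigen::VectorXd::Random(2);
    std::vector<Eigen::VectorXd> X = {Eigen::VectorXd::Random(2), Eigen::VectorXd::Random(2)};

    // X.size() x x.size(); row i = dk(x, X[i]) / dx
    Eigen::MatrixXd g = k.grad_input(x, X);

    double eps = 1e-6;
    for (size_t i = 0; i < X.size(); i++)
        for (int j = 0; j < x.size(); j++) {
            Eigen::VectorXd xp = x, xm = x;
            xp(j) += eps;
            xm(j) -= eps;
            double fd = (k(xp, X[i]) - k(xm, X[i])) / (2 * eps);
            std::cout << "analytic " << g(i, j) << " vs finite-diff " << fd << "\n";
        }
}
```

The two columns should agree to several significant digits for any smooth kernel.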
7 changes: 7 additions & 0 deletions src/limbo/kernel/kernel.hpp
@@ -46,6 +46,8 @@
#ifndef LIMBO_KERNEL_KERNEL_HPP
#define LIMBO_KERNEL_KERNEL_HPP

#include <vector>

#include <Eigen/Core>

#include <limbo/tools/macros.hpp>
@@ -95,6 +97,11 @@ namespace limbo {
return g;
}

// Gradient of k(x, X[i]) w.r.t. the input x, dispatched to the derived
// kernel via CRTP; returns an X.size() x x.size() matrix
// (row i = dk(x, X[i]) / dx)
Eigen::MatrixXd grad_input(const Eigen::VectorXd& x, const std::vector<Eigen::VectorXd>& X) const
{
return static_cast<const Kernel*>(this)->gradient_input(x, X);
}

// Get the hyper parameters size
size_t h_params_size() const
{
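`grad_input` only forwards to the derived class (the usual CRTP pattern in limbo's kernels), so a kernel opts into input gradients by defining `gradient_input` with the shape contract used above. A hypothetical kernel (not in limbo or this PR), with the hyperparameter plumbing omitted, to illustrate the contract:

```cpp
#include <vector>
#include <Eigen/Core>
#include <limbo/kernel/kernel.hpp>

// Hypothetical linear kernel k(x, x') = x . x'; sketch only, assuming the
// CRTP base here is limbo's BaseKernel (params_size etc. omitted)
template <typename Params>
struct LinearKernel : public limbo::kernel::BaseKernel<Params, LinearKernel<Params>> {
    LinearKernel(size_t dim = 1) {}

    double kernel(const Eigen::VectorXd& x1, const Eigen::VectorXd& x2) const
    {
        return x1.dot(x2);
    }

    // row i = dk(x, X[i]) / dx = X[i]
    Eigen::MatrixXd gradient_input(const Eigen::VectorXd& x, const std::vector<Eigen::VectorXd>& X) const
    {
        Eigen::MatrixXd g(X.size(), x.size());
        for (size_t i = 0; i < X.size(); i++)
            g.row(i) = X[i].transpose();
        return g;
    }
};
```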
30 changes: 30 additions & 0 deletions src/limbo/kernel/squared_exp_ard.hpp
@@ -48,6 +48,8 @@

#include <limbo/kernel/kernel.hpp>

#include <Eigen/Cholesky>

namespace limbo {
namespace defaults {
struct kernel_squared_exp_ard {
@@ -135,6 +137,34 @@ namespace limbo {
}
}

// Gradient of k(x, X[i]) w.r.t. x for the ARD kernel. With the precision
// matrix M = A A^T + diag(1 / ell_i^2) (just the diagonal part when k() == 0),
// row i of the result is dk(x, X[i]) / dx = -M (x - X[i]) k(x, X[i]).
Eigen::MatrixXd gradient_input(const Eigen::VectorXd& x, const std::vector<Eigen::VectorXd>& X) const
{
Eigen::MatrixXd M;
if (Params::kernel_squared_exp_ard::k() > 0) {
M = _A * _A.transpose();
M.diagonal() += _ell.array().inverse().square().matrix();
}
else {
M = Eigen::MatrixXd::Zero(x.size(), x.size());
M.diagonal() = _ell.array().inverse().square().matrix();
}

Eigen::MatrixXd g(X.size(), x.size());
for (size_t i = 0; i < X.size(); i++) {
g.row(i) = -M * (x - X[i]) * this->operator()(x, X[i]);
}

return g;
}

double kernel(const Eigen::VectorXd& x1, const Eigen::VectorXd& x2) const
{
assert(x1.size() == _ell.size());
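For reference, the row formula implemented above follows directly from differentiating the ARD kernel, writing `M = A A^T + diag(1 / ell_i^2)` for the precision matrix built in the `k() > 0` branch (and just the diagonal part otherwise):

```math
\frac{\partial}{\partial x}\,\sigma^2 \exp\!\left(-\tfrac{1}{2}(x - x')^\top M (x - x')\right) = -\,M\,(x - x')\,k(x, x')
```

With a single shared length scale, `M = I / l^2` and this reduces to the `-1 / l_sq * (x - X[i])` factor used in exp.hpp.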
11 changes: 11 additions & 0 deletions src/limbo/mean/function_ard.hpp
@@ -119,6 +119,17 @@ namespace limbo {
return _tr * m_1;
}

template <typename GP>
Eigen::MatrixXd grad_input(const Eigen::VectorXd& x, const GP& gp) const
{
int d = gp.dim_out();
// gradient of the wrapped mean function: x.size() x d
Eigen::MatrixXd mg = _mean_function.grad_input(x, gp);
// _tr is d x (d + 1); the constant offset column does not depend on x,
// so only the leading d x d block of _tr contributes
Eigen::MatrixXd grad = _tr.block(0, 0, d, d) * mg.transpose(); // d x x.size()
return grad.transpose(); // x.size() x d
}

protected:
MeanFunction _mean_function;
Eigen::MatrixXd _tr;
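The mean computed by `FunctionARD` is the affine transform `h(x) = _tr * [m(x); 1]` (see the `return _tr * m_1;` above), so by the chain rule the constant column of `_tr` drops out and only its leading `d x d` block acts on the inner Jacobian:

```math
\frac{\partial h}{\partial x} = T_{1:d,\,1:d}\,\frac{\partial m}{\partial x}
```

`grad_input` stores Jacobians transposed (`x.size() x d`, one column per output dimension), hence the two transposes in the code.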
6 changes: 6 additions & 0 deletions src/limbo/mean/mean.hpp
@@ -73,6 +73,12 @@
// This should never be called!
assert(false);
}

// default: the mean is treated as constant w.r.t. the input, so the
// gradient is an x.size() x dim_out matrix of zeros (column j = d m_j / dx);
// mean functions that depend on x should override this
template <typename GP>
Eigen::MatrixXd grad_input(const Eigen::VectorXd& x, const GP& gp) const
{
return Eigen::MatrixXd::Zero(x.size(), gp.dim_out());
}
};
} // namespace mean
} // namespace limbo
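Mean functions that actually depend on the input override this default. A hypothetical linear-trend mean (not in limbo or this PR; the fixed 2-D weight matrix is only for illustration), showing the expected `x.size() x dim_out` layout:

```cpp
#include <Eigen/Core>
#include <limbo/mean/mean.hpp>

// Hypothetical mean m(x) = W x; sketch only, assuming the base here is
// limbo's BaseMean
template <typename Params>
struct LinearTrend : public limbo::mean::BaseMean<Params> {
    LinearTrend(size_t dim_out = 1) : _W(Eigen::MatrixXd::Ones(dim_out, 2)) {}

    template <typename GP>
    Eigen::VectorXd operator()(const Eigen::VectorXd& x, const GP&) const
    {
        return _W * x;
    }

    // d m_j / dx is row j of W, so the x.size() x dim_out result is W^T
    template <typename GP>
    Eigen::MatrixXd grad_input(const Eigen::VectorXd& x, const GP&) const
    {
        return _W.transpose();
    }

protected:
    Eigen::MatrixXd _W;
};
```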
14 changes: 14 additions & 0 deletions src/limbo/model/gp.hpp
@@ -412,6 +412,20 @@ namespace limbo {

const Eigen::MatrixXd& alpha() const { return _alpha; }

// Gradient of the prediction with respect to the input point v:
// d mu / dv = (dk / dv)^T alpha + d m / dv
Eigen::MatrixXd gradient(const Eigen::VectorXd& v) const
{
// row i of k_g is dk(v, _samples[i]) / dv
Eigen::MatrixXd k_g = _kernel_function.grad_input(v, _samples);

// (v.size() x N) * (N x _dim_out): column j accumulates
// sum_i alpha(i, j) * dk(v, _samples[i]) / dv
Eigen::MatrixXd grad = k_g.transpose() * _alpha;

return grad + _mean_function.grad_input(v, *this);
}

/// return the list of samples
const std::vector<Eigen::VectorXd>& samples() const { return _samples; }

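Putting the pieces together, `GP::gradient` can be checked against finite differences of `mu()`. A sketch under assumed but typical limbo conventions (default `Params`, `Exp` kernel, `NullFunction` mean), not part of this PR:

```cpp
#include <cmath>
#include <iostream>
#include <vector>
#include <Eigen/Core>
#include <limbo/kernel/exp.hpp>
#include <limbo/mean/null_function.hpp>
#include <limbo/model/gp.hpp>
#include <limbo/tools/math.hpp>

struct Params {
    struct kernel : public limbo::defaults::kernel {};
    struct kernel_exp : public limbo::defaults::kernel_exp {};
};

int main()
{
    using GP_t = limbo::model::GP<Params, limbo::kernel::Exp<Params>,
        limbo::mean::NullFunction<Params>>;

    // a small 1-D toy dataset
    std::vector<Eigen::VectorXd> samples, observations;
    for (double t = 0.; t <= 1.; t += 0.2) {
        samples.push_back(limbo::tools::make_vector(t));
        observations.push_back(limbo::tools::make_vector(std::cos(4. * t)));
    }

    GP_t gp(1, 1);
    gp.compute(samples, observations);

    Eigen::VectorXd x = limbo::tools::make_vector(0.37);
    Eigen::MatrixXd g = gp.gradient(x); // dim_in x dim_out

    // central finite difference of the posterior mean
    double eps = 1e-6;
    double fd = (gp.mu(limbo::tools::make_vector(0.37 + eps))(0)
                    - gp.mu(limbo::tools::make_vector(0.37 - eps))(0))
        / (2 * eps);
    std::cout << "analytic " << g(0, 0) << " vs finite-diff " << fd << std::endl;
}
```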
15 changes: 15 additions & 0 deletions src/limbo/model/multi_gp.hpp
@@ -265,6 +265,21 @@ namespace limbo {
});
}

// Gradient of the prediction with respect to the input point v,
// assembled column-by-column from the single-output base GPs
Eigen::MatrixXd gradient(const Eigen::VectorXd& v) const
{
// without any GPs, there is nothing to compute
if (_gp_models.empty())
return Eigen::MatrixXd();

Eigen::MatrixXd grad(_dim_in, _dim_out);

for (int i = 0; i < _dim_out; i++) {
grad.col(i) = _gp_models[i].gradient(v).col(0); // each base GP is 1-D in its output
}

return grad + _mean_function.grad_input(v, *this);
}

/// return the list of samples
const std::vector<Eigen::VectorXd>& samples() const
{
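Since each base GP here models a single output dimension, the stacked gradient is just the per-output gradient columns side by side plus the shared mean term:

```math
\nabla \mu(v) = \begin{bmatrix} \nabla \mu_1(v) & \cdots & \nabla \mu_{d_{\text{out}}}(v) \end{bmatrix} + \nabla m(v)
```

Taking `.col(0)` of each sub-model's gradient relies on that 1-D assumption; base GPs with more than one output would need their full gradient matrices stacked instead.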