diff --git a/.gitignore b/.gitignore index ecab7c43f8..8d649169ff 100644 --- a/.gitignore +++ b/.gitignore @@ -72,3 +72,4 @@ Production *.tgz *.tar* +.vscode/settings.json diff --git a/src/aliceVision/camera/Equidistant.hpp b/src/aliceVision/camera/Equidistant.hpp new file mode 100644 index 0000000000..6dcd1d6bfa --- /dev/null +++ b/src/aliceVision/camera/Equidistant.hpp @@ -0,0 +1,184 @@ +// This file is part of the AliceVision project. +// Copyright (c) 2016 AliceVision contributors. +// Copyright (c) 2012 openMVG contributors. +// This Source Code Form is subject to the terms of the Mozilla Public License, +// v. 2.0. If a copy of the MPL was not distributed with this file, +// You can obtain one at https://mozilla.org/MPL/2.0/. + +#pragma once + +#include +#include +#include +#include +#include + +#include +#include + + +namespace aliceVision { +namespace camera { + +/// Define an equidistant camera (store a K 3x3 matrix) +/// with intrinsic parameters defining the K calibration matrix +class Equidistant : public IntrinsicBase +{ + public: + + Equidistant() = default; + + Equidistant( + unsigned int w, unsigned int h, + const Mat3 K) + :IntrinsicBase(w,h) + { + _K = K; + _Kinv = _K.inverse(); + } + + Equidistant( + unsigned int w, unsigned int h, + double focal_length_pix, + double ppx, double ppy, const std::vector& distortionParams = {}) + : IntrinsicBase(w,h) + , _distortionParams(distortionParams) + { + setK(focal_length_pix, ppx, ppy); + } + + virtual ~Equidistant() {} + + virtual Equidistant* clone() const override { return new Equidistant(*this); } + virtual void assign(const IntrinsicBase& other) override { *this = dynamic_cast(other); } + + virtual bool isValid() const override { return focal() > 0 && IntrinsicBase::isValid(); } + + virtual EINTRINSIC getType() const override { return EQUIDISTANT_CAMERA; } + std::string getTypeStr() const { return EINTRINSIC_enumToString(getType()); } + + double getFocalLengthPix() const { return _K(0,0); } + + Vec2 getPrincipalPoint() const { return Vec2(_K(0,2), _K(1,2)); } + + const Mat3& K() const { return _K; } + const Mat3& Kinv() const { return _Kinv; } + void setK(double focal_length_pix, double ppx, double ppy) + { + _K << focal_length_pix, 0., ppx, 0., focal_length_pix, ppy, 0., 0., 1.; + _Kinv = _K.inverse(); + } + void setK(const Mat3 &K) { _K = K;} + /// Return the value of the focal in pixels + inline double focal() const {return _K(0,0);} + inline Vec2 principal_point() const {return Vec2(_K(0,2), _K(1,2));} + + // Get bearing vector of p point (image coord) + Vec3 operator () (const Vec2& p) const override + { + Vec3 p3(p(0),p(1),1.0); + return (_Kinv * p3).normalized(); + } + + // Transform a point from the camera plane to the image plane + Vec2 cam2ima(const Vec2& p) const override + { + return focal() * p + principal_point(); + } + + // Transform a point from the image plane to the camera plane + Vec2 ima2cam(const Vec2& p) const override + { + return ( p - principal_point() ) / focal(); + } + + virtual bool have_disto() const override { return false; } + + virtual Vec2 add_disto(const Vec2& p) const override { return p; } + + virtual Vec2 remove_disto(const Vec2& p) const override { return p; } + + virtual double imagePlane_toCameraPlaneError(double value) const override + { + return value / focal(); + } + + virtual Mat34 get_projective_equivalent(const geometry::Pose3 & pose) const override + { + Mat34 P; + P_From_KRt(K(), pose.rotation(), pose.translation(), &P); + return P; + } + + // Data wrapper for non linear 
optimization (get data)
+  std::vector<double> getParams() const override
+  {
+    std::vector<double> params = {_K(0,0), _K(0,2), _K(1,2)};
+    params.insert(params.end(), _distortionParams.begin(), _distortionParams.end());
+    return params;
+  }
+
+  bool hasDistortion() const override
+  {
+    for(double d: _distortionParams)
+      if(d != 0.0)
+        return true;
+    return false;
+  }
+
+  const std::vector<double>& getDistortionParams() const
+  {
+    return _distortionParams;
+  }
+
+  void setDistortionParams(const std::vector<double>& distortionParams)
+  {
+    if(distortionParams.size() != _distortionParams.size())
+    {
+      std::stringstream s;
+      s << "Equidistant::setDistortionParams: wrong number of distortion parameters (expected: " << _distortionParams.size() << ", given: " << distortionParams.size() << ").";
+      throw std::runtime_error(s.str());
+    }
+    _distortionParams = distortionParams;
+  }
+
+  // Data wrapper for non linear optimization (update from data)
+  bool updateFromParams(const std::vector<double>& params) override
+  {
+    if (params.size() != (3 + _distortionParams.size()))
+      return false;
+
+    this->setK(params[0], params[1], params[2]);
+    setDistortionParams({params.begin() + 3, params.end()});
+
+    return true;
+  }
+
+  /**
+   * @brief Return true if this ray should be visible in the image
+   * @return true if this ray is theoretically visible
+   */
+  virtual bool isVisibleRay(const Vec3 & ray) const override {
+
+    if (ray(2) < 0) {
+      return false;
+    }
+
+    return true;
+  }
+
+  /// Return the un-distorted pixel (with removed distortion)
+  virtual Vec2 get_ud_pixel(const Vec2& p) const override {return p;}
+
+  /// Return the distorted pixel (with added distortion)
+  virtual Vec2 get_d_pixel(const Vec2& p) const override {return p;}
+
+private:
+  // Focal & principal point are embedded into the calibration matrix K
+  Mat3 _K, _Kinv;
+protected:
+  std::vector<double> _distortionParams;
+};
+
+} // namespace camera
+} // namespace aliceVision
diff --git a/src/aliceVision/camera/IntrinsicBase.hpp b/src/aliceVision/camera/IntrinsicBase.hpp
index 5dff0b76a4..d4e68178fc 100644
--- a/src/aliceVision/camera/IntrinsicBase.hpp
+++ b/src/aliceVision/camera/IntrinsicBase.hpp
@@ -124,6 +124,20 @@ struct IntrinsicBase
     return this->cam2ima( X.head<2>()/X(2) );
   }
 
+  inline Vec3 backproject(const geometry::Pose3& pose, const Vec2& pt2D, double depth, bool applyUndistortion = true) const
+  {
+    Vec2 pt2DCam;
+    if (applyUndistortion && this->have_disto()) // apply disto & intrinsics
+      pt2DCam = this->ima2cam(this->remove_disto(pt2D));
+    else
+      pt2DCam = this->ima2cam(pt2D);
+
+    const Vec3 pt2DCamN = pt2DCam.homogeneous().normalized();
+    const Vec3 pt3d = depth * pt2DCamN;
+    const Vec3 output = pose.inverse()(pt3d);
+    return output;
+  }
+
   /**
    * @brief Compute the residual between the 3D projected point X and an image observation x
    * @param[in] pose The pose
@@ -335,6 +349,40 @@ struct IntrinsicBase
     return false;
   }
 
+  /**
+   * @brief Return true if this ray should be visible in the image
+   * @param ray input ray to check for visibility
+   * @return true if this ray is theoretically visible
+   */
+  virtual bool isVisibleRay(const Vec3 & ray) const = 0;
+
+  /**
+   * @brief Return true if these pixel coordinates should be visible in the image
+   * @param pix input pixel coordinates to check for visibility
+   * @return true if visible
+   */
+  virtual bool isVisible(const Vec2 & pix) const {
+
+    if (pix(0) < 0 || pix(0) >= _w || pix(1) < 0 || pix(1) >= _h) {
+      return false;
+    }
+
+    return true;
+  }
+
+  /**
+   * @brief Assuming the distortion is a function of radius, estimate the
+   * maximal undistorted
radius for a range of distorted radius. + * @param min_radius the minimal radius to consider + * @param max_radius the maximal radius to consider + * @return the maximal undistorted radius + */ + virtual float getMaximalDistortion(double min_radius, double max_radius) const { + + /*Without distortion, obvious*/ + return max_radius; + } + /** * @brief Generate an unique Hash from the camera parameters (used for grouping) * @return Unique Hash from the camera parameters @@ -352,6 +400,16 @@ struct IntrinsicBase return seed; } + /** + * @brief Rescale intrinsics to reflect a rescale of the camera image + * @param factor a scale factor + */ + virtual void rescale(float factor) { + + _w = (unsigned int)(floor(float(_w) * factor)); + _h = (unsigned int)(floor(float(_h) * factor)); + } + private: /// initialization mode @@ -415,5 +473,6 @@ inline double AngleBetweenRays(const geometry::Pose3& pose1, return AngleBetweenRays(ray1, ray2); } + } // namespace camera } // namespace aliceVision diff --git a/src/aliceVision/camera/Pinhole.hpp b/src/aliceVision/camera/Pinhole.hpp index 05373af6ec..20977700d6 100644 --- a/src/aliceVision/camera/Pinhole.hpp +++ b/src/aliceVision/camera/Pinhole.hpp @@ -49,12 +49,12 @@ class Pinhole : public IntrinsicBase virtual ~Pinhole() {} - virtual Pinhole* clone() const { return new Pinhole(*this); } - virtual void assign(const IntrinsicBase& other) { *this = dynamic_cast(other); } + virtual Pinhole* clone() const override { return new Pinhole(*this); } + virtual void assign(const IntrinsicBase& other) override { *this = dynamic_cast(other); } - virtual bool isValid() const { return focal() > 0 && IntrinsicBase::isValid(); } + virtual bool isValid() const override { return focal() > 0 && IntrinsicBase::isValid(); } - virtual EINTRINSIC getType() const { return PINHOLE_CAMERA; } + virtual EINTRINSIC getType() const override { return PINHOLE_CAMERA; } std::string getTypeStr() const { return EINTRINSIC_enumToString(getType()); } double getFocalLengthPix() const { return _K(0,0); } @@ -74,36 +74,36 @@ class Pinhole : public IntrinsicBase inline Vec2 principal_point() const {return Vec2(_K(0,2), _K(1,2));} // Get bearing vector of p point (image coord) - Vec3 operator () (const Vec2& p) const + Vec3 operator () (const Vec2& p) const override { Vec3 p3(p(0),p(1),1.0); return (_Kinv * p3).normalized(); } // Transform a point from the camera plane to the image plane - Vec2 cam2ima(const Vec2& p) const + Vec2 cam2ima(const Vec2& p) const override { return focal() * p + principal_point(); } // Transform a point from the image plane to the camera plane - Vec2 ima2cam(const Vec2& p) const + Vec2 ima2cam(const Vec2& p) const override { return ( p - principal_point() ) / focal(); } - virtual bool have_disto() const { return false; } + virtual bool have_disto() const override { return false; } - virtual Vec2 add_disto(const Vec2& p) const { return p; } + virtual Vec2 add_disto(const Vec2& p) const override { return p; } - virtual Vec2 remove_disto(const Vec2& p) const { return p; } + virtual Vec2 remove_disto(const Vec2& p) const override { return p; } - virtual double imagePlane_toCameraPlaneError(double value) const + virtual double imagePlane_toCameraPlaneError(double value) const override { return value / focal(); } - virtual Mat34 get_projective_equivalent(const geometry::Pose3 & pose) const + virtual Mat34 get_projective_equivalent(const geometry::Pose3 & pose) const override { Mat34 P; P_From_KRt(K(), pose.rotation(), pose.translation(), &P); @@ -111,7 +111,7 @@ class Pinhole : 
public IntrinsicBase
   }
 
   // Data wrapper for non linear optimization (get data)
-  std::vector<double> getParams() const
+  std::vector<double> getParams() const override
   {
     std::vector<double> params = {_K(0,0), _K(0,2), _K(1,2)};
     params.insert(params.end(), _distortionParams.begin(), _distortionParams.end());
@@ -143,7 +143,7 @@ class Pinhole : public IntrinsicBase
   }
 
   // Data wrapper for non linear optimization (update from data)
-  bool updateFromParams(const std::vector<double>& params)
+  bool updateFromParams(const std::vector<double>& params) override
   {
     if (params.size() != (3 + _distortionParams.size()))
       return false;
@@ -154,11 +154,62 @@ class Pinhole : public IntrinsicBase
     return true;
   }
 
+  /**
+   * @brief Return true if this ray should be visible in the image
+   * @return true if this ray is theoretically visible
+   */
+  virtual bool isVisibleRay(const Vec3 & ray) const override {
+
+    if (ray(2) < 0) {
+      return false;
+    }
+
+    Vec2 proj = ray.head(2) / ray(2);
+
+    double xmin;
+    double ymin;
+    double xmax;
+    double ymax;
+
+    Vec2 p1 = remove_disto(ima2cam(Vec2(0,0)));
+    Vec2 p2 = remove_disto(ima2cam(Vec2(_w,0)));
+    Vec2 p3 = remove_disto(ima2cam(Vec2(_w,_h)));
+    Vec2 p4 = remove_disto(ima2cam(Vec2(0,_h)));
+
+    xmin = std::min(p4(0), (std::min(p3(0), std::min(p1(0), p2(0)))));
+    ymin = std::min(p4(1), (std::min(p3(1), std::min(p1(1), p2(1)))));
+    xmax = std::max(p4(0), (std::max(p3(0), std::max(p1(0), p2(0)))));
+    ymax = std::max(p4(1), (std::max(p3(1), std::max(p1(1), p2(1)))));
+
+    if (proj(0) < xmin || proj(0) > xmax || proj(1) < ymin || proj(1) > ymax) {
+      return false;
+    }
+
+    return true;
+  }
+
   /// Return the un-distorted pixel (with removed distortion)
-  virtual Vec2 get_ud_pixel(const Vec2& p) const {return p;}
+  virtual Vec2 get_ud_pixel(const Vec2& p) const override {return p;}
 
   /// Return the distorted pixel (with added distortion)
-  virtual Vec2 get_d_pixel(const Vec2& p) const {return p;}
+  virtual Vec2 get_d_pixel(const Vec2& p) const override {return p;}
+
+  /**
+   * @brief Rescale intrinsics to reflect a rescale of the camera image
+   * @param factor a scale factor
+   */
+  virtual void rescale(float factor) override {
+
+    IntrinsicBase::rescale(factor);
+
+    Mat3 scale;
+    scale.setIdentity();
+    scale(0, 0) = factor;
+    scale(1, 1) = factor;
+
+    _K = scale * _K;
+    _Kinv = _K.inverse();
+  }
 
 private:
   // Focal & principal point are embed into the calibration matrix K
diff --git a/src/aliceVision/camera/PinholeBrown.hpp b/src/aliceVision/camera/PinholeBrown.hpp
index 04f8bba2f1..e07dc4ad0a 100644
--- a/src/aliceVision/camera/PinholeBrown.hpp
+++ b/src/aliceVision/camera/PinholeBrown.hpp
@@ -31,14 +31,14 @@ class PinholeBrownT2 : public Pinhole
   {
   }
 
-  PinholeBrownT2* clone() const { return new PinholeBrownT2(*this); }
-  void assign(const IntrinsicBase& other) { *this = dynamic_cast<const PinholeBrownT2&>(other); }
+  PinholeBrownT2* clone() const override { return new PinholeBrownT2(*this); }
+  void assign(const IntrinsicBase& other) override { *this = dynamic_cast<const PinholeBrownT2&>(other); }
 
-  EINTRINSIC getType() const { return PINHOLE_CAMERA_BROWN; }
+  EINTRINSIC getType() const override { return PINHOLE_CAMERA_BROWN; }
 
-  virtual bool have_disto() const { return true;}
+  virtual bool have_disto() const override { return true;}
 
-  virtual Vec2 add_disto(const Vec2 & p) const{
+  virtual Vec2 add_disto(const Vec2 & p) const override{
     return (p + distoFunction(_distortionParams, p));
   }
 
@@ -46,7 +46,7 @@ class PinholeBrownT2 : public Pinhole
   // Heikkila J (2000) Geometric Camera Calibration Using Circular Control Points.
   // IEEE Trans. Pattern Anal. Mach.
Intell., 22:1066-1077 - virtual Vec2 remove_disto(const Vec2 & p) const{ + virtual Vec2 remove_disto(const Vec2 & p) const override{ const double epsilon = 1e-8; //criteria to stop the iteration Vec2 p_u = p; @@ -59,13 +59,13 @@ class PinholeBrownT2 : public Pinhole } /// Return the un-distorted pixel (with removed distortion) - virtual Vec2 get_ud_pixel(const Vec2& p) const + virtual Vec2 get_ud_pixel(const Vec2& p) const override { return cam2ima( remove_disto(ima2cam(p)) ); } /// Return the distorted pixel (with added distortion) - virtual Vec2 get_d_pixel(const Vec2& p) const + virtual Vec2 get_d_pixel(const Vec2& p) const override { return cam2ima( add_disto(ima2cam(p)) ); } diff --git a/src/aliceVision/camera/PinholeFisheye.hpp b/src/aliceVision/camera/PinholeFisheye.hpp index da9362c33b..abcc7f8e60 100644 --- a/src/aliceVision/camera/PinholeFisheye.hpp +++ b/src/aliceVision/camera/PinholeFisheye.hpp @@ -33,14 +33,14 @@ class PinholeFisheye : public Pinhole { } - PinholeFisheye* clone() const { return new PinholeFisheye(*this); } - void assign(const IntrinsicBase& other) { *this = dynamic_cast(other); } + PinholeFisheye* clone() const override { return new PinholeFisheye(*this); } + void assign(const IntrinsicBase& other) override { *this = dynamic_cast(other); } - EINTRINSIC getType() const { return PINHOLE_CAMERA_FISHEYE; } + EINTRINSIC getType() const override { return PINHOLE_CAMERA_FISHEYE; } - virtual bool have_disto() const { return true;} + virtual bool have_disto() const override { return true;} - virtual Vec2 add_disto(const Vec2 & p) const + virtual Vec2 add_disto(const Vec2 & p) const override { const std::vector& distortionParams = getDistortionParams(); const double eps = 1e-8; @@ -62,7 +62,7 @@ class PinholeFisheye : public Pinhole return p*cdist; } - virtual Vec2 remove_disto(const Vec2 & p) const + virtual Vec2 remove_disto(const Vec2 & p) const override { const double eps = 1e-8; double scale = 1.0; @@ -89,16 +89,24 @@ class PinholeFisheye : public Pinhole } /// Return the un-distorted pixel (with removed distortion) - virtual Vec2 get_ud_pixel(const Vec2& p) const + virtual Vec2 get_ud_pixel(const Vec2& p) const override { return cam2ima( remove_disto(ima2cam(p)) ); } /// Return the distorted pixel (with added distortion) - virtual Vec2 get_d_pixel(const Vec2& p) const + virtual Vec2 get_d_pixel(const Vec2& p) const override { return cam2ima( add_disto(ima2cam(p)) ); } + + virtual bool isVisibleRay(const Vec3 & ray) const override { + if (ray(2) < 0.0) { + return false; + } + + return true; + } }; } // namespace camera diff --git a/src/aliceVision/camera/PinholeFisheye1.hpp b/src/aliceVision/camera/PinholeFisheye1.hpp index 3f946cd948..a742480fb1 100644 --- a/src/aliceVision/camera/PinholeFisheye1.hpp +++ b/src/aliceVision/camera/PinholeFisheye1.hpp @@ -34,14 +34,14 @@ class PinholeFisheye1 : public Pinhole { } - PinholeFisheye1* clone() const { return new PinholeFisheye1(*this); } - void assign(const IntrinsicBase& other) { *this = dynamic_cast(other); } + PinholeFisheye1* clone() const override { return new PinholeFisheye1(*this); } + void assign(const IntrinsicBase& other) override { *this = dynamic_cast(other); } - EINTRINSIC getType() const { return PINHOLE_CAMERA_FISHEYE1; } + EINTRINSIC getType() const override { return PINHOLE_CAMERA_FISHEYE1; } - virtual bool have_disto() const { return true;} + virtual bool have_disto() const override { return true;} - virtual Vec2 add_disto(const Vec2 & p) const + virtual Vec2 add_disto(const Vec2 & p) const override { 
const double k1 = _distortionParams.at(0); const double r = std::hypot(p(0), p(1)); @@ -49,7 +49,7 @@ class PinholeFisheye1 : public Pinhole return p * coef; } - virtual Vec2 remove_disto(const Vec2 & p) const + virtual Vec2 remove_disto(const Vec2 & p) const override { const double k1 = _distortionParams.at(0); const double r = std::hypot(p(0), p(1)); @@ -58,13 +58,13 @@ class PinholeFisheye1 : public Pinhole } /// Return the un-distorted pixel (with removed distortion) - virtual Vec2 get_ud_pixel(const Vec2& p) const + virtual Vec2 get_ud_pixel(const Vec2& p) const override { return cam2ima( remove_disto(ima2cam(p)) ); } /// Return the distorted pixel (with added distortion) - virtual Vec2 get_d_pixel(const Vec2& p) const + virtual Vec2 get_d_pixel(const Vec2& p) const override { return cam2ima( add_disto(ima2cam(p)) ); } diff --git a/src/aliceVision/camera/PinholeRadial.hpp b/src/aliceVision/camera/PinholeRadial.hpp index 991382e2a5..6e5fc945e6 100644 --- a/src/aliceVision/camera/PinholeRadial.hpp +++ b/src/aliceVision/camera/PinholeRadial.hpp @@ -60,15 +60,15 @@ class PinholeRadialK1 : public Pinhole { } - PinholeRadialK1* clone() const { return new PinholeRadialK1(*this); } - void assign(const IntrinsicBase& other) { *this = dynamic_cast(other); } + PinholeRadialK1* clone() const override { return new PinholeRadialK1(*this); } + void assign(const IntrinsicBase& other) override { *this = dynamic_cast(other); } - EINTRINSIC getType() const { return PINHOLE_CAMERA_RADIAL1; } + EINTRINSIC getType() const override { return PINHOLE_CAMERA_RADIAL1; } - virtual bool have_disto() const { return true; } + virtual bool have_disto() const override { return true; } /// Add distortion to the point p (assume p is in the camera frame [normalized coordinates]) - virtual Vec2 add_disto(const Vec2 & p) const { + virtual Vec2 add_disto(const Vec2 & p) const override{ const double k1 = _distortionParams.at(0); @@ -79,7 +79,7 @@ class PinholeRadialK1 : public Pinhole } /// Remove distortion (return p' such that disto(p') = p) - virtual Vec2 remove_disto(const Vec2& p) const { + virtual Vec2 remove_disto(const Vec2& p) const override { // Compute the radius from which the point p comes from thanks to a bisection // Minimize disto(radius(p')^2) == actual Squared(radius(p)) @@ -90,18 +90,35 @@ class PinholeRadialK1 : public Pinhole return radius * p; } + /** + * @brief Assuming the distortion is a function of radius, estimate the + * maximal undistorted radius for a range of distorted radius. 
+   * @param min_radius the minimal radius to consider
+   * @param max_radius the maximal radius to consider
+   * @return the maximal undistorted radius
+   */
+  virtual float getMaximalDistortion(double min_radius, double max_radius) const override {
+
+    float ud = std::sqrt(radial_distortion::bisection_Radius_Solve(_distortionParams, max_radius * max_radius, distoFunctor));
+
+    // Undistorted radius recovered by bisection on the squared distortion function
+    return ud;
+  }
+
   /// Return the un-distorted pixel (with removed distortion)
-  virtual Vec2 get_ud_pixel(const Vec2& p) const
+  virtual Vec2 get_ud_pixel(const Vec2& p) const override
   {
     return cam2ima( remove_disto(ima2cam(p)) );
   }
 
   /// Return the distorted pixel (with added distortion)
-  virtual Vec2 get_d_pixel(const Vec2& p) const
+  virtual Vec2 get_d_pixel(const Vec2& p) const override
   {
     return cam2ima( add_disto(ima2cam(p)) );
   }
+
 private:
 
   /// Functor to solve Square(disto(radius(p'))) = r^2
@@ -126,15 +143,15 @@ class PinholeRadialK3 : public Pinhole
   {
   }
 
-  PinholeRadialK3* clone() const { return new PinholeRadialK3(*this); }
-  void assign(const IntrinsicBase& other) { *this = dynamic_cast<const PinholeRadialK3&>(other); }
+  PinholeRadialK3* clone() const override { return new PinholeRadialK3(*this); }
+  void assign(const IntrinsicBase& other) override { *this = dynamic_cast<const PinholeRadialK3&>(other); }
 
-  EINTRINSIC getType() const { return PINHOLE_CAMERA_RADIAL3; }
+  EINTRINSIC getType() const override { return PINHOLE_CAMERA_RADIAL3; }
 
-  virtual bool have_disto() const { return true; }
+  virtual bool have_disto() const override { return true; }
 
   /// Add distortion to the point p (assume p is in the camera frame [normalized coordinates])
-  virtual Vec2 add_disto(const Vec2 & p) const
+  virtual Vec2 add_disto(const Vec2 & p) const override
   {
     const double k1 = _distortionParams[0], k2 = _distortionParams[1], k3 = _distortionParams[2];
@@ -147,7 +164,7 @@ class PinholeRadialK3 : public Pinhole
   }
 
   /// Remove distortion (return p' such that disto(p') = p)
-  virtual Vec2 remove_disto(const Vec2& p) const {
+  virtual Vec2 remove_disto(const Vec2& p) const override {
 
     // Compute the radius from which the point p comes from thanks to a bisection
     // Minimize disto(radius(p')^2) == actual Squared(radius(p))
@@ -159,13 +176,13 @@ class PinholeRadialK3 : public Pinhole
   }
 
   /// Return the un-distorted pixel (with removed distortion)
-  virtual Vec2 get_ud_pixel(const Vec2& p) const
+  virtual Vec2 get_ud_pixel(const Vec2& p) const override
   {
     return cam2ima( remove_disto(ima2cam(p)) );
   }
 
   /// Return the distorted pixel (with added distortion)
-  virtual Vec2 get_d_pixel(const Vec2& p) const
+  virtual Vec2 get_d_pixel(const Vec2& p) const override
   {
     return cam2ima( add_disto(ima2cam(p)) );
   }
diff --git a/src/aliceVision/hdr/CMakeLists.txt b/src/aliceVision/hdr/CMakeLists.txt
index 0a4685fd38..b657154ee1 100644
--- a/src/aliceVision/hdr/CMakeLists.txt
+++ b/src/aliceVision/hdr/CMakeLists.txt
@@ -1,6 +1,8 @@
 # Headers
 set(hdr_files_headers
+    LaguerreBACalibration.hpp
+    sampling.hpp
     rgbCurve.hpp
     RobertsonCalibrate.hpp
     hdrMerge.hpp
@@ -11,6 +13,8 @@ set(hdr_files_headers
 # Sources
 set(hdr_files_sources
+    LaguerreBACalibration.cpp
+    sampling.cpp
     rgbCurve.cpp
     RobertsonCalibrate.cpp
     hdrMerge.cpp
@@ -25,6 +29,7 @@ alicevision_add_library(aliceVision_hdr
     aliceVision_system
     aliceVision_image
     ${Boost_FILESYSTEM_LIBRARY}
+    ${CERES_LIBRARIES}
 )
 
 # Unit tests
diff --git a/src/aliceVision/hdr/DebevecCalibrate.cpp b/src/aliceVision/hdr/DebevecCalibrate.cpp
index 1106acff59..cca17cb04e 100644
--- a/src/aliceVision/hdr/DebevecCalibrate.cpp
+++
b/src/aliceVision/hdr/DebevecCalibrate.cpp @@ -10,152 +10,219 @@ #include #include #include +#include +#include + +#include + namespace aliceVision { namespace hdr { using T = Eigen::Triplet; -void DebevecCalibrate::process(const std::vector< std::vector< image::Image > > &ldrImageGroups, +bool DebevecCalibrate::process(const std::vector< std::vector> & imagePathsGroups, const std::size_t channelQuantization, - const std::vector< std::vector > ×, + const std::vector > ×, const int nbPoints, + const int calibrationDownscale, const bool fisheye, const rgbCurve &weight, const float lambda, rgbCurve &response) { - const int nbGroups = ldrImageGroups.size(); - const int nbImages = ldrImageGroups.front().size(); + const int nbGroups = imagePathsGroups.size(); + const int nbImages = imagePathsGroups.front().size(); const int samplesPerImage = nbPoints / (nbGroups*nbImages); - //set channels count always RGB - static const std::size_t channels = 3; + // Always 3 channels for the input images + static const std::size_t channelsCount = 3; - //initialize response + // Initialize response response = rgbCurve(channelQuantization); - for(unsigned int channel=0; channel tripletList_array[channelsCount]; - std::vector tripletList; + // Initialize intermediate buffers + for(unsigned int channel=0; channel < channelsCount; ++channel) + { + Vec & b = b_array[channel]; + b = Vec::Zero(nbPoints + channelQuantization + 1); + std::vector & tripletList = tripletList_array[channel]; tripletList.reserve(2 * nbPoints + 1 + 3 * channelQuantization); + } - ALICEVISION_LOG_TRACE("filling A and b matrices"); - - for(unsigned int g=0; g &imagePaths = imagePathsGroups[g]; + std::vector> ldrImagesGroup(imagePaths.size()); + + for (int i = 0; i < imagePaths.size(); i++) { - const std::vector< image::Image > &ldrImagesGroup = ldrImageGroups[g]; - const std::vector &ldrTimes = times[g]; - const std::size_t width = ldrImagesGroup.front().Width(); - const std::size_t height = ldrImagesGroup.front().Height(); - - // include the data-fitting equations - // if images are fisheye, we take only pixels inside a disk with a radius of image's minimum side - if(fisheye) + image::readImage(imagePaths[i], ldrImagesGroup[i], image::EImageColorSpace::SRGB); + if (calibrationDownscale != 1.0f) { - const std::size_t minSize = std::min(width, height) * 0.97; - const Vec2i center(width/2, height/2); + image::Image& img = ldrImagesGroup[i]; + unsigned int w = img.Width(); + unsigned int h = img.Height(); + unsigned int nw = (unsigned int)(floor(float(w) / calibrationDownscale)); + unsigned int nh = (unsigned int)(floor(float(h) / calibrationDownscale)); - const int xMin = std::ceil(center(0) - minSize/2); - const int yMin = std::ceil(center(1) - minSize/2); - const int xMax = std::floor(center(0) + minSize/2); - const int yMax = std::floor(center(1) + minSize/2); - const std::size_t maxDist2 = pow(minSize * 0.5, 2); + image::Image rescaled(nw, nh); - const int step = std::ceil(minSize / sqrt(samplesPerImage)); - for(unsigned int j=0; j & ldrTimes = times[g]; + const std::size_t width = ldrImagesGroup.front().Width(); + const std::size_t height = ldrImagesGroup.front().Height(); + + // If images are fisheye, we take only pixels inside a disk with a radius of image's minimum side + if(fisheye) + { + const std::size_t minSize = std::min(width, height) * 0.97; + const Vec2i center(width/2, height/2); + + const int xMin = std::ceil(center(0) - minSize/2); + const int yMin = std::ceil(center(1) - minSize/2); + const int xMax = std::floor(center(0) + 
minSize/2); + const int yMax = std::floor(center(1) + minSize/2); + const std::size_t maxDist2 = pow(minSize * 0.5, 2); - const image::Image &image = ldrImagesGroup.at(j); - const float time = std::log(ldrTimes.at(j)); - for(int y = yMin; y < yMax-step; y+=step) + const int step = std::ceil(minSize / sqrt(samplesPerImage)); + for(unsigned int j=0; j &image = ldrImagesGroup.at(j); + const float time = std::log(ldrTimes.at(j)); + for(int y = yMin; y < yMax-step; y+=step) + { + for(int x = xMin; x < xMax-step; x+=step) { - for(int x = xMin; x < xMax-step; x+=step) + std::size_t dist2 = pow(center(0)-x, 2) + pow(center(1)-y, 2); + + if(dist2 > maxDist2) { - std::size_t dist2 = pow(center(0)-x, 2) + pow(center(1)-y, 2); - if(dist2 > maxDist2) continue; + } + for (int channel = 0; channel < channelsCount; channel++) + { float sample = clamp(image(y, x)(channel), 0.f, 1.f); float w_ij = weight(sample, channel); std::size_t index = std::round(sample * (channelQuantization - 1)); - tripletList.push_back(T(count, index, w_ij)); - tripletList.push_back(T(count, channelQuantization + g*samplesPerImage + countValidPixels, -w_ij)); + tripletList_array[channel].push_back(T(count, index, w_ij)); + tripletList_array[channel].push_back(T(count, channelQuantization + g * samplesPerImage + countValidPixels, -w_ij)); - b(count) = w_ij * time; - count += 1; - countValidPixels += 1; + b_array[channel](count) = w_ij * time; } + + count += 1; + countValidPixels += 1; } } } - else + } + else + { + const int step = std::floor(width*height / samplesPerImage); + for(unsigned int j=0; j &image = ldrImagesGroup.at(j); + const float time = std::log(ldrTimes.at(j)); + + for(unsigned int i=0; i &image = ldrImagesGroup.at(j); - const float time = std::log(ldrTimes.at(j)); - for(unsigned int i=0; i> solver; solver.compute(A); - if(solver.info() != Eigen::Success) return; // decomposition failed - Vec x = solver.solve(b); - if(solver.info() != Eigen::Success) return; // solving failed - ALICEVISION_LOG_TRACE("system solved"); + // Check solver failure + if (solver.info() != Eigen::Success) + { + return false; + } - double relative_error = (A*x - b).norm() / b.norm(); - ALICEVISION_LOG_DEBUG("relative error is : " << relative_error); + Vec x = solver.solve(b_array[channel]); - for(std::size_t k=0; k > > &ldrImageGroups, + bool process(const std::vector> &ldrImageGroups, const std::size_t channelQuantization, - const std::vector< std::vector > ×, + const std::vector> ×, const int nbPoints, + const int calibrationDownscale, const bool fisheye, const rgbCurve &weight, const float lambda, diff --git a/src/aliceVision/hdr/GrossbergCalibrate.cpp b/src/aliceVision/hdr/GrossbergCalibrate.cpp index 5c821bbfad..59d0977332 100644 --- a/src/aliceVision/hdr/GrossbergCalibrate.cpp +++ b/src/aliceVision/hdr/GrossbergCalibrate.cpp @@ -21,15 +21,15 @@ GrossbergCalibrate::GrossbergCalibrate(const unsigned int dimension) _dimension = dimension; } -void GrossbergCalibrate::process(const std::vector< std::vector< image::Image > > &ldrImageGroups, +void GrossbergCalibrate::process(const std::vector>& imagePathsGroups, const std::size_t channelQuantization, const std::vector< std::vector > ×, const int nbPoints, const bool fisheye, rgbCurve &response) { - const int nbGroups = ldrImageGroups.size(); - const int nbImages = ldrImageGroups.front().size(); + const int nbGroups = imagePathsGroups.size(); + const int nbImages = imagePathsGroups.front().size(); const int samplesPerImage = nbPoints / (nbGroups*nbImages); //set channels count always RGB @@ 
-49,46 +49,46 @@ void GrossbergCalibrate::process(const std::vector< std::vector< image::Image channelQuantization) { - std::vector emorH; - emorH.assign(h, h + emorSize); - std::vector h0 = std::vector(emorH.begin(), emorH.end()); + std::vector emorH; + emorH.assign(h, h + emorSize); + std::vector h0 = std::vector(emorH.begin(), emorH.end()); - std::size_t step = emorSize/channelQuantization; - for(std::size_t k = 0; k emorH; - emorH.assign(h, h + emorSize); - std::vector h0 = std::vector(emorH.begin(), emorH.end()); + std::vector emorH; + emorH.assign(h, h + emorSize); + std::vector h0 = std::vector(emorH.begin(), emorH.end()); - std::size_t step = channelQuantization/emorSize; - hCurves[i].resize(channelQuantization, 0.0); - for(std::size_t k = 0; k(hCurves[i].data(), channelQuantization); @@ -103,7 +103,14 @@ void GrossbergCalibrate::process(const std::vector< std::vector< image::Image > &ldrImagesGroup = ldrImageGroups[g]; + const std::vector &imagePaths = imagePathsGroups[g]; + std::vector> ldrImagesGroup(imagePaths.size()); + + for (int i = 0; i < imagePaths.size(); i++) + { + image::readImage(imagePaths[i], ldrImagesGroup[i], image::EImageColorSpace::SRGB); + } + const std::vector &ldrTimes= times[g]; const std::size_t width = ldrImagesGroup.front().Width(); @@ -158,12 +165,19 @@ void GrossbergCalibrate::process(const std::vector< std::vector< image::Image > &ldrImagesGroup = ldrImageGroups[g]; + const std::vector &imagePaths = imagePathsGroups[g]; + std::vector> ldrImagesGroup(imagePaths.size()); + + for (int i = 0; i < imagePaths.size(); i++) + { + ALICEVISION_LOG_INFO("Load " << imagePaths[i]); + image::readImage(imagePaths[i], ldrImagesGroup[i], image::EImageColorSpace::SRGB); + } + const std::vector &ldrTimes= times[g]; const std::size_t width = ldrImagesGroup.front().Width(); @@ -184,7 +198,6 @@ void GrossbergCalibrate::process(const std::vector< std::vector< image::Image > > &ldrImageGroups, + void process(const std::vector>& imagePathsGroups, const std::size_t channelQuantization, const std::vector< std::vector > ×, const int nbPoints, diff --git a/src/aliceVision/hdr/LaguerreBACalibration.cpp b/src/aliceVision/hdr/LaguerreBACalibration.cpp new file mode 100644 index 0000000000..e5a7e41965 --- /dev/null +++ b/src/aliceVision/hdr/LaguerreBACalibration.cpp @@ -0,0 +1,309 @@ +// This file is part of the AliceVision project. +// Copyright (c) 2019 AliceVision contributors. +// This Source Code Form is subject to the terms of the Mozilla Public License, +// v. 2.0. If a copy of the MPL was not distributed with this file, +// You can obtain one at https://mozilla.org/MPL/2.0/. 
+ +#include "LaguerreBACalibration.hpp" +#include "sampling.hpp" + +#include +#include + +#include + +#include + +#include +#include +#include +#include + + +namespace aliceVision { +namespace hdr { + +using namespace aliceVision::image; + +LaguerreBACalibration::LaguerreBACalibration() +{ +} + +template +T laguerreFunction(const T& a, const T& x) +{ + // https://www.desmos.com/calculator/ib1y06t4pe + using namespace boost::math::constants; + constexpr double c = 2.0 / pi(); + return x + c * atan((a * sin(pi() * x)) / (1.0 - a * cos(pi() * x))); +} +template +T laguerreFunctionInv(const T& a, const T& x) +{ + return laguerreFunction(-a, x); +} + +/** + * + */ +struct HdrResidual +{ + HdrResidual(const Rgb& a, const Rgb& b) + : _colorA(a) + , _colorB(b) + {} + + template + bool operator()(const T* const laguerreParam, const T* const relativeWB_R, const T* const relativeWB_B, const T* const expA, const T* const expB, T* residual) const + { + static const int red = 0; + static const int green = 1; + static const int blue = 2; + /* + { + // GREEN + T greenParam = laguerreParam[green]; + T a = laguerreFunction(greenParam, T(_colorA(green))); + T b = laguerreFunction(greenParam, T(_colorB(green))); + residual[green] = abs(a * (*expB) / (*expA) - b) + abs(a - b * (*expA) / (*expB)); + } + { + // RED + T redParam = laguerreParam[green] + laguerreParam[red]; + T a = *relativeWB_R + laguerreFunction(redParam, T(_colorA(red))); + T b = *relativeWB_R + laguerreFunction(redParam, T(_colorB(red))); + residual[red] = abs(a * (*expB) / (*expA) - b) + abs(a - b * (*expA) / (*expB)); + } + { + // BLUE + T blueParam = laguerreParam[green] + laguerreParam[blue]; + T a = *relativeWB_B + laguerreFunction(blueParam, T(_colorA(blue))); + T b = *relativeWB_B + laguerreFunction(blueParam, T(_colorB(blue))); + residual[blue] = abs(a * (*expB) / (*expA) - b) + abs(a - b * (*expA) / (*expB)); + } + */ + { + // GREEN + T greenParam = laguerreParam[green]; + T colorA = T(_colorA(green)); + T colorB = T(_colorB(green)); + // T a = laguerreFunction(greenParam, colorA); + // T b = laguerreFunction(greenParam, colorB); + T errorCost = abs(laguerreFunctionInv(greenParam, laguerreFunction(greenParam, colorA) * (*expB) / (*expA)) - colorB) + + abs(laguerreFunctionInv(greenParam, laguerreFunction(greenParam, colorB) * (*expA) / (*expB)) - colorA); + residual[green] = errorCost; + } + { + // RED + T redParam = laguerreParam[green] + laguerreParam[red]; + T colorA = T(_colorA(red)); + T colorB = T(_colorB(red)); + // T a = *relativeWB_R * laguerreFunction(redParam, colorA); + // T b = *relativeWB_R * laguerreFunction(redParam, colorB); + T errorCost = abs(laguerreFunctionInv(redParam, laguerreFunction(redParam, colorA) * (*expB) / (*expA)) - colorB) + + abs(laguerreFunctionInv(redParam, laguerreFunction(redParam, colorB) * (*expA) / (*expB)) - colorA); + residual[red] = errorCost; + } + { + // BLUE + T blueParam = laguerreParam[green] + laguerreParam[blue]; + T colorA = T(_colorA(blue)); + T colorB = T(_colorB(blue)); + // T a = *relativeWB_B * laguerreFunction(blueParam, colorA); + // T b = *relativeWB_B * laguerreFunction(blueParam, colorB); + T errorCost = abs(laguerreFunctionInv(blueParam, laguerreFunction(blueParam, colorA) * (*expB) / (*expA)) - colorB) + + abs(laguerreFunctionInv(blueParam, laguerreFunction(blueParam, colorB) * (*expA) / (*expB)) - colorA); + residual[blue] = errorCost; + } + + return true; + } + +private: + const Rgb& _colorA; // TODO: T[3] + const Rgb& _colorB; +}; + +void LaguerreBACalibration::process( 
+    const std::vector<std::vector<std::string>>& imagePathsGroups,
+    const std::size_t channelQuantization,
+    std::vector<std::vector<float>>& cameraExposures,
+    int nbPoints,
+    int imageDownscale,
+    bool fisheye,
+    bool refineExposures,
+    rgbCurve &response)
+{
+  ALICEVISION_LOG_DEBUG("Extract color samples");
+  std::vector> samples;
+  extractSamples(samples, imagePathsGroups, cameraExposures, nbPoints, imageDownscale, fisheye);
+
+  ALICEVISION_LOG_DEBUG("Create exposure list");
+  std::map<std::pair<int, float>, double> exposures;
+  for(int i = 0; i < cameraExposures.size(); ++i)
+  {
+    const std::vector<float>& camExp = cameraExposures[i];
+    for (int j = 0; j < camExp.size(); ++j)
+    {
+      const auto& exp = camExp[j];
+
+      ALICEVISION_LOG_TRACE(" * " << imagePathsGroups[i][j] << ": " << exp);
+
+      // TODO: camId
+      exposures[std::make_pair(0, exp)] = exp;
+    }
+  }
+  std::array<double, 3> laguerreParam = { 0.0, 0.0, 0.0 };
+  std::array<double, 3> relativeWB = { 1.0, 1.0, 1.0 };
+
+  ALICEVISION_LOG_DEBUG("Create BA problem");
+  ceres::Problem problem;
+
+  // Should we expose the LOSS function parameter?
+  ceres::LossFunction* lossFunction = new ceres::HuberLoss(Square(0.12)); // 0.12 ~= 30/255
+
+  // In the Bundle Adjustment each image is optimized relative to the next one. The images are ordered by exposure.
+  // But the exposures need to be different between 2 consecutive images to get constraints.
+  // So if multiple images have been taken with the same parameters, we search for the next closest exposure (larger or smaller).
+  // For example a dataset of 6 images with 4 different exposures:
+  //      0     1        2        3     4        5
+  //    1/800 1/800 -- 1/400 -- 1/200 1/200 -- 1/100
+  //      \_________________/    \________________/
+  // Without duplicates the relative indexes would be: [1, 2, 3, 4, 5, 4]
+  // In this example with 2 duplicates, the relative indexes are: [2, 2, 3, 5, 5, 4]
+  std::vector<std::vector<int>> closestRelativeExpIndex;
+  {
+    closestRelativeExpIndex.resize(cameraExposures.size());
+    for(int i = 0; i < cameraExposures.size(); ++i)
+    {
+      const std::vector<float>& camExp = cameraExposures[i];
+      std::vector<int>& camRelativeExpIndex = closestRelativeExpIndex[i];
+      camRelativeExpIndex.resize(camExp.size(), -1);
+      for(int j = 0; j < camExp.size(); ++j)
+      {
+        // Search in the following indexes
+        for (int rj = j + 1; rj < camExp.size(); ++rj)
+        {
+          if (camExp[rj] != camExp[j])
+          {
+            camRelativeExpIndex[j] = rj;
+            break;
+          }
+        }
+        if(camRelativeExpIndex[j] != -1)
+          continue;
+        // Search in backward direction
+        for (int rj = j - 1; rj >= 0; --rj)
+        {
+          if (camExp[rj] != camExp[j])
+          {
+            camRelativeExpIndex[j] = rj;
+            break;
+          }
+        }
+      }
+    }
+    /*
+    // Log relative indexes
+    for (int i = 0; i < closestRelativeExpIndex.size(); ++i)
+    {
+      std::vector<int>& camRelativeExpIndex = closestRelativeExpIndex[i];
+      for (int j = 0; j < camRelativeExpIndex.size(); ++j)
+      {
+        ALICEVISION_LOG_TRACE(" * closestRelativeExpIndex[" << i << "][" << j << "] = " << closestRelativeExpIndex[i][j]);
+      }
+    }*/
+  }
+
+  // Convert selected samples into residual blocks
+  for (int g = 0; g < samples.size(); ++g)
+  {
+    std::vector& hdrSamples = samples[g];
+
+    ALICEVISION_LOG_TRACE("Group: " << g << ", hdr brackets: " << hdrSamples.size() << ", nb color samples: " << hdrSamples[0].colors.size());
+
+    for (int i = 0; i < hdrSamples[0].colors.size(); ++i)
+    {
+      for (int h = 0; h < hdrSamples.size(); ++h)
+      {
+        int hNext = closestRelativeExpIndex[g][h];
+        if(h == hNext)
+          throw std::runtime_error("Error in exposure chain. Relative exposure refers to itself (h: " + std::to_string(h) + ").");
+        if (hNext < h)
+        {
+          // The residual cost is bidirectional. So if 2 elements refer to each other, it would be a duplicate, so we have to skip one of them.
+          int hNextNext = closestRelativeExpIndex[g][hNext];
+          if(hNextNext == h)
+            continue;
+        }
+        ceres::CostFunction* cost_function = new ceres::AutoDiffCostFunction<HdrResidual, 3, 3, 1, 1, 1, 1>(
+            new HdrResidual(hdrSamples[h].colors[i], hdrSamples[hNext].colors[i]));
+
+        double& expA = exposures[std::make_pair(0, cameraExposures[g][h])];
+        double& expB = exposures[std::make_pair(0, cameraExposures[g][hNext])];
+
+        problem.AddResidualBlock(cost_function, lossFunction, laguerreParam.data(), &relativeWB[0], &relativeWB[2], &expA, &expB);
+      }
+    }
+  }
+
+  if (!refineExposures)
+  {
+    for(auto& exp: exposures)
+    {
+      problem.SetParameterBlockConstant(&exp.second);
+    }
+  }
+
+  ALICEVISION_LOG_INFO("BA Solve");
+
+  ceres::Solver::Options solverOptions;
+  solverOptions.linear_solver_type = ceres::SPARSE_NORMAL_CHOLESKY;
+  solverOptions.minimizer_progress_to_stdout = true;
+
+  ceres::Solver::Summary summary;
+  ceres::Solve(solverOptions, &problem, &summary);
+
+  ALICEVISION_LOG_TRACE(summary.BriefReport());
+
+  ALICEVISION_LOG_INFO("Laguerre params: " << laguerreParam);
+  ALICEVISION_LOG_INFO("Relative WB: " << relativeWB);
+  ALICEVISION_LOG_INFO("Exposures:");
+  for (const auto& expIt: exposures)
+  {
+    ALICEVISION_LOG_INFO(" * [" << expIt.first.first << ", " << expIt.first.second << "]: " << expIt.second);
+  }
+
+  for(unsigned int channel = 0; channel < 3; ++channel)
+  {
+    std::vector<float>& curve = response.getCurve(channel);
+    const double step = 1.0 / double(curve.size());
+    for (unsigned int i = 0; i < curve.size(); ++i)
+    {
+      const double offset = ((channel == 1) ? 0 : laguerreParam[1]); // non-green channels are relative to the green channel
+      curve[i] = relativeWB[channel] * laguerreFunction(offset + laguerreParam[channel], i * step);
+    }
+  }
+
+  if (refineExposures)
+  {
+    {
+      // TODO: realign exposures on input values?
+    }
+
+    // Update input exposures with optimized exposure values
+    for (int i = 0; i < cameraExposures.size(); ++i)
+    {
+      std::vector<float>& camExp = cameraExposures[i];
+      for (int j = 0; j < camExp.size(); ++j)
+      {
+        camExp[j] = exposures[std::make_pair(0, camExp[j])];
+      }
+    }
+  }
+}
+
+} // namespace hdr
+} // namespace aliceVision
diff --git a/src/aliceVision/hdr/LaguerreBACalibration.hpp b/src/aliceVision/hdr/LaguerreBACalibration.hpp
new file mode 100644
index 0000000000..c4951b53ba
--- /dev/null
+++ b/src/aliceVision/hdr/LaguerreBACalibration.hpp
@@ -0,0 +1,55 @@
+// This file is part of the AliceVision project.
+// Copyright (c) 2019 AliceVision contributors.
+// This Source Code Form is subject to the terms of the Mozilla Public License,
+// v. 2.0. If a copy of the MPL was not distributed with this file,
+// You can obtain one at https://mozilla.org/MPL/2.0/.
+
+#pragma once
+
+#include
+#include
+#include "emorCurve.hpp"
+#include "rgbCurve.hpp"
+
+namespace aliceVision {
+namespace hdr {
+
+/**
+ * The implementation is based on the following paper:
+ * "Mapping Colour in Image Stitching Applications", David Hasler, Sabine Susstrunk, 2003, JVCIR-2004
+ * https://infoscience.epfl.ch/record/50201/files/hs03_JVCIR.pdf
+ * Itself based on:
+ * "Radiometric Self Calibration", Tomoo Mitsunaga, Shree K.
Nayar, CVPR-1999
+ * http://www.cs.columbia.edu/CAVE/publications/pdfs/Mitsunaga_CVPR99.pdf
+ *
+ * Additional details are provided in:
+ * "Radiometric alignment and vignetting calibration", Pablo d'Angelo, ICVS 2007
+ * http://hugin.sourceforge.net/tech/icvs2007_final.pdf
+ */
+class LaguerreBACalibration
+{
+public:
+  explicit LaguerreBACalibration();
+
+  /**
+   * @brief Calibrate the camera response function and, optionally, refine the exposure values.
+   * @param[in] imagePathsGroups groups of LDR image paths
+   * @param[in] channelQuantization quantization of the response curve
+   * @param[in,out] cameraExposures exposure times, updated when refineExposures is set
+   * @param[in] nbPoints number of samples
+   * @param[in] imageDownscale downscale factor applied before sampling
+   * @param[in] fisheye restrict sampling to the fisheye image circle
+   * @param[in] refineExposures also optimize the exposure values
+   * @param[out] response camera response function
+   */
+  void process(
+    const std::vector<std::vector<std::string>>& imagePathsGroups,
+    const std::size_t channelQuantization,
+    std::vector<std::vector<float>>& cameraExposures,
+    int nbPoints,
+    int imageDownscale,
+    bool fisheye,
+    bool refineExposures,
+    rgbCurve &response);
+};
+
+} // namespace hdr
+} // namespace aliceVision
diff --git a/src/aliceVision/hdr/RobertsonCalibrate.cpp b/src/aliceVision/hdr/RobertsonCalibrate.cpp
index f582c76848..a7cc36127a 100644
--- a/src/aliceVision/hdr/RobertsonCalibrate.cpp
+++ b/src/aliceVision/hdr/RobertsonCalibrate.cpp
@@ -124,7 +124,7 @@ void RobertsonCalibrate::process(const std::vector< std::vector< image::Image #include #include + #include +#include
 
 namespace aliceVision {
 namespace hdr {
-
+
+/**
+ * f(x) = min + (max - min) * (1 / (1 + e^(10 * (x - mid) / width)))
+ * https://www.desmos.com/calculator/xamvguu8zw
+ *          ____
+ * sigmoid:     \________
+ *            sigMid
+ */
+inline float sigmoid(float zeroVal, float endVal, float sigwidth, float sigMid, float xval)
+{
+  return zeroVal + (endVal - zeroVal) * (1.0f / (1.0f + expf(10.0f * ((xval - sigMid) / sigwidth))));
+}
+
+/**
+ * https://www.desmos.com/calculator/cvu8s3rlvy
+ *
+ *                       ____
+ * sigmoid inv: _______/
+ *                   sigMid
+ */
+inline float sigmoidInv(float zeroVal, float endVal, float sigwidth, float sigMid, float xval)
+{
+  return zeroVal + (endVal - zeroVal) * (1.0f / (1.0f + expf(10.0f * ((sigMid - xval) / sigwidth))));
+}
+
 void hdrMerge::process(const std::vector< image::Image<image::RGBfColor> > &images,
-                         const std::vector<float> &times,
-                         const rgbCurve &weight,
-                         const rgbCurve &response,
-                         image::Image<image::RGBfColor> &radiance,
-                         float targetTime,
-                         bool robCalibrate,
-                         float clampedValueCorrection)
+                       const std::vector<float> &times,
+                       const rgbCurve &weight,
+                       const rgbCurve &response,
+                       image::Image<image::RGBfColor> &radiance,
+                       float targetCameraExposure)
 {
   //checks
   assert(!response.isEmpty());
   assert(!images.empty());
   assert(images.size() == times.size());
 
-  //reset radiance image
-  radiance.fill(image::RGBfColor(0.f, 0.f, 0.f));
-
-  //get images width, height
+  // get images width, height
   const std::size_t width = images.front().Width();
   const std::size_t height = images.front().Height();
 
-//  //min and max trusted values
-//  const float minTrustedValue = 0.0f - std::numeric_limits<float>::epsilon();
-//  const float maxTrustedValue = 1.0f + std::numeric_limits<float>::epsilon();
+  // resize and reset radiance image to 0.0
+  radiance.resize(width, height, true, image::RGBfColor(0.f, 0.f, 0.f));
+
+  ALICEVISION_LOG_TRACE("[hdrMerge] Images to fuse:");
+  for(int i = 0; i < images.size(); ++i)
+  {
+    ALICEVISION_LOG_TRACE(images[i].Width() << "x" << images[i].Height() << ", time: " << times[i]);
+  }
 
-  const float maxLum = 1000.0;
-  const float minLum = 0.0001;
+  rgbCurve weightShortestExposure = weight;
+  weightShortestExposure.freezeSecondPartValues();
+  rgbCurve weightLongestExposure = weight;
+  weightLongestExposure.freezeFirstPartValues();
 
 #pragma omp parallel for
   for(int y = 0; y < height; ++y)
@@ -52,81 +80,146 @@ void
hdrMerge::process(const std::vector< image::Image > &imag //for each pixels image::RGBfColor &radianceColor = radiance(y, x); -// double highValue = images.at(0)(y, x).maxCoeff(); -// double lowValue = images.at(images.size()-1)(y, x).minCoeff(); - for(std::size_t channel = 0; channel < 3; ++channel) { double wsum = 0.0; double wdiv = 0.0; - double highValue = images.at(0)(y, x)(channel); - double lowValue = images.at(images.size()-1)(y, x)(channel); -// float minTimeSaturation = std::numeric_limits::max(); -// float maxTimeSaturation = std::numeric_limits::min(); + // Merge shortest exposure + { + int exposureIndex = 0; + + // for each image + const double value = images[exposureIndex](y, x)(channel); + const double time = times[exposureIndex]; + // + // weightShortestExposure: _______ + // _______/ + // 0 1 + double w = std::max(0.001f, weightShortestExposure(value, channel)); + const double r = response(value, channel); - for(std::size_t i = 0; i < images.size(); ++i) + wsum += w * r / time; + wdiv += w; + } + // Merge intermediate exposures + for(std::size_t i = 1; i < images.size() - 1; ++i) { - //for each images + // for each image const double value = images[i](y, x)(channel); const double time = times[i]; - double w = std::max(0.f, weight(value, channel) - weight(0.05, 0)); + // + // weight: ____ + // _______/ \________ + // 0 1 + double w = std::max(0.001f, weight(value, channel)); const double r = response(value, channel); - wsum += w * r / time; wdiv += w; } + // Merge longest exposure + { + int exposureIndex = images.size() - 1; - double clampedHighValue = 1.0 - (1.0 / (1.0 + expf(10.0 * ((highValue - 0.9) / 0.2)))); - double clampedLowValue = 1.0 / (1.0 + expf(10.0 * ((lowValue - 0.005) / 0.01))); + // for each image + const double value = images[exposureIndex](y, x)(channel); + const double time = times[exposureIndex]; + // + // weightLongestExposure: ____________ + // \_______ + // 0 1 + double w = std::max(0.001f, weightLongestExposure(value, channel)); + const double r = response(value, channel); - if(!robCalibrate && clampedValueCorrection != 0.f) - { - radianceColor(channel) = (1.0 - clampedHighValue - clampedLowValue) * wsum / std::max(0.001, wdiv) * targetTime + clampedHighValue * maxLum * clampedValueCorrection + clampedLowValue * minLum * clampedValueCorrection; -// radianceColor(channel) = (1.0 - clampedHighValue - clampedLowValue) * wsum / std::max(0.001, wdiv) + clampedHighValue * maxLum * clampedValueCorrection + clampedLowValue * minLum * clampedValueCorrection; - } - else - { - radianceColor(channel) = wsum / std::max(0.001, wdiv) * targetTime; -// radianceColor(channel) = wsum / std::max(0.001, wdiv); + wsum += w * r / time; + wdiv += w; } + radianceColor(channel) = wsum / std::max(0.001, wdiv) * targetCameraExposure; + } + } + } +} +void hdrMerge::postProcessHighlight(const std::vector< image::Image > &images, + const std::vector ×, + const rgbCurve &weight, + const rgbCurve &response, + image::Image &radiance, + float targetCameraExposure, + float highlightCorrectionFactor, + float highlightTargetLux) +{ + //checks + assert(!response.isEmpty()); + assert(!images.empty()); + assert(images.size() == times.size()); -// //saturation detection -// if(value > maxTrustedValue) -// { -// minTimeSaturation = std::min(minTimeSaturation, time); -// } -// -// if(value < minTrustedValue) -// { -// maxTimeSaturation = std::max(maxTimeSaturation, time); -// } - -// //saturation correction -// if((wdiv == 0.0f) && -// (maxTimeSaturation > std::numeric_limits::min())) -// { 
-// wsum = minTrustedValue; -// wdiv = maxTimeSaturation; -// } -//2 -// if((wdiv == 0.0f) && -// (minTimeSaturation < std::numeric_limits::max())) -// { -// wsum = maxTrustedValue; -// wdiv = minTimeSaturation; -// } + if (highlightCorrectionFactor == 0.0f) + return; - } + const image::Image& inputImage = images.front(); + // Target Camera Exposure = 1 for EV-0 (iso=100, shutter=1, fnumber=1) => 2.5 lux + float highlightTarget = highlightTargetLux * targetCameraExposure * 2.5; + + // get images width, height + const std::size_t width = inputImage.Width(); + const std::size_t height = inputImage.Height(); + + image::Image isPixelClamped(width, height); + +#pragma omp parallel for + for (int y = 0; y < height; ++y) + { + for (int x = 0; x < width; ++x) + { + //for each pixels + image::RGBfColor &radianceColor = radiance(y, x); + + float& isClamped = isPixelClamped(y, x); + isClamped = 0.0f; + + for (std::size_t channel = 0; channel < 3; ++channel) + { + const float value = inputImage(y, x)(channel); + + // https://www.desmos.com/calculator/vpvzmidy1a + // ____ + // sigmoid inv: _______/ + // 0 1 + const float isChannelClamped = sigmoidInv(0.0f, 1.0f, /*sigWidth=*/0.08f, /*sigMid=*/0.95f, value); + isClamped += isChannelClamped; + } + isPixelClamped(y, x) /= 3.0; + } + } + + image::Image isPixelClamped_g(width, height); + image::ImageGaussianFilter(isPixelClamped, 1.0f, isPixelClamped_g, 3, 3); +#pragma omp parallel for + for (int y = 0; y < height; ++y) + { + for (int x = 0; x < width; ++x) + { + image::RGBfColor& radianceColor = radiance(y, x); + + double clampingCompensation = highlightCorrectionFactor * (isPixelClamped_g(y, x) / 3.0); + double clampingCompensationInv = (1.0 - clampingCompensation); + assert(clampingCompensation <= 1.0); + + for (std::size_t channel = 0; channel < 3; ++channel) + { + if(highlightTarget > radianceColor(channel)) + { + radianceColor(channel) = clampingCompensation * highlightTarget + clampingCompensationInv * radianceColor(channel); + } + } + } } - } } - } // namespace hdr } // namespace aliceVision diff --git a/src/aliceVision/hdr/hdrMerge.hpp b/src/aliceVision/hdr/hdrMerge.hpp index 95c4f29fd1..e30be56c2a 100644 --- a/src/aliceVision/hdr/hdrMerge.hpp +++ b/src/aliceVision/hdr/hdrMerge.hpp @@ -21,7 +21,7 @@ class hdrMerge { * @param images * @param radiance * @param times - * @param targetTime + * @param targetCameraExposure * @param response */ void process(const std::vector< image::Image > &images, @@ -29,53 +29,17 @@ class hdrMerge { const rgbCurve &weight, const rgbCurve &response, image::Image &radiance, - float targetTime, - bool robCalibrate = false, - float clampedValueCorrection = 1.f); - - /** - * @brief This function obtains the "average scene luminance" EV value - * from an image file. - * - * "average scene luminance" is the L (aka B) value mentioned in [1] - * We return the log2f value to get an EV value. - * We are using K=12.07488f and the exif-implied value of N=1/3.125 (see [1]). - * K=12.07488f is the 1.0592f * 11.4f value in pfscalibration's - * pfshdrcalibrate.cpp file. - * Based on [3] we can say that the value can also be 12.5 or even 14. - * Another reference for APEX is [4] where N is 0.3, closer to the APEX - * specification of 2^(-7/4)=0.2973. 
- *
- * [1] http://en.wikipedia.org/wiki/APEX_system
- * [2] http://en.wikipedia.org/wiki/Exposure_value
- * [3] http://en.wikipedia.org/wiki/Light_meter
- * [4] http://doug.kerr.home.att.net/pumpkin/#APEX
- *
- * This function tries first to obtain the shutter speed from either of
- * two exif tags (there is no standard between camera manifacturers):
- * ExposureTime or ShutterSpeedValue.
- * Same thing for f-number: it can be found in FNumber or in ApertureValue.
- *
- * F-number and shutter speed are mandatory in exif data for EV
- * calculation, iso is not.
- *
- * Function from Luminance HDR
- *
- * @param shutter
- * @param iso
- * @param aperture
- * @return "average scene luminance"
- */
- static float getExposure(float shutter, float iso, float aperture)
- {
-   //reflected-light meter calibration constant
-   const float K = 12.07488f;
-   return std::log2((shutter * iso) / (aperture * aperture * K));
+                float targetCameraExposure);
+
+  void postProcessHighlight(const std::vector< image::Image<image::RGBfColor> > &images,
+                const std::vector<float> &times,
+                const rgbCurve &weight,
+                const rgbCurve &response,
+                image::Image<image::RGBfColor> &radiance,
+                float targetCameraExposure,
+                float highlightCorrectionFactor,
+                float highlightTargetLux);
 
-    //EV = log2 (pow2(fstop) / shutter time)
-    //LV = LV = EV + log2 (ISO / 100) (LV light Value as exposure)
-    //return std::log2( ((aperture * aperture)/shutter) * (iso / 100) );
-  }
 };
 
 } // namespace hdr
diff --git a/src/aliceVision/hdr/rgbCurve.cpp b/src/aliceVision/hdr/rgbCurve.cpp
index 3d65ec261d..8ebc690981 100644
--- a/src/aliceVision/hdr/rgbCurve.cpp
+++ b/src/aliceVision/hdr/rgbCurve.cpp
@@ -9,8 +9,12 @@
 #include
 #include
 #include
+#include
+
 #include
+#include
+
 namespace aliceVision {
 namespace hdr {
@@ -36,7 +40,7 @@ void rgbCurve::setFunction(EFunctionType functionType)
     case EFunctionType::LINEAR: setLinear(); return;
     case EFunctionType::GAUSSIAN: setGaussian(); return;
     case EFunctionType::TRIANGLE: setTriangular(); return;
-    case EFunctionType::PLATEAU: setPlateau(); return;
+    case EFunctionType::PLATEAU: setPlateauSigmoid(); return;
     case EFunctionType::GAMMA: setGamma(); return;
     case EFunctionType::LOG10: setLog10(); return;
   }
@@ -100,23 +104,13 @@ void rgbCurve::setEmor()
   }
 }
 
-//void rgbCurve::setGaussian(double size)
-//{
-//  const float coefficient = 1.f / (static_cast<float>(getSize() - 1) / 4.0f);
-//  for(std::size_t i = 0; i < getSize(); ++i)
-//  {
-//    float factor = i / size * coefficient - 2.0f / size;
-//    setAllChannels(i, std::exp( -factor * factor ));
-//  }
-//}
-
 void rgbCurve::setGaussian(double mu, double sigma)
 {
+  // https://www.desmos.com/calculator/s3q3ow1mpy
   for(std::size_t i = 0; i < getSize(); ++i)
   {
     float factor = i / (static_cast<float>(getSize() - 1)) - mu;
     setAllChannels(i, std::exp( -factor * factor / (2.0 * sigma * sigma)));
-    // setAllChannels(i, std::max(0.0, std::exp( -factor * factor / (2.0 * sigma * sigma)) - 0.005));
   }
 }
 
@@ -131,26 +125,42 @@ void rgbCurve::setRobertsonWeight()
 
 void rgbCurve::setTriangular()
 {
-  const float coefficient = 1.f / static_cast<float>(getSize() - 1);
+  const float coefficient = 2.f / static_cast<float>(getSize() - 1);
   for(std::size_t i = 0; i < getSize(); ++i)
   {
     float value = i * coefficient;
-    if (value > 0.5f)
+    if (value > 1.0f)
     {
-      value = 1.0f - value;
+      value = 2.0f - value;
     }
-    setAllChannels(i, 2.f * value);
+    setAllChannels(i, value);
   }
 }
 
-void rgbCurve::setPlateau()
+void rgbCurve::setPlateau(float weight)
 {
+  // https://www.desmos.com/calculator/mouwyuvjvw
+
   const float coefficient = 1.f / static_cast<float>(getSize() - 1);
   for(std::size_t i
= 0; i < getSize(); ++i) { - setAllChannels(i, 1.0f - std::pow((2.0f * i * coefficient - 1.0f), 12.0f)); - // setAllChannels(i, 1.0f - std::pow((2.0f * i * coefficient - 1.0f), 4.0f)); + setAllChannels(i, 1.0f - std::pow((2.0f * i * coefficient - 1.0f), weight)); + } +} + +inline float plateauSigmoidFunction(float cA, float wA, float cB, float wB, float x) +{ + // https://www.desmos.com/calculator/aoojidncmi + return 1.0 / (1.0 + std::exp(10.0 * (x - cB) / wB)) - 1.0 / (1.0 + std::exp(10.0 * (x - cA) / wA)); +} + +void rgbCurve::setPlateauSigmoid(float cA, float wA, float cB, float wB) +{ + const float coefficient = 1.f / static_cast(getSize() - 1); + for (std::size_t i = 0; i < getSize(); ++i) + { + setAllChannels(i, plateauSigmoidFunction(cA, wA, cB, wB, i * coefficient)); } } @@ -179,6 +189,41 @@ void rgbCurve::inverseAllValues() } } +void rgbCurve::freezeFirstPartValues() +{ + for (auto &curve : _data) + { + std::size_t midIndex = (curve.size() / 2); + for (std::size_t i = 0; i < midIndex; ++i) + { + curve[i] = curve[midIndex]; + } + } +} + +void rgbCurve::freezeSecondPartValues() +{ + for (auto &curve : _data) + { + std::size_t midIndex = (curve.size() / 2); + for (std::size_t i = midIndex + 1; i < curve.size(); ++i) + { + curve[i] = curve[midIndex]; + } + } +} + +void rgbCurve::invertAndScaleSecondPart(float scale) +{ + for (auto &curve : _data) + { + for (std::size_t i = curve.size()/2; i < curve.size(); ++i) + { + curve[i] = (1.f - curve[i]) * scale; + } + } +} + void rgbCurve::setAllAbsolute() { for(auto &curve : _data) @@ -190,6 +235,53 @@ void rgbCurve::setAllAbsolute() } } +inline float gammaFunction(float value, float gamma) +{ + // 1/0.45 = 2.22 + if (value < 0.018) + { + return 4.5 * value; + } + else + { + return 1.099 * std::pow(value, 0.45) - 0.099; + } +} + +inline float inverseGammaFunction(float value, float gamma) +{ + if (value <= 0.0812f) + { + return value / 4.5f; + } + else + { + return pow((value + 0.099f) / 1.099f, gamma); + } +} + +void rgbCurve::applyGamma(float gamma) +{ + for (auto &curve : _data) + { + for (auto &value : curve) + { + value = gammaFunction(value, gamma); + } + } +} + +void rgbCurve::applyGammaInv(float gamma) +{ + for (auto &curve : _data) + { + for (auto &value : curve) + { + value = inverseGammaFunction(value, gamma); + } + } +} + void rgbCurve::normalize() { for(auto &curve : _data) @@ -388,6 +480,33 @@ void rgbCurve::write(const std::string &path, const std::string &name) const file.close(); } +void rgbCurve::writeHtml(const std::string& path, const std::string& title) const +{ + using namespace htmlDocument; + + std::vector xBin(getCurveRed().size()); + std::iota(xBin.begin(), xBin.end(), 0); + + std::pair< std::pair, std::pair > range = autoJSXGraphViewport(xBin, getCurveRed()); + + JSXGraphWrapper jsxGraph; + jsxGraph.init(title, 800, 600); + + jsxGraph.addXYChart(xBin, getCurveRed(), "line", "ff0000"); + jsxGraph.addXYChart(xBin, getCurveGreen(), "line", "00ff00"); + jsxGraph.addXYChart(xBin, getCurveBlue(), "line", "0000ff"); + + jsxGraph.UnsuspendUpdate(); + jsxGraph.setViewport(range); + jsxGraph.close(); + + // save the reconstruction Log + std::ofstream htmlFileStream(path.c_str()); + htmlFileStream << htmlDocumentStream(title).getDoc(); + htmlFileStream << jsxGraph.toStr(); +} + + void rgbCurve::read(const std::string &path) { std::ifstream file(path); diff --git a/src/aliceVision/hdr/rgbCurve.hpp b/src/aliceVision/hdr/rgbCurve.hpp index 30171dad18..6b6f192e77 100644 --- a/src/aliceVision/hdr/rgbCurve.hpp +++ 
b/src/aliceVision/hdr/rgbCurve.hpp @@ -164,7 +164,6 @@ class rgbCurve /** *@brief Set curves to gaussian */ -// void setGaussian(double mu = 0.5, double sigma = 1.0 / (4.0 * sqrt(2.0))); void setGaussian(double mu = 0.5, double sigma = 1.0 / (5.0 * sqrt(2.0))); /** @@ -178,9 +177,11 @@ class rgbCurve void setTriangular(); /** - *@brief Set curves to plateau + * @brief Set curves to plateau */ - void setPlateau(); + void setPlateau(float weight = 8.0f); + + void setPlateauSigmoid(float cA = 0.2, float wA = 0.5, float cB = 0.85, float wB = 0.22); /** *@brief Set curves to log10 @@ -193,11 +194,21 @@ class rgbCurve */ void inverseAllValues(); + void freezeFirstPartValues(); + + void freezeSecondPartValues(); + + void invertAndScaleSecondPart(float scale); + /** * @brief change all value of the image by their absolute value */ void setAllAbsolute(); + void applyGamma(float gamma = 2.2); + + void applyGammaInv(float gamma = 2.2); + /** * @brief normalize the curve */ @@ -293,6 +304,12 @@ class rgbCurve */ void write(const std::string &path, const std::string &name = "rgbCurve") const; + /** + * @brief Write in an html file + * @param[in] path + */ + void writeHtml(const std::string &path, const std::string& title) const; + /** * @brief Read and fill curves from a csv file * @param[in] path diff --git a/src/aliceVision/hdr/sampling.cpp b/src/aliceVision/hdr/sampling.cpp new file mode 100644 index 0000000000..2e2a1420e3 --- /dev/null +++ b/src/aliceVision/hdr/sampling.cpp @@ -0,0 +1,117 @@ +// This file is part of the AliceVision project. +// Copyright (c) 2019 AliceVision contributors. +// This Source Code Form is subject to the terms of the Mozilla Public License, +// v. 2.0. If a copy of the MPL was not distributed with this file, +// You can obtain one at https://mozilla.org/MPL/2.0/. 
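// [editor's note] A standalone sketch of the plateau-sigmoid weighting curve
// introduced in rgbCurve.cpp above (EFunctionType::PLATEAU now maps to
// setPlateauSigmoid). The curve is the difference of two logistic functions:
// near zero at both ends of [0,1] and near one in between. The constants match
// the setPlateauSigmoid defaults; the sampling loop is illustrative only.
#include <cmath>
#include <cstdio>

static float plateauSigmoid(float cA, float wA, float cB, float wB, float x)
{
    return 1.f / (1.f + std::exp(10.f * (x - cB) / wB))
         - 1.f / (1.f + std::exp(10.f * (x - cA) / wA));
}

int main()
{
    const float cA = 0.2f, wA = 0.5f, cB = 0.85f, wB = 0.22f;
    for (int i = 0; i <= 10; ++i)
    {
        const float x = i / 10.f;
        std::printf("w(%.1f) = %.3f\n", x, plateauSigmoid(cA, wA, cB, wB, x));
    }
    return 0;
}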
+
+#include "sampling.hpp"
+
+#include
+#include
+
+#include
+
+
+namespace aliceVision {
+namespace hdr {
+
+using namespace aliceVision::image;
+
+void extractSamples(
+    std::vector>& out_samples,
+    const std::vector>& imagePathsGroups,
+    const std::vector< std::vector >& cameraExposures,
+    int nbPoints,
+    int calibrationDownscale,
+    bool fisheye
+  )
+{
+    const int nbGroups = imagePathsGroups.size();
+    out_samples.resize(nbGroups);
+
+    int overallNbImages = 0;
+    for (const auto& imgPaths : imagePathsGroups)
+    {
+        overallNbImages += imgPaths.size();
+    }
+    const int samplesPerImage = nbPoints / overallNbImages;
+
+    ALICEVISION_LOG_TRACE("samplesPerImage: " << samplesPerImage);
+
+    #pragma omp parallel for num_threads(3)
+    for (int g = 0; g < nbGroups; ++g)
+    {
+        std::vector& out_hdrSamples = out_samples[g];
+
+        const std::vector &imagePaths = imagePathsGroups[g];
+        out_hdrSamples.resize(imagePaths.size());
+
+        const std::vector& exposures = cameraExposures[g];
+
+        for (unsigned int i = 0; i < imagePaths.size(); ++i)
+        {
+            out_hdrSamples[i].exposure = exposures[i];
+            out_hdrSamples[i].colors.reserve(samplesPerImage);
+            std::vector>& colors = out_hdrSamples[i].colors;
+
+            Image img;
+            readImage(imagePaths[i], img, EImageColorSpace::LINEAR);
+            if (calibrationDownscale != 1.0f)
+            {
+                unsigned int w = img.Width();
+                unsigned int h = img.Height();
+                unsigned int nw = (unsigned int)(floor(float(w) / calibrationDownscale));
+                unsigned int nh = (unsigned int)(floor(float(h) / calibrationDownscale));
+
+                image::Image rescaled(nw, nh);
+
+                // single filtered resize into the downscaled buffer
+                const oiio::ImageBuf inBuf(oiio::ImageSpec(w, h, 3, oiio::TypeDesc::FLOAT), img.data());
+                oiio::ImageBuf outBuf(oiio::ImageSpec(nw, nh, 3, oiio::TypeDesc::FLOAT), rescaled.data());
+                oiio::ImageBufAlgo::resize(outBuf, inBuf);
+
+                img.swap(rescaled);
+            }
+
+            const std::size_t width = img.Width();
+            const std::size_t height = img.Height();
+
+            const std::size_t minSize = std::min(width, height) * 0.97;
+            const Vec2i center(width / 2, height / 2);
+
+            const int xMin = std::ceil(center(0) - minSize / 2);
+            const int yMin = std::ceil(center(1) - minSize / 2);
+            const int xMax = std::floor(center(0) + minSize / 2);
+            const int yMax = std::floor(center(1) + minSize / 2);
+            const std::size_t maxDist2 = pow(minSize * 0.5, 2);
+
+            const int step = std::ceil(minSize / sqrt(samplesPerImage));
+
+            // extract samples
+            for (int y = yMin; y <= yMax - step; y += step)
+            {
+                for (int x = xMin; x <= xMax - step; x += step)
+                {
+                    if (fisheye)
+                    {
+                        std::size_t dist2 = pow(center(0) - x, 2) + pow(center(1) - y, 2);
+                        if (dist2 > maxDist2)
+                            continue;
+                    }
+                    RGBfColor& c = img(y, x);
+                    colors.push_back(Rgb(c(0), c(1), c(2)));
+                }
+            }
+        }
+    }
+}
+
+
+
+} // namespace hdr
+} // namespace aliceVision
diff --git a/src/aliceVision/hdr/sampling.hpp b/src/aliceVision/hdr/sampling.hpp
new file mode 100644
index 0000000000..c7795f5932
--- /dev/null
+++ b/src/aliceVision/hdr/sampling.hpp
@@ -0,0 +1,34 @@
+// This file is part of the AliceVision project.
+// Copyright (c) 2019 AliceVision contributors.
+// This Source Code Form is subject to the terms of the Mozilla Public License,
+// v. 2.0. If a copy of the MPL was not distributed with this file,
+// You can obtain one at https://mozilla.org/MPL/2.0/.
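// [editor's note] Back-of-envelope check of the sampling grid built by
// extractSamples above: the step is chosen so that roughly samplesPerImage
// points land on a centered square covering 97% of the smaller image side.
// The image size and sample budget below are hypothetical.
#include <algorithm>
#include <cmath>
#include <cstdio>

int main()
{
    const int width = 6000, height = 4000; // hypothetical input image
    const int samplesPerImage = 400;       // nbPoints / overall image count

    const double minSize = std::min(width, height) * 0.97;                       // 3880 px
    const int step = (int)std::ceil(minSize / std::sqrt((double)samplesPerImage)); // 194 px
    const int perSide = (int)(minSize / step);                                     // ~20

    // roughly perSide^2 samples per image; fewer with the fisheye circular mask
    std::printf("step = %d px, ~%d samples\n", step, perSide * perSide);
    return 0;
}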
+ +#pragma once + +#include +#include + +namespace aliceVision { +namespace hdr { + + +struct ImageSamples +{ + std::vector> colors; + int camId = 0; + double exposure = 0.0; +}; + + +void extractSamples( + std::vector>& out_samples, + const std::vector>& imagePathsGroups, + const std::vector< std::vector >& cameraExposures, + int nbPoints, + int calibrationDownscale, + bool fisheye); + + +} // namespace hdr +} // namespace aliceVision diff --git a/src/aliceVision/image/Image.hpp b/src/aliceVision/image/Image.hpp index fafb7ce2b8..7d69d0db63 100644 --- a/src/aliceVision/image/Image.hpp +++ b/src/aliceVision/image/Image.hpp @@ -182,6 +182,10 @@ namespace aliceVision { return ( *this ); } + inline Base& GetMat() + { + return (*this); + } //-- accessors/getters methods //------------------------------ diff --git a/src/aliceVision/image/Sampler.hpp b/src/aliceVision/image/Sampler.hpp index ef0be3204a..7e83a8423b 100644 --- a/src/aliceVision/image/Sampler.hpp +++ b/src/aliceVision/image/Sampler.hpp @@ -430,7 +430,8 @@ struct Sampler2d _sampler( dx , coefs_x ) ; _sampler( dy , coefs_y ) ; - typename RealPixel::real_type res(0) ; + // Default color constructor init all channels to zero + typename RealPixel::real_type res; // integer position of sample (x,y) const int grid_x = static_cast( floor( x ) ); @@ -476,7 +477,15 @@ struct Sampler2d // If value too small, it should be so instable, so return the sampled value if( total_weight <= 0.2 ) { - return T(); + int row = floor(y); + int col = floor(x); + + if (row < 0) row = 0; + if (col < 0) col = 0; + if (row >= im_height) row = im_height - 1; + if (col >= im_width) col = im_width - 1; + + return src(row, col); } if( total_weight != 1.0 ) diff --git a/src/aliceVision/image/image_test.cpp b/src/aliceVision/image/image_test.cpp index 3122ede4fc..971b575529 100644 --- a/src/aliceVision/image/image_test.cpp +++ b/src/aliceVision/image/image_test.cpp @@ -71,7 +71,7 @@ BOOST_AUTO_TEST_CASE(Image_PixelTypes) // RGBColor c(0); // Not accepted because can cause bad pixel affectation value (mixed type...) 
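// [editor's note] Standalone sketch of the out-of-bounds-safe fallback added
// to Sampler2d above: when the accumulated interpolation weight is too small
// (<= 0.2), the patch now returns the nearest in-bounds source pixel instead
// of a default-constructed (black) value. The image type and its (row, col)
// accessor are assumptions, not the project's exact types.
#include <algorithm>
#include <cmath>

template <typename ImageT, typename PixelT = float>
PixelT nearestClampedPixel(const ImageT& src, int imWidth, int imHeight,
                           double x, double y)
{
    // clamp the integer sample position into the valid pixel range (C++17)
    const int row = std::clamp(static_cast<int>(std::floor(y)), 0, imHeight - 1);
    const int col = std::clamp(static_cast<int>(std::floor(x)), 0, imWidth - 1);
    return src(row, col);
}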
// The following issue must used : (at your own risk) RGBColor b(static_cast(0)); - RGBAColor d(BLACK); + RGBAColor d(BLACK, 255); } BOOST_AUTO_TEST_CASE(Image_ImageConverter) diff --git a/src/aliceVision/image/io.cpp b/src/aliceVision/image/io.cpp index 2974290228..dc30c448e0 100644 --- a/src/aliceVision/image/io.cpp +++ b/src/aliceVision/image/io.cpp @@ -74,6 +74,27 @@ std::istream& operator>>(std::istream& in, EImageFileType& imageFileType) return in; } +bool isSupported(const std::string &ext) +{ + static const std::string extensionList = oiio::get_string_attribute("extension_list"); + std::vector supportedExtensions; + + std::vector supportedFormat; + boost::split(supportedFormat, extensionList, boost::is_any_of(";"), boost::token_compress_on); + for(const std::string& format: supportedFormat) + { + std::vector extensions; + const std::string str = format.substr(format.find(":")+1); + boost::split(extensions, str, boost::is_any_of(","), boost::token_compress_on); + for(std::string& extension: extensions) + supportedExtensions.push_back(extension.insert(0, ".")); + } + + const auto start = supportedExtensions.begin(); + const auto end = supportedExtensions.end(); + return (std::find(start, end, boost::to_lower_copy(ext)) != end); +} + // Warning: type conversion problems from string to param value, we may lose some metadata with string maps oiio::ParamValueList getMetadataFromMap(const std::map& metadataMap) { @@ -147,6 +168,11 @@ void getBufferFromImage(Image& image, oiio::ImageBuf& buffer) getBufferFromImage(image, oiio::TypeDesc::UINT8, 1, buffer); } +void getBufferFromImage(Image& image, oiio::ImageBuf& buffer) +{ + getBufferFromImage(image, oiio::TypeDesc::FLOAT, 4, buffer); +} + void getBufferFromImage(Image& image, oiio::ImageBuf& buffer) { getBufferFromImage(image, oiio::TypeDesc::UINT8, 4, buffer); @@ -348,6 +374,11 @@ void readImage(const std::string& path, Image& image, EImageColor readImage(path, oiio::TypeDesc::UINT8, 1, image, imageColorSpace); } +void readImage(const std::string& path, Image& image, EImageColorSpace imageColorSpace) +{ + readImage(path, oiio::TypeDesc::FLOAT, 4, image, imageColorSpace); +} + void readImage(const std::string& path, Image& image, EImageColorSpace imageColorSpace) { readImage(path, oiio::TypeDesc::UINT8, 4, image, imageColorSpace); @@ -368,6 +399,11 @@ void writeImage(const std::string& path, const Image& image, EIma writeImage(path, oiio::TypeDesc::UINT8, 1, image, imageColorSpace, metadata); } +void writeImage(const std::string& path, const Image& image, EImageColorSpace imageColorSpace, const oiio::ParamValueList& metadata) +{ + writeImage(path, oiio::TypeDesc::FLOAT, 4, image, imageColorSpace, metadata); +} + void writeImage(const std::string& path, const Image& image, EImageColorSpace imageColorSpace, const oiio::ParamValueList& metadata) { writeImage(path, oiio::TypeDesc::UINT8, 4, image, imageColorSpace, metadata); @@ -378,6 +414,11 @@ void writeImage(const std::string& path, const Image& image, EImageCo writeImage(path, oiio::TypeDesc::FLOAT, 3, image, imageColorSpace, metadata); } +void writeImage(const std::string& path, const Image& image, EImageColorSpace imageColorSpace, const oiio::ParamValueList& metadata) +{ + writeImage(path, oiio::TypeDesc::FLOAT, 1, image, imageColorSpace, metadata); +} + void writeImage(const std::string& path, const Image& image, EImageColorSpace imageColorSpace, const oiio::ParamValueList& metadata) { writeImage(path, oiio::TypeDesc::UINT8, 3, image, imageColorSpace, metadata); diff --git 
a/src/aliceVision/image/io.hpp b/src/aliceVision/image/io.hpp
index 47e8974455..b00d56ec72 100644
--- a/src/aliceVision/image/io.hpp
+++ b/src/aliceVision/image/io.hpp
@@ -13,6 +13,7 @@
 #include
 #include
+#include
 #include

 namespace oiio = OIIO;
@@ -78,6 +79,13 @@ std::ostream& operator<<(std::ostream& os, EImageFileType imageFileType);
 */
 std::istream& operator>>(std::istream& in, EImageFileType& imageFileType);

+/**
+ * @brief Check if the input image extension is supported by OpenImageIO, i.e. it exists in extension_list from imageio.h
+ * @param[in] ext - image extension
+ * @return true if valid extension
+ */
+bool isSupported(const std::string &ext);
+
 /**
 * @brief convert a metadata string map into an oiio::ParamValueList
 * @param[in] metadataMap string map
@@ -117,6 +125,7 @@ void readImageMetadata(const std::string& path, int& width, int& height, std::ma
 */
 void getBufferFromImage(Image& image, oiio::ImageBuf& buffer);
 void getBufferFromImage(Image& image, oiio::ImageBuf& buffer);
+void getBufferFromImage(Image& image, oiio::ImageBuf& buffer);
 void getBufferFromImage(Image& image, oiio::ImageBuf& buffer);
 void getBufferFromImage(Image& image, oiio::ImageBuf& buffer);
 void getBufferFromImage(Image& image, oiio::ImageBuf& buffer);
@@ -129,6 +138,7 @@ void getBufferFromImage(Image& image, oiio::ImageBuf& buffer);
 */
 void readImage(const std::string& path, Image& image, EImageColorSpace imageColorSpace);
 void readImage(const std::string& path, Image& image, EImageColorSpace imageColorSpace);
+void readImage(const std::string& path, Image& image, EImageColorSpace imageColorSpace);
 void readImage(const std::string& path, Image& image, EImageColorSpace imageColorSpace);
 void readImage(const std::string& path, Image& image, EImageColorSpace imageColorSpace);
 void readImage(const std::string& path, Image& image, EImageColorSpace imageColorSpace);
@@ -138,7 +148,9 @@ void readImage(const std::string& path, Image& image, EImageColorSpace
 * @param[in] path The given path to the image
 * @param[in] image The output image buffer
 */
+void writeImage(const std::string& path, const Image& image, EImageColorSpace imageColorSpace, const oiio::ParamValueList& metadata = oiio::ParamValueList());
 void writeImage(const std::string& path, const Image& image, EImageColorSpace imageColorSpace, const oiio::ParamValueList& metadata = oiio::ParamValueList());
+void writeImage(const std::string& path, const Image& image, EImageColorSpace imageColorSpace, const oiio::ParamValueList& metadata = oiio::ParamValueList());
 void writeImage(const std::string& path, const Image& image, EImageColorSpace imageColorSpace, const oiio::ParamValueList& metadata = oiio::ParamValueList());
 void writeImage(const std::string& path, const Image& image, EImageColorSpace imageColorSpace, const oiio::ParamValueList& metadata = oiio::ParamValueList());
 void writeImage(const std::string& path, const Image& image, EImageColorSpace imageColorSpace, const oiio::ParamValueList& metadata = oiio::ParamValueList());
diff --git a/src/aliceVision/image/pixelTypes.hpp b/src/aliceVision/image/pixelTypes.hpp
index 4b2850d2dc..ad8c9036e7 100644
--- a/src/aliceVision/image/pixelTypes.hpp
+++ b/src/aliceVision/image/pixelTypes.hpp
@@ -21,8 +21,8 @@ namespace aliceVision
   template
   class Rgb : public Eigen::Matrix
   {
-    typedef Eigen::Matrix Base;
-    typedef T TBase;
+    using Base = Eigen::Matrix;
+    using TBase = T;
   public:
     //------------------------------
@@ -147,6 +147,29 @@ namespace aliceVision
       return os;
     }

+    /**
+    * @brief Elementwise subtraction
+    * @param other the other element to subtract
+    * @return Rgb color after subtraction
+    * @note This does not modify the Rgb value (i.e. it only returns a modified copy)
+    */
+    inline Rgb operator -( const Rgb& other ) const
+    {
+      return Rgb( ((*this)(0) - other(0)), ((*this)(1) - other(1)), ((*this)(2) - other(2)));
+    }
+
+    /**
+    * @brief Elementwise addition
+    * @param other the other element to add
+    * @return Rgb color after addition
+    * @note This does not modify the Rgb value (i.e. it only returns a modified copy)
+    */
+    inline Rgb operator +( const Rgb& other ) const
+    {
+      return Rgb( ((*this)(0) + other(0)), ((*this)(1) + other(1)), ((*this)(2) + other(2)));
+    }
+
+
     /**
     * @brief scalar division
     * @param val Scalar divisor factor
@@ -177,9 +200,9 @@ namespace aliceVision
   };

   /// Instantiation for unsigned char color component
-  typedef Rgb RGBColor;
+  using RGBColor = Rgb;
   /// Instantiation for float color component
-  typedef Rgb RGBfColor;
+  using RGBfColor = Rgb;

   /**
   * @brief RGBA templated pixel type
   */
   template
   class Rgba : public Eigen::Matrix
   {
-    typedef Eigen::Matrix Base;
+    using Base = Eigen::Matrix;
   public:
     //------------------------------
@@ -199,10 +222,9 @@ namespace aliceVision
     * @param blue component value
     * @param alpha component value
     */
-    inline Rgba( const T red, const T green, const T blue, const T alpha = static_cast( 1 ) )
+    inline Rgba( const T red, const T green, const T blue, const T alpha = T(1) )
      : Base( red, green, blue, alpha )
     {
-
     }

     /**
@@ -212,29 +234,38 @@ namespace aliceVision
     explicit inline Rgba( const Base& val )
      : Base( val )
     {
-
     }

     /**
-    * @brief RGB constructor with default alpha value
+    * @brief RGBA constructor with default alpha value set to 1
     * @param val Value to set in each RGB component
     * @note This is equivalent to RGBA( val , val , val , 1 )
     */
-    explicit inline Rgba( const T val = 0 )
-      : Base( val, val, val, static_cast( 1 ) )
+    explicit inline Rgba( const T val )
+      : Base( val, val, val, T(1) )
     {
+    }

+    /**
+    * @brief Default RGBA constructor sets all channels to zero (including the alpha channel)
+    * @warning The alpha channel is initialized to 0.
+    * It is used in generic/templated code like "sampler"
+    * which creates an empty color and accumulates color contributions.
+    */
+    explicit inline Rgba()
+      : Base( T(0), T(0), T(0), T(0) )
+    {
     }

     /**
     * @brief Copy constructor
     * @param val Source RGBA value
     */
-    inline Rgba( const RGBColor & val )
-      : Base( val.r(), val.g(), val.b(), static_cast( 1 ) )
+    inline Rgba( const RGBColor & val, const T alpha )
+      : Base( val.r(), val.g(), val.b(), alpha )
     {
-
     }
+
     //-- construction method
     //------------------------------
@@ -362,7 +393,11 @@ namespace aliceVision
                    T( ( Z )( *this )( 3 ) * val ) );
     }
   };
-  typedef Rgba RGBAColor;
+
+  /// Instantiation for unsigned char color component
+  using RGBAColor = Rgba;
+  /// Instantiation for float color component
+  using RGBAfColor = Rgba;

   const RGBColor WHITE( 255, 255, 255 );
   const RGBColor BLACK( 0, 0, 0 );
diff --git a/src/aliceVision/multiview/homographyKernelSolver.cpp b/src/aliceVision/multiview/homographyKernelSolver.cpp
index 17921982ba..9d88b9fa22 100644
--- a/src/aliceVision/multiview/homographyKernelSolver.cpp
+++ b/src/aliceVision/multiview/homographyKernelSolver.cpp
@@ -7,6 +7,7 @@
 // You can obtain one at https://mozilla.org/MPL/2.0/.

 #include "homographyKernelSolver.hpp"
+#include

 namespace aliceVision {
 namespace homography {
@@ -14,37 +15,62 @@ namespace kernel {

 /// Setup the Direct Linear Transform.
/// Use template in order to support fixed or dynamic sized matrix. -/// Allow solve H as homogeneous(x2) = H homogeneous(x1) +/// Allow solve H as homogeneous(p2) = H homogeneous(p1) template -void BuildActionMatrix(Matrix & L, const Mat &x, const Mat &y) { +void BuildActionMatrix(Matrix & L, const Mat &p1, const Mat &p2) { - const Mat::Index n = x.cols(); + const Mat::Index n = p1.cols(); for (Mat::Index i = 0; i < n; ++i) { Mat::Index j = 2 * i; - L(j, 0) = x(0, i); - L(j, 1) = x(1, i); + L(j, 0) = p1(0, i); + L(j, 1) = p1(1, i); L(j, 2) = 1.0; - L(j, 6) = -y(0, i) * x(0, i); - L(j, 7) = -y(0, i) * x(1, i); - L(j, 8) = -y(0, i); + L(j, 6) = -p2(0, i) * p1(0, i); + L(j, 7) = -p2(0, i) * p1(1, i); + L(j, 8) = -p2(0, i); ++j; - L(j, 3) = x(0, i); - L(j, 4) = x(1, i); + L(j, 3) = p1(0, i); + L(j, 4) = p1(1, i); L(j, 5) = 1.0; - L(j, 6) = -y(1, i) * x(0, i); - L(j, 7) = -y(1, i) * x(1, i); - L(j, 8) = -y(1, i); + L(j, 6) = -p2(1, i) * p1(0, i); + L(j, 7) = -p2(1, i) * p1(1, i); + L(j, 8) = -p2(1, i); } } -void FourPointSolver::Solve(const Mat &x, const Mat &y, vector *Hs) { - assert(2 == x.rows()); - assert(4 <= x.cols()); - assert(x.rows() == y.rows()); - assert(x.cols() == y.cols()); +void FourPointSolver::Solve(const Mat &p1, const Mat &p2, vector *Hs) { + assert(2 == p1.rows()); + assert(4 <= p2.cols()); + assert(p1.rows() == p2.rows()); + assert(p1.cols() == p2.cols()); - Mat::Index n = x.cols(); + Mat::Index n = p1.cols(); + + /* Normalize input */ + Mat p1_local = p1; + Mat p2_local = p2; + + auto p1x = p1_local.block(0, 0, 1, n); + auto p1y = p1_local.block(1, 0, 1, n); + auto p2x = p2_local.block(0, 0, 1, n); + auto p2y = p2_local.block(1, 0, 1, n); + + double p1x_mean = p1x.mean(); + double p1y_mean = p1y.mean(); + double p2x_mean = p2x.mean(); + double p2y_mean = p2y.mean(); + + p1x.array() -= p1x_mean; + p1y.array() -= p1y_mean; + p2x.array() -= p2x_mean; + p2y.array() -= p2y_mean; + + double p1_dist = sqrt((p1x * p1x.transpose() + p1y * p1y.transpose())(0,0)); + double p2_dist = sqrt((p2x * p2x.transpose() + p2y * p2y.transpose())(0,0)); + + p1_local /= p1_dist; + p2_local /= p2_dist; Vec9 h; if (n == 4) { @@ -52,15 +78,38 @@ void FourPointSolver::Solve(const Mat &x, const Mat &y, vector *Hs) { // Eigen and the compiler doing the maximum of optimization. 
typedef Eigen::Matrix Mat16_9; Mat16_9 L = Mat::Zero(16, 9); - BuildActionMatrix(L, x, y); + BuildActionMatrix(L, p1_local, p2_local); Nullspace(&L, &h); } else { MatX9 L = Mat::Zero(n * 2, 9); - BuildActionMatrix(L, x, y); + BuildActionMatrix(L, p1_local, p2_local); Nullspace(&L, &h); } - Mat3 H = Map(h.data()); // map the linear vector as the H matrix + + /*Build G matrix from vector*/ + Mat3 G = Map(h.data()); + + /* + p2_local = G * p1_local + Kp2 * p2 = G * Kp1 * p1 + p2 = Kp2^-1 * G * Kp1 * p1 + H = Kp2^-1 * G * Kp1 + */ + Eigen::Matrix3d Kp1 = Eigen::Matrix3d::Identity(); + Kp1(0, 0) = 1.0 / p1_dist; + Kp1(0, 2) = - p1x_mean / p1_dist; + Kp1(1, 1) = 1.0 / p1_dist; + Kp1(1, 2) = - p1y_mean / p1_dist; + + Eigen::Matrix3d Kp2 = Eigen::Matrix3d::Identity(); + Kp2(0, 0) = 1.0 / p2_dist; + Kp2(0, 2) = - p2x_mean / p2_dist; + Kp2(1, 1) = 1.0 / p2_dist; + Kp2(1, 2) = - p2y_mean / p2_dist; + + Eigen::Matrix3d H = Kp2.inverse() * G * Kp1; + Hs->push_back(H); } diff --git a/src/aliceVision/multiview/homographyKernelSolver.hpp b/src/aliceVision/multiview/homographyKernelSolver.hpp index e69840830f..5e91b5c600 100644 --- a/src/aliceVision/multiview/homographyKernelSolver.hpp +++ b/src/aliceVision/multiview/homographyKernelSolver.hpp @@ -23,24 +23,24 @@ struct FourPointSolver { enum { MINIMUM_SAMPLES = 4 }; enum { MAX_MODELS = 1 }; /** - * Computes the homography that transforms x to y with the Direct Linear + * Computes the homography that transforms p1 to p2 with the Direct Linear * Transform (DLT). * - * \param x A 2xN matrix of column vectors. - * \param y A 2xN matrix of column vectors. + * \param p1 A 2xN matrix of column vectors. + * \param p2 A 2xN matrix of column vectors. * \param Hs A vector into which the computed homography is stored. * - * The estimated homography should approximately hold the condition y = H x. + * The estimated homography should approximately hold the condition p2 = H p1. */ - static void Solve(const Mat &x, const Mat &y, vector *Hs); + static void Solve(const Mat &p1, const Mat &p2, vector *Hs); }; // Should be distributed as Chi-squared with k = 2. struct AsymmetricError { - static double Error(const Mat &H, const Vec2 &x1, const Vec2 &x2) { - Vec3 x2h_est = H * EuclideanToHomogeneous(x1); - Vec2 x2_est = x2h_est.head<2>() / x2h_est[2]; - return (x2 - x2_est).squaredNorm(); + static double Error(const Mat &H, const Vec2 &p1, const Vec2 &p2) { + Vec3 p2h_est = H * EuclideanToHomogeneous(p1); + Vec2 p2_est = p2h_est.head<2>() / p2h_est[2]; + return (p2 - p2_est).squaredNorm(); } }; diff --git a/src/aliceVision/prettyprint.hpp b/src/aliceVision/prettyprint.hpp index 01846d73a1..557be4773f 100644 --- a/src/aliceVision/prettyprint.hpp +++ b/src/aliceVision/prettyprint.hpp @@ -427,12 +427,20 @@ bucket_print(const T & m, typename T::size_type n) // Main magic entry point: An overload snuck into namespace std. // Can we do better? 
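// [editor's note] Sketch of the point conditioning added to
// FourPointSolver::Solve above: each point set is centred and scaled, the DLT
// runs on the conditioned coordinates, and the result is denormalized with
// H = Kp2^-1 * G * Kp1. Plain Eigen types are used here instead of the
// project's Mat aliases.
#include <cmath>
#include <Eigen/Dense>

// Build the 3x3 transform that maps a raw point p to (p - mean) / dist,
// i.e. the Kp1 / Kp2 matrices assembled in the hunk above.
Eigen::Matrix3d normalizingTransform(const Eigen::Matrix2Xd& pts)
{
    const Eigen::Vector2d mean = pts.rowwise().mean();
    const double dist = std::sqrt((pts.colwise() - mean).squaredNorm());

    Eigen::Matrix3d K = Eigen::Matrix3d::Identity();
    K(0, 0) = K(1, 1) = 1.0 / dist;
    K(0, 2) = -mean(0) / dist;
    K(1, 2) = -mean(1) / dist;
    return K;
}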
+namespace Eigen {
+template
+class EigenBase;
+}
+
 namespace std
 {
   // Prints a container to the stream using default delimiters
+  template
+  struct is_eigen_object : std::is_base_of, T>
+  {};

   template
-  inline typename enable_if< ::pretty_print::is_container::value,
+  inline typename enable_if< ::pretty_print::is_container::value && !is_eigen_object::value,
                             basic_ostream &>::type
   operator<<(basic_ostream & stream, const T & container)
   {
diff --git a/src/aliceVision/sfm/BundleAdjustmentCeres.cpp b/src/aliceVision/sfm/BundleAdjustmentCeres.cpp
index d3f6361081..fbddfa745d 100644
--- a/src/aliceVision/sfm/BundleAdjustmentCeres.cpp
+++ b/src/aliceVision/sfm/BundleAdjustmentCeres.cpp
@@ -7,17 +7,20 @@

 #include
 #include
+#include
+#include
 #include
 #include
 #include
-
 #include
 #include
 #include

+
+
 namespace fs = boost::filesystem;

 namespace aliceVision {
@@ -80,10 +83,33 @@ ceres::CostFunction* createRigCostFunctionFromIntrinsics(const IntrinsicBase* in
   }
 }

+/**
+ * @brief Create the appropriate cost functor according to the provided input camera intrinsic model
+ * @param[in] intrinsicPtr The intrinsic pointer
+ * @param[in] observation_first The observation in the first view
+ * @param[in] observation_second The corresponding observation in the second view
+ * @return cost functor
+ */
+ceres::CostFunction* createConstraintsCostFunctionFromIntrinsics(const IntrinsicBase* intrinsicPtr, const Vec2& observation_first, const Vec2& observation_second)
+{
+  switch(intrinsicPtr->getType())
+  {
+    case PINHOLE_CAMERA:
+      return new ceres::AutoDiffCostFunction(new ResidualErrorConstraintFunctor_Pinhole(observation_first.homogeneous(), observation_second.homogeneous()));
+    case PINHOLE_CAMERA_RADIAL1:
+      return new ceres::AutoDiffCostFunction(new ResidualErrorConstraintFunctor_PinholeRadialK1(observation_first.homogeneous(), observation_second.homogeneous()));
+    case PINHOLE_CAMERA_RADIAL3:
+      return new ceres::AutoDiffCostFunction(new ResidualErrorConstraintFunctor_PinholeRadialK3(observation_first.homogeneous(), observation_second.homogeneous()));
+    case PINHOLE_CAMERA_FISHEYE:
+      return new ceres::AutoDiffCostFunction(new ResidualErrorConstraintFunctor_PinholeFisheye(observation_first.homogeneous(), observation_second.homogeneous()));
+    default:
+      throw std::logic_error("Cannot create cost function, unrecognized intrinsic type in BA.");
+  }
+}
+
 void BundleAdjustmentCeres::CeresOptions::setDenseBA()
 {
   // default configuration use a DENSE representation
-  preconditionerType = ceres::JACOBI;
+  preconditionerType = ceres::JACOBI;
   linearSolverType = ceres::DENSE_SCHUR;
   sparseLinearAlgebraLibraryType = ceres::SUITE_SPARSE; // not used but just to avoid a warning in ceres
   ALICEVISION_LOG_DEBUG("BundleAdjustment[Ceres]: DENSE_SCHUR");
@@ -248,6 +274,8 @@ void BundleAdjustmentCeres::setSolverOptions(ceres::Solver::Options& solverOptio
   solverOptions.minimizer_progress_to_stdout = _ceresOptions.verbose;
   solverOptions.logging_type = ceres::SILENT;
   solverOptions.num_threads = _ceresOptions.nbThreads;
+
+
 #if CERES_VERSION_MAJOR < 2
   solverOptions.num_linear_solver_threads = _ceresOptions.nbThreads;
 #endif
@@ -579,6 +607,57 @@ void BundleAdjustmentCeres::addLandmarksToProblem(const sfmData::SfMData& sfmDat
   }
 }

+void BundleAdjustmentCeres::addConstraints2DToProblem(const sfmData::SfMData& sfmData, ERefineOptions refineOptions, ceres::Problem& problem)
+{
+  // set a LossFunction to be less penalized by false measurements.
+  // note: set it to NULL if you don't want to use a lossFunction.
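// [editor's note] The AutoDiffCostFunction template arguments are stripped in
// this diff. Given the functors' operator()(cam_K, cam_R1, cam_R2) and the
// 6-double pose blocks (angle-axis + translation) passed to AddResidualBlock
// below, the pinhole case plausibly reads as follows (an assumption, not
// visible in the patch):
//
//   new ceres::AutoDiffCostFunction<ResidualErrorConstraintFunctor_Pinhole,
//                                   2 /*residuals*/, 3 /*K*/, 6 /*pose1*/, 6 /*pose2*/>(
//       new ResidualErrorConstraintFunctor_Pinhole(obs1.homogeneous(), obs2.homogeneous()));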
+ ceres::LossFunction* lossFunction = new ceres::HuberLoss(Square(4.0)); // TODO: make the LOSS function and the parameter an option + + for (const auto & constraint : sfmData.getConstraints2D()) { + const sfmData::View& view_1 = sfmData.getView(constraint.ViewFirst); + const sfmData::View& view_2 = sfmData.getView(constraint.ViewSecond); + + assert(getPoseState(view_1.getPoseId()) != EParameterState::IGNORED); + assert(getIntrinsicState(view_1.getIntrinsicId()) != EParameterState::IGNORED); + assert(getPoseState(view_2.getPoseId()) != EParameterState::IGNORED); + assert(getIntrinsicState(view_2.getIntrinsicId()) != EParameterState::IGNORED); + + double * poseBlockPtr_1 = _posesBlocks.at(view_1.getPoseId()).data(); + double * poseBlockPtr_2 = _posesBlocks.at(view_2.getPoseId()).data(); + + double * intrinsicBlockPtr_1 = _intrinsicsBlocks.at(view_1.getIntrinsicId()).data(); + double * intrinsicBlockPtr_2 = _intrinsicsBlocks.at(view_2.getIntrinsicId()).data(); + + //For the moment assume a unique camera + assert(intrinsicBlockPtr_1 == intrinsicBlockPtr_2); + + ceres::CostFunction* costFunction = createConstraintsCostFunctionFromIntrinsics(sfmData.getIntrinsicPtr(view_1.getIntrinsicId()), constraint.ObservationFirst.x, constraint.ObservationSecond.x); + problem.AddResidualBlock(costFunction, lossFunction, intrinsicBlockPtr_1, poseBlockPtr_1, poseBlockPtr_2); + } +} + +void BundleAdjustmentCeres::addRotationPriorsToProblem(const sfmData::SfMData& sfmData, ERefineOptions refineOptions, ceres::Problem& problem) +{ + // set a LossFunction to be less penalized by false measurements. + // note: set it to NULL if you don't want use a lossFunction. + ceres::LossFunction* lossFunction = nullptr; + + for (const auto & prior : sfmData.getRotationPriors()) { + const sfmData::View& view_1 = sfmData.getView(prior.ViewFirst); + const sfmData::View& view_2 = sfmData.getView(prior.ViewSecond); + + assert(getPoseState(view_1.getPoseId()) != EParameterState::IGNORED); + assert(getPoseState(view_2.getPoseId()) != EParameterState::IGNORED); + + double * poseBlockPtr_1 = _posesBlocks.at(view_1.getPoseId()).data(); + double * poseBlockPtr_2 = _posesBlocks.at(view_2.getPoseId()).data(); + + + ceres::CostFunction* costFunction = new ceres::AutoDiffCostFunction(new ResidualErrorRotationPriorFunctor(prior._second_R_first)); + problem.AddResidualBlock(costFunction, lossFunction, poseBlockPtr_1, poseBlockPtr_2); + } +} + void BundleAdjustmentCeres::createProblem(const sfmData::SfMData& sfmData, ERefineOptions refineOptions, ceres::Problem& problem) @@ -598,6 +677,12 @@ void BundleAdjustmentCeres::createProblem(const sfmData::SfMData& sfmData, // add SfM landmarks to the Ceres problem addLandmarksToProblem(sfmData, refineOptions, problem); + + // add 2D constraints to the Ceres problem + addConstraints2DToProblem(sfmData, refineOptions, problem); + + // add rotation priors to the Ceres problem + addRotationPriorsToProblem(sfmData, refineOptions, problem); } void BundleAdjustmentCeres::updateFromSolution(sfmData::SfMData& sfmData, ERefineOptions refineOptions) const @@ -713,12 +798,12 @@ bool BundleAdjustmentCeres::adjust(sfmData::SfMData& sfmData, ERefineOptions ref setSolverOptions(options); // solve BA - ceres::Solver::Summary summary; + ceres::Solver::Summary summary; ceres::Solve(options, &problem, &summary); // print summary if(_ceresOptions.summary) - ALICEVISION_LOG_DEBUG(summary.FullReport()); + ALICEVISION_LOG_INFO(summary.FullReport()); // solution is not usable if(!summary.IsSolutionUsable()) diff --git 
a/src/aliceVision/sfm/BundleAdjustmentCeres.hpp b/src/aliceVision/sfm/BundleAdjustmentCeres.hpp
index 0b63fa6208..1708425e20 100644
--- a/src/aliceVision/sfm/BundleAdjustmentCeres.hpp
+++ b/src/aliceVision/sfm/BundleAdjustmentCeres.hpp
@@ -206,6 +206,22 @@ class BundleAdjustmentCeres : public BundleAdjustment
   */
   void addLandmarksToProblem(const sfmData::SfMData& sfmData, ERefineOptions refineOptions, ceres::Problem& problem);

+  /**
+   * @brief Create a residual block for each 2D constraint
+   * @param[in] sfmData The input SfMData containing all the information about the reconstruction, notably the intrinsics
+   * @param[in] refineOptions The chosen refine flag
+   * @param[out] problem The Ceres bundle adjustment problem
+   */
+  void addConstraints2DToProblem(const sfmData::SfMData& sfmData, ERefineOptions refineOptions, ceres::Problem& problem);
+
+  /**
+   * @brief Create a residual block for each rotation prior
+   * @param[in] sfmData The input SfMData containing all the information about the reconstruction, notably the intrinsics
+   * @param[in] refineOptions The chosen refine flag
+   * @param[out] problem The Ceres bundle adjustment problem
+   */
+  void addRotationPriorsToProblem(const sfmData::SfMData& sfmData, ERefineOptions refineOptions, ceres::Problem& problem);
+
   /**
   * @brief Create the Ceres bundle adjustement problem with:
   * - extrincics and intrinsics parameters blocks.
@@ -280,6 +296,7 @@ class BundleAdjustmentCeres : public BundleAdjustment
   /// rig sub-poses blocks wrapper
   /// block: ceres angleAxis(3) + translation(3)
   HashMap>> _rigBlocks;
+
 };

 } // namespace sfm
diff --git a/src/aliceVision/sfm/CMakeLists.txt b/src/aliceVision/sfm/CMakeLists.txt
index 5edb444e49..c334811c40 100644
--- a/src/aliceVision/sfm/CMakeLists.txt
+++ b/src/aliceVision/sfm/CMakeLists.txt
@@ -14,6 +14,7 @@ set(sfm_files_headers
   pipeline/pairwiseMatchesIO.hpp
   pipeline/RelativePoseInfo.hpp
   pipeline/structureFromKnownPoses/StructureEstimationFromKnownPoses.hpp
+  pipeline/panorama/ReconstructionEngine_panorama.hpp
   pipeline/regionsIO.hpp
   utils/alignment.hpp
   utils/statistics.hpp
@@ -41,6 +42,7 @@ set(sfm_files_sources
   pipeline/RigSequence.cpp
   pipeline/RelativePoseInfo.cpp
   pipeline/structureFromKnownPoses/StructureEstimationFromKnownPoses.cpp
+  pipeline/panorama/ReconstructionEngine_panorama.cpp
   pipeline/regionsIO.cpp
   utils/alignment.cpp
   utils/statistics.cpp
diff --git a/src/aliceVision/sfm/ResidualErrorConstraintFunctor.hpp b/src/aliceVision/sfm/ResidualErrorConstraintFunctor.hpp
new file mode 100644
index 0000000000..1aa4a0ee6e
--- /dev/null
+++ b/src/aliceVision/sfm/ResidualErrorConstraintFunctor.hpp
@@ -0,0 +1,581 @@
+#pragma once
+
+#include
+#include
+
+#include
+
+
+
+namespace aliceVision {
+namespace sfm {
+
+template
+double getJetValue(const T & val) {
+  return val.a;
+}
+
+template <>
+double getJetValue(const double & val) {
+  return val;
+}
+
+/**
+ * @brief Ceres functor to use a pair of pinhole on a pure rotation 2D constraint.
+ * + * Data parameter blocks are the following <2,3,6,6> + * - 2 => dimension of the residuals, + * - 3 => the intrinsic data block for the first view [focal, principal point x, principal point y], + * - 3 => the camera extrinsic data block for the first view + * - 3 => the camera extrinsic data block for the second view + * + */ +struct ResidualErrorConstraintFunctor_Pinhole +{ + ResidualErrorConstraintFunctor_Pinhole(const Vec3 & pos_2dpoint_first, const Vec3 & pos_2dpoint_second) + : m_pos_2dpoint_first(pos_2dpoint_first), m_pos_2dpoint_second(pos_2dpoint_second) + { + } + + // Enum to map intrinsics parameters between aliceVision & ceres camera data parameter block. + enum { + OFFSET_FOCAL_LENGTH = 0, + OFFSET_PRINCIPAL_POINT_X = 1, + OFFSET_PRINCIPAL_POINT_Y = 2 + }; + + template + void lift(const T* const cam_K, const Vec3 pt, Eigen::Matrix< T, 3, 1> & out) const + { + const T& focal = cam_K[OFFSET_FOCAL_LENGTH]; + const T& principal_point_x = cam_K[OFFSET_PRINCIPAL_POINT_X]; + const T& principal_point_y = cam_K[OFFSET_PRINCIPAL_POINT_Y]; + + + out(0) = (pt(0) - principal_point_x) / focal; + out(1) = (pt(1) - principal_point_y) / focal; + out(2) = static_cast(1.0); + } + + template + void unlift(const T* const cam_K, const Eigen::Matrix< T, 3, 1> & pt, Eigen::Matrix< T, 3, 1> & out) const + { + const T& focal = cam_K[OFFSET_FOCAL_LENGTH]; + const T& principal_point_x = cam_K[OFFSET_PRINCIPAL_POINT_X]; + const T& principal_point_y = cam_K[OFFSET_PRINCIPAL_POINT_Y]; + + Eigen::Matrix< T, 3, 1> proj_pt = pt / pt(2); + + out(0) = proj_pt(0) * focal + principal_point_x; + out(1) = proj_pt(1) * focal + principal_point_y; + out(2) = static_cast(1.0); + } + + /** + * @param[in] cam_K: Camera intrinsics( focal, principal point [x,y] ) + * @param[in] cam_Rt: Camera parameterized using one block of 6 parameters [R;t]: + * - 3 for rotation(angle axis), 3 for translation + * @param[in] pos_3dpoint + * @param[out] out_residuals + */ + template + bool operator()( + const T* const cam_K, + const T* const cam_R1, + const T* const cam_R2, + T* out_residuals) const + { + Eigen::Matrix oneRo, twoRo, twoRone; + + Eigen::Matrix< T, 3, 1> pt3d_1; + + lift(cam_K, m_pos_2dpoint_first, pt3d_1); + + ceres::AngleAxisToRotationMatrix(cam_R1, oneRo.data()); + ceres::AngleAxisToRotationMatrix(cam_R2, twoRo.data()); + + twoRone = twoRo * oneRo.transpose(); + + Eigen::Matrix< T, 3, 1> pt3d_2_est = twoRone * pt3d_1; + + Eigen::Matrix< T, 3, 1> pt2d_2_est; + unlift(cam_K, pt3d_2_est, pt2d_2_est); + + Eigen::Matrix< T, 3, 1> residual = pt2d_2_est - m_pos_2dpoint_second; + + out_residuals[0] = residual(0); + out_residuals[1] = residual(1); + + return true; + } + + Vec3 m_pos_2dpoint_first; // The 2D observation in first view + Vec3 m_pos_2dpoint_second; // The 2D observation in second view +}; + +/** + * @brief Ceres functor to use a pair of pinhole on a pure rotation 2D constraint. 
+ * + * Data parameter blocks are the following <2,4,6,6> + * - 2 => dimension of the residuals, + * - 4 => the intrinsic data block for the first view [focal, principal point x, principal point y], + * - 3 => the camera extrinsic data block for the first view + * - 3 => the camera extrinsic data block for the second view + * + */ +struct ResidualErrorConstraintFunctor_PinholeRadialK1 +{ + ResidualErrorConstraintFunctor_PinholeRadialK1(const Vec3 & pos_2dpoint_first, const Vec3 & pos_2dpoint_second) + : m_pos_2dpoint_first(pos_2dpoint_first), m_pos_2dpoint_second(pos_2dpoint_second) + { + } + + // Enum to map intrinsics parameters between aliceVision & ceres camera data parameter block. + enum { + OFFSET_FOCAL_LENGTH = 0, + OFFSET_PRINCIPAL_POINT_X = 1, + OFFSET_PRINCIPAL_POINT_Y = 2, + OFFSET_DISTO_K1 = 3 + }; + + static double distoFunctor(const std::vector & params, double r2) + { + const double k1 = params[0]; + return r2 * Square(1.+r2*k1); + } + + template + void lift(const T* const cam_K, const Vec3 pt, Eigen::Matrix< T, 3, 1> & out) const + { + const T& focal = cam_K[OFFSET_FOCAL_LENGTH]; + const T& principal_point_x = cam_K[OFFSET_PRINCIPAL_POINT_X]; + const T& principal_point_y = cam_K[OFFSET_PRINCIPAL_POINT_Y]; + const T& k1 = cam_K[OFFSET_DISTO_K1]; + + //Unshift then unscale back to meters + T xd = (pt(0) - principal_point_x) / focal; + T yd = (pt(1) - principal_point_y) / focal; + T distorted_radius = sqrt(xd*xd + yd*yd); + + /*A hack to obtain undistorted point even if using automatic diff*/ + double xd_real, yd_real, k1_real; + xd_real = getJetValue(xd); + yd_real = getJetValue(yd); + k1_real = getJetValue(k1); + + /*get rescaler*/ + std::vector distortionParams = {k1_real}; + double distorted_radius_square = xd_real * xd_real + yd_real * yd_real; + double rescaler = ::sqrt(camera::radial_distortion::bisection_Radius_Solve(distortionParams, distorted_radius_square, distoFunctor));; + + if (distorted_radius < 1e-12) { + out(0) = xd; + out(1) = yd; + out(2) = static_cast(1.0); + } + else { + out(0) = (xd / distorted_radius) * static_cast(rescaler); + out(1) = (yd / distorted_radius) * static_cast(rescaler); + out(2) = static_cast(1.0); + } + } + + template + void unlift(const T* const cam_K, const Eigen::Matrix< T, 3, 1> & pt, Eigen::Matrix< T, 3, 1> & out) const + { + const T& focal = cam_K[OFFSET_FOCAL_LENGTH]; + const T& principal_point_x = cam_K[OFFSET_PRINCIPAL_POINT_X]; + const T& principal_point_y = cam_K[OFFSET_PRINCIPAL_POINT_Y]; + const T& k1 = cam_K[OFFSET_DISTO_K1]; + + //Project on plane + Eigen::Matrix< T, 3, 1> proj_pt = pt / pt(2); + + //Apply distortion + const T r2 = proj_pt(0)*proj_pt(0) + proj_pt(1)*proj_pt(1); + const T r_coeff = (T(1) + k1*r2); + const T x_d = proj_pt(0) * r_coeff; + const T y_d = proj_pt(1) * r_coeff; + + //Scale and shift + out(0) = x_d * focal + principal_point_x; + out(1) = y_d * focal + principal_point_y; + out(2) = static_cast(1.0); + } + + /** + * @param[in] cam_K: Camera intrinsics( focal, principal point [x,y] ) + * @param[in] cam_Rt: Camera parameterized using one block of 6 parameters [R;t]: + * - 3 for rotation(angle axis), 3 for translation + * @param[in] pos_3dpoint + * @param[out] out_residuals + */ + template + bool operator()( + const T* const cam_K, + const T* const cam_R1, + const T* const cam_R2, + T* out_residuals) const + { + Eigen::Matrix oneRo, twoRo, twoRone; + + Eigen::Matrix pt3d_1; + + //From pixel to meters + lift(cam_K, m_pos_2dpoint_first, pt3d_1); + + //Build relative rotation + 
ceres::AngleAxisToRotationMatrix(cam_R1, oneRo.data()); + ceres::AngleAxisToRotationMatrix(cam_R2, twoRo.data()); + twoRone = twoRo * oneRo.transpose(); + + //Transform point + Eigen::Matrix< T, 3, 1> pt3d_2_est = twoRone * pt3d_1; + + //Project back to image space in pixels + Eigen::Matrix< T, 3, 1> pt2d_2_est; + unlift(cam_K, pt3d_2_est, pt2d_2_est); + + //Compute residual + Eigen::Matrix< T, 3, 1> residual = pt2d_2_est - m_pos_2dpoint_second; + + out_residuals[0] = residual(0); + out_residuals[1] = residual(1); + + return true; + } + + Vec3 m_pos_2dpoint_first; // The 2D observation in first view + Vec3 m_pos_2dpoint_second; // The 2D observation in second view +}; + +/** + * @brief Ceres functor to use a pair of pinhole on a pure rotation 2D constraint. + * + * Data parameter blocks are the following <2,6,6,6> + * - 2 => dimension of the residuals, + * - 4 => the intrinsic data block for the first view [focal, principal point x, principal point y, K1, K2, K3], + * - 3 => the camera extrinsic data block for the first view + * - 3 => the camera extrinsic data block for the second view + * + */ + + +struct ResidualErrorConstraintFunctor_PinholeRadialK3 +{ + ResidualErrorConstraintFunctor_PinholeRadialK3(const Vec3 & pos_2dpoint_first, const Vec3 & pos_2dpoint_second) + : m_pos_2dpoint_first(pos_2dpoint_first), m_pos_2dpoint_second(pos_2dpoint_second) + { + } + + // Enum to map intrinsics parameters between aliceVision & ceres camera data parameter block. + enum { + OFFSET_FOCAL_LENGTH = 0, + OFFSET_PRINCIPAL_POINT_X = 1, + OFFSET_PRINCIPAL_POINT_Y = 2, + OFFSET_DISTO_K1 = 3, + OFFSET_DISTO_K2 = 4, + OFFSET_DISTO_K3 = 5 + }; + + static double distoFunctor(const std::vector & params, double r2) + { + const double k1 = params[0]; + const double k2 = params[1]; + const double k3 = params[2]; + return r2 * Square(1.+r2*(k1+r2*(k2+r2*k3))); + } + + template + void lift(const T* const cam_K, const Vec3 pt, Eigen::Matrix< T, 3, 1> & out) const + { + const T& focal = cam_K[OFFSET_FOCAL_LENGTH]; + const T& principal_point_x = cam_K[OFFSET_PRINCIPAL_POINT_X]; + const T& principal_point_y = cam_K[OFFSET_PRINCIPAL_POINT_Y]; + const T& k1 = cam_K[OFFSET_DISTO_K1]; + const T& k2 = cam_K[OFFSET_DISTO_K2]; + const T& k3 = cam_K[OFFSET_DISTO_K3]; + + //Unshift then unscale back to meters + T xd = (pt(0) - principal_point_x) / focal; + T yd = (pt(1) - principal_point_y) / focal; + T distorted_radius = sqrt(xd*xd + yd*yd); + + /*A hack to obtain undistorted point even if using automatic diff*/ + double xd_real, yd_real, k1_real, k2_real, k3_real; + xd_real = getJetValue(xd); + yd_real = getJetValue(yd); + k1_real = getJetValue(k1); + k2_real = getJetValue(k2); + k3_real = getJetValue(k3); + + /*get rescaler*/ + std::vector distortionParams = {k1_real, k2_real, k3_real}; + double distorted_radius_square = xd_real * xd_real + yd_real * yd_real; + double rescaler = ::sqrt(camera::radial_distortion::bisection_Radius_Solve(distortionParams, distorted_radius_square, distoFunctor));; + + if (distorted_radius < 1e-12) { + out(0) = xd; + out(1) = yd; + out(2) = static_cast(1.0); + } + else { + out(0) = (xd / distorted_radius) * static_cast(rescaler); + out(1) = (yd / distorted_radius) * static_cast(rescaler); + out(2) = static_cast(1.0); + } + } + + template + void unlift(const T* const cam_K, const Eigen::Matrix< T, 3, 1> & pt, Eigen::Matrix< T, 3, 1> & out) const + { + const T& focal = cam_K[OFFSET_FOCAL_LENGTH]; + const T& principal_point_x = cam_K[OFFSET_PRINCIPAL_POINT_X]; + const T& principal_point_y = 
cam_K[OFFSET_PRINCIPAL_POINT_Y]; + const T& k1 = cam_K[OFFSET_DISTO_K1]; + const T& k2 = cam_K[OFFSET_DISTO_K2]; + const T& k3 = cam_K[OFFSET_DISTO_K3]; + + //Project on plane + Eigen::Matrix< T, 3, 1> proj_pt = pt / pt(2); + + //Apply distortion + const T r2 = proj_pt(0)*proj_pt(0) + proj_pt(1)*proj_pt(1); + const T r4 = r2 * r2; + const T r6 = r4 * r2; + const T r_coeff = (T(1) + k1*r2 + k2*r4 + k3*r6); + const T x_d = proj_pt(0) * r_coeff; + const T y_d = proj_pt(1) * r_coeff; + + //Scale and shift + out(0) = x_d * focal + principal_point_x; + out(1) = y_d * focal + principal_point_y; + out(2) = static_cast(1.0); + } + + /** + * @param[in] cam_K: Camera intrinsics( focal, principal point [x,y] ) + * @param[in] cam_Rt: Camera parameterized using one block of 6 parameters [R;t]: + * - 3 for rotation(angle axis), 3 for translation + * @param[in] pos_3dpoint + * @param[out] out_residuals + */ + template + bool operator()( + const T* const cam_K, + const T* const cam_R1, + const T* const cam_R2, + T* out_residuals) const + { + Eigen::Matrix oneRo, twoRo, twoRone; + + Eigen::Matrix pt3d_1; + + //From pixel to meters + lift(cam_K, m_pos_2dpoint_first, pt3d_1); + + //Build relative rotation + ceres::AngleAxisToRotationMatrix(cam_R1, oneRo.data()); + ceres::AngleAxisToRotationMatrix(cam_R2, twoRo.data()); + twoRone = twoRo * oneRo.transpose(); + + //Transform point + Eigen::Matrix< T, 3, 1> pt3d_2_est = twoRone * pt3d_1; + + //Project back to image space in pixels + Eigen::Matrix< T, 3, 1> pt2d_2_est; + unlift(cam_K, pt3d_2_est, pt2d_2_est); + + //Compute residual + Eigen::Matrix< T, 3, 1> residual = pt2d_2_est - m_pos_2dpoint_second; + + out_residuals[0] = residual(0); + out_residuals[1] = residual(1); + + return true; + } + + Vec3 m_pos_2dpoint_first; // The 2D observation in first view + Vec3 m_pos_2dpoint_second; // The 2D observation in second view +}; + + +/** + * @brief Ceres functor to use a pair of fisheye on a pure rotation 2D constraint. 
+ * + * Data parameter blocks are the following <2,7,6,6> + * - 2 => dimension of the residuals, + * - 4 => the intrinsic data block for the first view , + * - 3 => the camera extrinsic data block for the first view + * - 3 => the camera extrinsic data block for the second view + * + */ + +struct ResidualErrorConstraintFunctor_PinholeFisheye +{ + ResidualErrorConstraintFunctor_PinholeFisheye(const Vec3 & pos_2dpoint_first, const Vec3 & pos_2dpoint_second) + : m_pos_2dpoint_first(pos_2dpoint_first), m_pos_2dpoint_second(pos_2dpoint_second) + { + } + + enum { + OFFSET_FOCAL_LENGTH = 0, + OFFSET_PRINCIPAL_POINT_X = 1, + OFFSET_PRINCIPAL_POINT_Y = 2, + OFFSET_DISTO_K1 = 3, + OFFSET_DISTO_K2 = 4, + OFFSET_DISTO_K3 = 5, + OFFSET_DISTO_K4 = 6, + }; + + template + void lift(const T* const cam_K, const Vec3 pt, Eigen::Matrix< T, 3, 1> & out) const + { + const T& focal = cam_K[OFFSET_FOCAL_LENGTH]; + const T& principal_point_x = cam_K[OFFSET_PRINCIPAL_POINT_X]; + const T& principal_point_y = cam_K[OFFSET_PRINCIPAL_POINT_Y]; + const T& k1 = cam_K[OFFSET_DISTO_K1]; + const T& k2 = cam_K[OFFSET_DISTO_K2]; + const T& k3 = cam_K[OFFSET_DISTO_K3]; + const T& k4 = cam_K[OFFSET_DISTO_K4]; + + //Unshift then unscale back to meters + T xd = (pt(0) - principal_point_x) / focal; + T yd = (pt(1) - principal_point_y) / focal; + T distorted_radius = sqrt(xd*xd + yd*yd); + + /*A hack to obtain undistorted point even if using automatic diff*/ + double xd_real, yd_real, k1_real, k2_real, k3_real, k4_real; + xd_real = getJetValue(xd); + yd_real = getJetValue(yd); + k1_real = getJetValue(k1); + k2_real = getJetValue(k2); + k3_real = getJetValue(k3); + k4_real = getJetValue(k4); + + double scale = 1.0; + { + const double eps = 1e-8; + const double theta_dist = sqrt(xd_real * xd_real + yd_real * yd_real); + if (theta_dist > eps) + { + double theta = theta_dist; + for (int j = 0; j < 10; ++j) + { + const double theta2 = theta*theta; + const double theta4 = theta2*theta2; + const double theta6 = theta4*theta2; + const double theta8 = theta6*theta2; + + double theta_fix = (theta * (1.0 + k1_real * theta2 + k2_real * theta4 + k3_real * theta6 + k4_real * theta8) - theta_dist) / (1.0 + 3.0 * k1_real * theta2 + 5.0 * k2_real * theta4 + 7.0 * k3_real * theta6 + 9.0 *k4_real * theta8); + + theta = theta - theta_fix; + } + + scale = std::tan(theta); + } + } + + if (distorted_radius < 1e-12) { + out(0) = xd; + out(1) = yd; + out(2) = static_cast(1.0); + } + else { + out(0) = (xd / distorted_radius) * static_cast(scale); + out(1) = (yd / distorted_radius) * static_cast(scale); + out(2) = static_cast(1.0); + } + } + + template + void unlift(const T* const cam_K, const Eigen::Matrix< T, 3, 1> & pt, Eigen::Matrix< T, 3, 1> & out) const + { + const T& focal = cam_K[OFFSET_FOCAL_LENGTH]; + const T& principal_point_x = cam_K[OFFSET_PRINCIPAL_POINT_X]; + const T& principal_point_y = cam_K[OFFSET_PRINCIPAL_POINT_Y]; + const T& k1 = cam_K[OFFSET_DISTO_K1]; + const T& k2 = cam_K[OFFSET_DISTO_K2]; + const T& k3 = cam_K[OFFSET_DISTO_K3]; + const T& k4 = cam_K[OFFSET_DISTO_K4]; + + //Project on plane + Eigen::Matrix< T, 3, 1> proj_pt = pt / pt(2); + + //Apply distortion + const T x_u = proj_pt(0); + const T y_u = proj_pt(1); + const T dist = sqrt(x_u*x_u + y_u*y_u); + const T theta = atan(dist); + const T theta2 = theta*theta; + const T theta3 = theta2*theta; + const T theta4 = theta2*theta2; + const T theta5 = theta4*theta; + const T theta6 = theta3*theta3; + const T theta7 = theta6*theta; + const T theta8 = theta4*theta4; + const T theta9 = 
theta8*theta; + + const T theta_dist = theta + k1*theta3 + k2*theta5 + k3*theta7 + k4*theta9; + + const T inv_r = dist > T(1e-8) ? T(1.0)/dist : T(1.0); + const T cdist = dist > T(1e-8) ? theta_dist * inv_r : T(1); + + const T x_d = x_u * cdist; + const T y_d = y_u * cdist; + + //Scale and shift + out(0) = x_d * focal + principal_point_x; + out(1) = y_d * focal + principal_point_y; + out(2) = static_cast(1.0); + } + + /** + * @param[in] cam_K: Camera intrinsics( focal, principal point [x,y] ) + * @param[in] cam_Rt: Camera parameterized using one block of 6 parameters [R;t]: + * - 3 for rotation(angle axis), 3 for translation + * @param[in] pos_3dpoint + * @param[out] out_residuals + */ + template + bool operator()( + const T* const cam_K, + const T* const cam_R1, + const T* const cam_R2, + T* out_residuals) const + { + Eigen::Matrix oneRo, twoRo, twoRone; + + Eigen::Matrix< T, 3, 1> pt3d_1; + + //From pixel to meters + lift(cam_K, m_pos_2dpoint_first, pt3d_1); + + //Build relative rotation + ceres::AngleAxisToRotationMatrix(cam_R1, oneRo.data()); + ceres::AngleAxisToRotationMatrix(cam_R2, twoRo.data()); + twoRone = twoRo * oneRo.transpose(); + + //Transform point + Eigen::Matrix< T, 3, 1> pt3d_2_est = twoRone * pt3d_1; + + //Project back to image space in pixels + Eigen::Matrix< T, 3, 1> pt2d_2_est; + unlift(cam_K, pt3d_2_est, pt2d_2_est); + + //Compute residual + Eigen::Matrix< T, 3, 1> residual = pt2d_2_est - m_pos_2dpoint_second; + + out_residuals[0] = residual(0); + out_residuals[1] = residual(1); + + return true; + } + + Vec3 m_pos_2dpoint_first; // The 2D observation in first view + Vec3 m_pos_2dpoint_second; // The 2D observation in second view +}; + + +} // namespace sfm +} // namespace aliceVision diff --git a/src/aliceVision/sfm/ResidualErrorRotationPriorFunctor.hpp b/src/aliceVision/sfm/ResidualErrorRotationPriorFunctor.hpp new file mode 100644 index 0000000000..d57aef4440 --- /dev/null +++ b/src/aliceVision/sfm/ResidualErrorRotationPriorFunctor.hpp @@ -0,0 +1,59 @@ +#pragma once + +#include +#include + + + +namespace aliceVision { +namespace sfm { + +/** + * @brief Ceres functor to use a pair of pinhole on a pure rotation 2D constraint. 
+ * + * Data parameter blocks are the following <3,6,6> + * - 3 => dimension of the residuals, + * - 6 => the camera extrinsic data block for the first view + * - 6 => the camera extrinsic data block for the second view + */ +struct ResidualErrorRotationPriorFunctor +{ + explicit ResidualErrorRotationPriorFunctor(const Eigen::Matrix3d & two_R_one) + : m_two_R_one(two_R_one) + { + } + + /** + * @param[in] cam_Rt: Camera parameterized using one block of 6 parameters [R;t]: + * - 3 for rotation(angle axis), 3 for translation + * @param[in] pos_3dpoint + * @param[out] out_residuals + */ + template + bool operator()( + const T* const cam_R1, + const T* const cam_R2, + T* out_residuals) const + { + Eigen::Matrix oneRo, twoRo, twoRone, R_error; + + ceres::AngleAxisToRotationMatrix(cam_R1, oneRo.data()); + ceres::AngleAxisToRotationMatrix(cam_R2, twoRo.data()); + twoRone = twoRo * oneRo.transpose(); + R_error = twoRone * m_two_R_one.transpose(); + + ceres::RotationMatrixToAngleAxis(R_error.data(), out_residuals); + + out_residuals[0] *= 180.0 / M_PI; + out_residuals[1] *= 180.0 / M_PI; + out_residuals[2] *= 180.0 / M_PI; + + + return true; + } + + Eigen::Matrix3d m_two_R_one; +}; + +} // namespace sfm +} // namespace aliceVision diff --git a/src/aliceVision/sfm/pipeline/CMakeLists.txt b/src/aliceVision/sfm/pipeline/CMakeLists.txt index c16e3d9c97..32071ee9a5 100644 --- a/src/aliceVision/sfm/pipeline/CMakeLists.txt +++ b/src/aliceVision/sfm/pipeline/CMakeLists.txt @@ -1,3 +1,4 @@ add_subdirectory(sequential) add_subdirectory(global) +add_subdirectory(panorama) diff --git a/src/aliceVision/sfm/pipeline/RelativePoseInfo.cpp b/src/aliceVision/sfm/pipeline/RelativePoseInfo.cpp index b9c23f612b..12676625ad 100644 --- a/src/aliceVision/sfm/pipeline/RelativePoseInfo.cpp +++ b/src/aliceVision/sfm/pipeline/RelativePoseInfo.cpp @@ -102,8 +102,11 @@ bool robustRelativePose( max_iteration_count, &relativePose_info.essential_matrix, relativePose_info.initial_residual_tolerance); relativePose_info.found_residual_precision = acRansacOut.first; - if (relativePose_info.vec_inliers.size() < SolverType::MINIMUM_SAMPLES * ALICEVISION_MINIMUM_SAMPLES_COEF ) + if (relativePose_info.vec_inliers.size() < SolverType::MINIMUM_SAMPLES * ALICEVISION_MINIMUM_SAMPLES_COEF ) + { + ALICEVISION_LOG_INFO("robustRelativePose: no sufficient coverage (the model does not support enough samples): " << relativePose_info.vec_inliers.size()); return false; // no sufficient coverage (the model does not support enough samples) + } // estimation of the relative poses Mat3 R; @@ -111,7 +114,10 @@ bool robustRelativePose( if (!estimate_Rt_fromE( K1, K2, x1, x2, relativePose_info.essential_matrix, relativePose_info.vec_inliers, &R, &t)) + { + ALICEVISION_LOG_INFO("robustRelativePose: cannot find a valid [R|t] couple that makes the inliers in front of the camera."); return false; // cannot find a valid [R|t] couple that makes the inliers in front of the camera. 
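// [editor's note] Just below, the relative pose is stored as [R|C] with the
// camera center C = -R^T * t, the first camera being [Id|0]. A minimal Eigen
// sketch of that conversion (the function name is illustrative):
#include <Eigen/Dense>

inline Eigen::Vector3d centerFromRt(const Eigen::Matrix3d& R,
                                    const Eigen::Vector3d& t)
{
    // x_cam = R * X + t = R * (X - C)  =>  C = -R^T * t
    return -R.transpose() * t;
}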
+ } // Store [R|C] for the second camera, since the first camera is [Id|0] relativePose_info.relativePose = geometry::Pose3(R, -R.transpose() * t); diff --git a/src/aliceVision/sfm/pipeline/global/GlobalSfMRotationAveragingSolver.cpp b/src/aliceVision/sfm/pipeline/global/GlobalSfMRotationAveragingSolver.cpp index f70f35bbd1..9f9f238ad0 100644 --- a/src/aliceVision/sfm/pipeline/global/GlobalSfMRotationAveragingSolver.cpp +++ b/src/aliceVision/sfm/pipeline/global/GlobalSfMRotationAveragingSolver.cpp @@ -27,12 +27,15 @@ bool GlobalSfMRotationAveragingSolver::Run( ERotationAveragingMethod eRotationAveragingMethod, ERelativeRotationInferenceMethod eRelativeRotationInferenceMethod, const RelativeRotations& relativeRot_In, + const double max_angular_error, HashMap& map_globalR ) const { RelativeRotations relativeRotations = relativeRot_In; // We work on a copy, since inference can remove some relative motions + ALICEVISION_LOG_DEBUG("GlobalSfMRotationAveragingSolver: A) relativeRotations.size(): " << relativeRotations.size()); + //-> Test there is only one graph and at least 3 camera? switch(eRelativeRotationInferenceMethod) { @@ -42,9 +45,13 @@ bool GlobalSfMRotationAveragingSolver::Run( // Triplet inference (test over the composition error) //------------------- PairSet pairs = getPairs(relativeRotations); + ALICEVISION_LOG_DEBUG("GlobalSfMRotationAveragingSolver: pairs.size(): " << pairs.size()); + std::vector< graph::Triplet > vec_triplets = graph::tripletListing(pairs); - //-- Rejection triplet that are 'not' identity rotation (error to identity > 5°) - TripletRotationRejection(5.0f, vec_triplets, relativeRotations); + ALICEVISION_LOG_DEBUG("GlobalSfMRotationAveragingSolver: vec_triplets.size(): " << vec_triplets.size()); + + //-- Rejection triplet that are 'not' identity rotation (error to identity > max_angular_error) + TripletRotationRejection(max_angular_error, vec_triplets, relativeRotations); pairs = getPairs(relativeRotations); const std::set set_remainingIds = graph::CleanGraph_KeepLargestBiEdge_Nodes(pairs); @@ -75,6 +82,9 @@ bool GlobalSfMRotationAveragingSolver::Run( //- B. 
solve global rotation computation bool bSuccess = false; std::vector vec_globalR(_reindexForward.size()); + + ALICEVISION_LOG_DEBUG("GlobalSfMRotationAveragingSolver: B) vec_globalR.size(): " << vec_globalR.size()); + switch(eRotationAveragingMethod) { case ROTATION_AVERAGING_L2: @@ -84,11 +94,16 @@ bool GlobalSfMRotationAveragingSolver::Run( _reindexForward.size(), relativeRotations, vec_globalR); + + ALICEVISION_LOG_DEBUG("rotationAveraging::l2::L2RotationAveraging: success: " << bSuccess); //- Non linear refinement of the global rotations if (bSuccess) + { bSuccess = rotationAveraging::l2::L2RotationAveraging_Refine( relativeRotations, vec_globalR); + ALICEVISION_LOG_DEBUG("rotationAveraging::l2::L2RotationAveraging_Refine: success: " << bSuccess); + } // save kept pairs (restore original pose indices using the backward reindexing) for(RelativeRotations::iterator iter = relativeRotations.begin(); iter != relativeRotations.end(); ++iter) @@ -110,6 +125,7 @@ bool GlobalSfMRotationAveragingSolver::Run( bSuccess = rotationAveraging::l1::GlobalRotationsRobust( relativeRotations, vec_globalR, nMainViewID, 0.0f, &vec_inliers); + ALICEVISION_LOG_DEBUG("rotationAveraging::l1::GlobalRotationsRobust: success: " << bSuccess); ALICEVISION_LOG_DEBUG("inliers: " << vec_inliers); // save kept pairs (restore original pose indices using the backward reindexing) @@ -170,6 +186,8 @@ void GlobalSfMRotationAveragingSolver::TripletRotationRejection( const graph::Triplet & triplet = vec_triplets[i]; const IndexT I = triplet.i, J = triplet.j , K = triplet.k; + ALICEVISION_LOG_DEBUG("GlobalSfMRotationAveragingSolver::TripletRotationRejection: i: " << i << ", (" << I << ", " << J << ", " << K << ")."); + //-- Find the three relative rotations const Pair ij(I,J), ji(J,I); const Mat3 RIJ = (map_relatives.count(ij)) ? 
@@ -206,6 +224,10 @@ void GlobalSfMRotationAveragingSolver::TripletRotationRejection( else map_relatives_validated[ik] = map_relatives.at(ik); } + else + { + ALICEVISION_LOG_DEBUG("GlobalSfMRotationAveragingSolver::TripletRotationRejection: i: " << i << ", angularErrorDegree: " << angularErrorDegree << ", max_angular_error: " << max_angular_error); + } } map_relatives = std::move(map_relatives_validated); diff --git a/src/aliceVision/sfm/pipeline/global/GlobalSfMRotationAveragingSolver.hpp b/src/aliceVision/sfm/pipeline/global/GlobalSfMRotationAveragingSolver.hpp index 16e7e2d8d4..cfa034e419 100644 --- a/src/aliceVision/sfm/pipeline/global/GlobalSfMRotationAveragingSolver.hpp +++ b/src/aliceVision/sfm/pipeline/global/GlobalSfMRotationAveragingSolver.hpp @@ -7,6 +7,10 @@ #pragma once +#include +#include +#include + namespace aliceVision { namespace sfm { @@ -22,6 +26,39 @@ enum ERelativeRotationInferenceMethod TRIPLET_ROTATION_INFERENCE_COMPOSITION_ERROR = 1 }; +inline std::string ERotationAveragingMethod_enumToString(ERotationAveragingMethod eRotationAveragingMethod) +{ + switch(eRotationAveragingMethod) + { + case ERotationAveragingMethod::ROTATION_AVERAGING_L1: + return "L1_minimization"; + case ERotationAveragingMethod::ROTATION_AVERAGING_L2: + return "L2_minimization"; + } + throw std::out_of_range("Invalid rotation averaging method type"); +} + +inline ERotationAveragingMethod ERotationAveragingMethod_stringToEnum(const std::string& RotationAveragingMethodName) +{ + if(RotationAveragingMethodName == "L1_minimization") return ERotationAveragingMethod::ROTATION_AVERAGING_L1; + if(RotationAveragingMethodName == "L2_minimization") return ERotationAveragingMethod::ROTATION_AVERAGING_L2; + + throw std::out_of_range("Invalid rotation averaging method name : '" + RotationAveragingMethodName + "'"); +} + +inline std::ostream& operator<<(std::ostream& os, ERotationAveragingMethod e) +{ + return os << ERotationAveragingMethod_enumToString(e); +} + +inline std::istream& operator>>(std::istream& in, ERotationAveragingMethod& rotationType) +{ + std::string token; + in >> token; + rotationType = ERotationAveragingMethod_stringToEnum(token); + return in; +} + } // namespace sfm } // namespace aliceVision @@ -42,6 +79,7 @@ class GlobalSfMRotationAveragingSolver bool Run(ERotationAveragingMethod eRotationAveragingMethod, ERelativeRotationInferenceMethod eRelativeRotationInferenceMethod, const rotationAveraging::RelativeRotations& relativeRot_In, + const double max_angular_error, HashMap& map_globalR) const; /** diff --git a/src/aliceVision/sfm/pipeline/global/GlobalSfMTranslationAveragingSolver.hpp b/src/aliceVision/sfm/pipeline/global/GlobalSfMTranslationAveragingSolver.hpp index 77026e4153..411bc498e3 100644 --- a/src/aliceVision/sfm/pipeline/global/GlobalSfMTranslationAveragingSolver.hpp +++ b/src/aliceVision/sfm/pipeline/global/GlobalSfMTranslationAveragingSolver.hpp @@ -24,6 +24,42 @@ enum ETranslationAveragingMethod TRANSLATION_AVERAGING_SOFTL1 = 3 }; +inline std::string ETranslationAveragingMethod_enumToString(ETranslationAveragingMethod eTranslationAveragingMethod) +{ + switch(eTranslationAveragingMethod) + { + case ETranslationAveragingMethod::TRANSLATION_AVERAGING_L1: + return "L1_minimization"; + case ETranslationAveragingMethod::TRANSLATION_AVERAGING_L2_DISTANCE_CHORDAL: + return "L2_minimization"; + case ETranslationAveragingMethod::TRANSLATION_AVERAGING_SOFTL1: + return "L1_soft_minimization"; + } + throw std::out_of_range("Invalid translation averaging method type"); +} + +inline 
ETranslationAveragingMethod ETranslationAveragingMethod_stringToEnum(const std::string& TranslationAveragingMethodName) +{ + if(TranslationAveragingMethodName == "L1_minimization") return ETranslationAveragingMethod::TRANSLATION_AVERAGING_L1; + if(TranslationAveragingMethodName == "L2_minimization") return ETranslationAveragingMethod::TRANSLATION_AVERAGING_L2_DISTANCE_CHORDAL; + if(TranslationAveragingMethodName == "L1_soft_minimization") return ETranslationAveragingMethod::TRANSLATION_AVERAGING_SOFTL1; + + throw std::out_of_range("Invalid translation averaging method name : '" + TranslationAveragingMethodName + "'"); +} + +inline std::ostream& operator<<(std::ostream& os, ETranslationAveragingMethod e) +{ + return os << ETranslationAveragingMethod_enumToString(e); +} + +inline std::istream& operator>>(std::istream& in, ETranslationAveragingMethod& translationType) +{ + std::string token; + in >> token; + translationType = ETranslationAveragingMethod_stringToEnum(token); + return in; +} + class GlobalSfMTranslationAveragingSolver { translationAveraging::RelativeInfoVec m_vec_initialRijTijEstimates; diff --git a/src/aliceVision/sfm/pipeline/global/ReconstructionEngine_globalSfM.cpp b/src/aliceVision/sfm/pipeline/global/ReconstructionEngine_globalSfM.cpp index 6657fbfb61..099698f053 100644 --- a/src/aliceVision/sfm/pipeline/global/ReconstructionEngine_globalSfM.cpp +++ b/src/aliceVision/sfm/pipeline/global/ReconstructionEngine_globalSfM.cpp @@ -197,7 +197,8 @@ bool ReconstructionEngine_globalSfM::Compute_Global_Rotations(const rotationAver const ERelativeRotationInferenceMethod eRelativeRotationInferenceMethod = TRIPLET_ROTATION_INFERENCE_COMPOSITION_ERROR; //TRIPLET_ROTATION_INFERENCE_NONE; GlobalSfMRotationAveragingSolver rotationAveraging_solver; - const bool b_rotationAveraging = rotationAveraging_solver.Run(_eRotationAveragingMethod, eRelativeRotationInferenceMethod, relatives_R, global_rotations); + //-- Rejection triplet that are 'not' identity rotation (error to identity > 5°) + const bool b_rotationAveraging = rotationAveraging_solver.Run(_eRotationAveragingMethod, eRelativeRotationInferenceMethod, relatives_R, 5.0, global_rotations); ALICEVISION_LOG_DEBUG("Found #global_rotations: " << global_rotations.size()); diff --git a/src/aliceVision/sfm/pipeline/panorama/CMakeLists.txt b/src/aliceVision/sfm/pipeline/panorama/CMakeLists.txt new file mode 100644 index 0000000000..1f876755f4 --- /dev/null +++ b/src/aliceVision/sfm/pipeline/panorama/CMakeLists.txt @@ -0,0 +1,6 @@ +alicevision_add_test(panoramaSfM_test.cpp + NAME "sfm_panorama" + LINKS aliceVision_sfm + aliceVision_feature + aliceVision_system +) diff --git a/src/aliceVision/sfm/pipeline/panorama/ReconstructionEngine_panorama.cpp b/src/aliceVision/sfm/pipeline/panorama/ReconstructionEngine_panorama.cpp new file mode 100644 index 0000000000..3a4d195ae9 --- /dev/null +++ b/src/aliceVision/sfm/pipeline/panorama/ReconstructionEngine_panorama.cpp @@ -0,0 +1,753 @@ +// This file is part of the AliceVision project. +// Copyright (c) 2016 AliceVision contributors. +// Copyright (c) 2012 openMVG contributors. +// This Source Code Form is subject to the terms of the Mozilla Public License, +// v. 2.0. If a copy of the MPL was not distributed with this file, +// You can obtain one at https://mozilla.org/MPL/2.0/. 
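Aside: the new max_angular_error argument introduced above is threaded through the rotation averaging solver as follows. This is a sketch with assumed types taken from the headers above (HashMap<IndexT, Mat3> matches the solver's signature); it is not code from this diff:

    rotationAveraging::RelativeRotations relativeRotations; // filled from pairwise estimation
    GlobalSfMRotationAveragingSolver solver;
    HashMap<IndexT, Mat3> globalRotations;
    // Global SfM keeps the historical 5 degree triplet-rejection threshold;
    // the panorama engine passes a much looser 100.0 (see Compute_Global_Rotations below).
    const bool success = solver.Run(ROTATION_AVERAGING_L2,
                                    TRIPLET_ROTATION_INFERENCE_COMPOSITION_ERROR,
                                    relativeRotations,
                                    /*max_angular_error=*/5.0,
                                    globalRotations);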
+ +#include "ReconstructionEngine_panorama.hpp" +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +#include +#include +#include +#include +#include +#include +#include + + +#include + +#include + +#ifdef _MSC_VER +#pragma warning( once : 4267 ) //warning C4267: 'argument' : conversion from 'size_t' to 'const int', possible loss of data +#endif + +namespace aliceVision { +namespace sfm { + +using namespace aliceVision::camera; +using namespace aliceVision::geometry; +using namespace aliceVision::feature; +using namespace aliceVision::sfmData; + + + +using namespace aliceVision::robustEstimation; + +bool robustRelativeRotation_fromE( + const Mat3 & K1, const Mat3 & K2, + const Mat & x1, const Mat & x2, + const std::pair & size_ima1, + const std::pair & size_ima2, + RelativePoseInfo & relativePose_info, + const size_t max_iteration_count) +{ + // Use the 5 point solver to estimate E + typedef essential::kernel::FivePointKernel SolverType; + // Define the AContrario adaptor + typedef ACKernelAdaptorEssential< + SolverType, + aliceVision::fundamental::kernel::EpipolarDistanceError, + UnnormalizerT, + Mat3> + KernelType; + + KernelType kernel(x1, size_ima1.first, size_ima1.second, + x2, size_ima2.first, size_ima2.second, K1, K2); + + // Robustly estimation of the Essential matrix and its precision + const std::pair acRansacOut = ACRANSAC(kernel, relativePose_info.vec_inliers, + max_iteration_count, &relativePose_info.essential_matrix, relativePose_info.initial_residual_tolerance); + relativePose_info.found_residual_precision = acRansacOut.first; + + if (relativePose_info.vec_inliers.size() < SolverType::MINIMUM_SAMPLES * ALICEVISION_MINIMUM_SAMPLES_COEF ) + { + ALICEVISION_LOG_INFO("robustRelativePose: no sufficient coverage (the model does not support enough samples): " << relativePose_info.vec_inliers.size()); + return false; // no sufficient coverage (the model does not support enough samples) + } + + // estimation of the relative poses + Mat3 R; + Vec3 t; + if (!estimate_Rt_fromE( + K1, K2, x1, x2, + relativePose_info.essential_matrix, relativePose_info.vec_inliers, &R, &t)) + { + ALICEVISION_LOG_INFO("robustRelativePose: cannot find a valid [R|t] couple that makes the inliers in front of the camera."); + return false; // cannot find a valid [R|t] couple that makes the inliers in front of the camera. + } + t = Vec3(0.0, 0.0, 0.0); + + // Store [R|C] for the second camera, since the first camera is [Id|0] + relativePose_info.relativePose = geometry::Pose3(R, -R.transpose() * t); + return true; +} + + +/** + * @brief Decompose a homography given known calibration matrices, assuming a pure rotation between the two views. + * It is supposed that \f$ x_2 \sim H x_1 \f$ with \f$ H = K_2 * R * K_1^{-1} \f$ + * @param[in] homography 3x3 homography matrix H. + * @param[in] K1 3x3 calibration matrix of the first view. + * @param[in] K2 3x3 calibration matrix of the second view. + * @return The 3x3 rotation matrix corresponding to the pure rotation between the views. + */ +aliceVision::Mat3 decomposePureRotationHomography(const Mat3 &homography, const Mat3 &K1, + const Mat3 &K2) +{ + // G is the "calibrated" homography inv(K2) * H * K1 + const auto G = K2.inverse() * homography * K1; + // compute the scale factor lambda that makes det(lambda*G) = 1 + const auto lambda = std::pow(1 / G.determinant(), 1 / 3); + const auto rotation = lambda * G; + + //@fixme find possible bad cases? 
+ + // compute and return the closest rotation matrix + Eigen::JacobiSVD usv(rotation, Eigen::ComputeFullU | Eigen::ComputeFullV); + const auto &u = usv.matrixU(); + const auto vt = usv.matrixV().transpose(); + return u * vt; +} + +/** + * @brief Estimate the homography between two views using corresponding points such that \f$ x_2 \sim H x_1 \f$ + * @param[in] x1 The points on the first image. + * @param[in] x2 The corresponding points on the second image. + * @param[in] imgSize1 The size of the first image. + * @param[in] imgSize2 The size of the second image. + * @param[out] H The estimated homography. + * @param[out] vec_inliers The inliers satisfying the homography as a list of indices. + * @return the status of the estimation. + */ +aliceVision::EstimationStatus robustHomographyEstimationAC(const Mat2X &x1, + const Mat2X &x2, + const std::pair &imgSize1, + const std::pair &imgSize2, + Mat3 &H, + std::vector &vec_inliers) +{ + using KernelType = robustEstimation::ACKernelAdaptor< + homography::kernel::FourPointSolver, + homography::kernel::AsymmetricError, + UnnormalizerI, + Mat3>; + + KernelType kernel(x1, imgSize1.first, imgSize1.second, + x2, imgSize2.first, imgSize2.second, + false); // configure as point to point error model. + + + const std::pair ACRansacOut = robustEstimation::ACRANSAC(kernel, vec_inliers, + 1024, + &H, + std::numeric_limits::infinity()); + + const bool valid{!vec_inliers.empty()}; + //@fixme + const bool hasStrongSupport{vec_inliers.size() > KernelType::MINIMUM_SAMPLES * 2.5}; + + return {valid, hasStrongSupport}; +} + +bool robustRelativeRotation_fromH(const Mat3 &K1, + const Mat3 &K2, + const Mat2X &x1, + const Mat2X &x2, + const std::pair &imgSize1, + const std::pair &imgSize2, + RelativeRotationInfo &relativeRotationInfo, + const size_t max_iteration_count) +{ + std::vector vec_inliers{}; + + // estimate the homography + const auto status = robustHomographyEstimationAC(x1, x2, imgSize1, imgSize2, relativeRotationInfo._homography, + relativeRotationInfo._inliers); + + if (!status.isValid && !status.hasStrongSupport) + { + return false; + } + + relativeRotationInfo._relativeRotation = decomposePureRotationHomography(relativeRotationInfo._homography, K1, K2); + //ALICEVISION_LOG_INFO("Found homography H:\n" << relativeRotationInfo._homography); + //ALICEVISION_LOG_INFO("Homography H decomposes to rotation R:\n" << relativeRotationInfo._relativeRotation); + + return true; +} + + + + +ReconstructionEngine_panorama::ReconstructionEngine_panorama(const SfMData& sfmData, + const std::string& outDirectory, + const std::string& loggingFile) + : ReconstructionEngine(sfmData, outDirectory) + , _loggingFile(loggingFile) + , _normalizedFeaturesPerView(nullptr) +{ + if(!_loggingFile.empty()) + { + // setup HTML logger + _htmlDocStream = std::make_shared("GlobalReconstructionEngine SFM report."); + _htmlDocStream->pushInfo(htmlDocument::htmlMarkup("h1", std::string("ReconstructionEngine_globalSfM"))); + _htmlDocStream->pushInfo("
"); + _htmlDocStream->pushInfo( "Dataset info:"); + _htmlDocStream->pushInfo( "Views count: " + htmlDocument::toString( sfmData.getViews().size()) + "
"); + } + + // Set default motion Averaging methods + _eRotationAveragingMethod = ROTATION_AVERAGING_L2; + + // Set default relative rotation methods + _eRelativeRotationMethod = RELATIVE_ROTATION_FROM_E; +} + +ReconstructionEngine_panorama::~ReconstructionEngine_panorama() +{ + if(!_loggingFile.empty()) + { + // Save the reconstruction Log + std::ofstream htmlFileStream(_loggingFile.c_str()); + htmlFileStream << _htmlDocStream->getDoc(); + } +} + +void ReconstructionEngine_panorama::SetFeaturesProvider(feature::FeaturesPerView* featuresPerView) +{ + _featuresPerView = featuresPerView; + + // Copy features and save a normalized version + _normalizedFeaturesPerView = std::make_shared(*featuresPerView); + #pragma omp parallel + for(MapFeaturesPerView::iterator iter = _normalizedFeaturesPerView->getData().begin(); + iter != _normalizedFeaturesPerView->getData().end(); ++iter) + { + #pragma omp single nowait + { + // get the related view & camera intrinsic and compute the corresponding bearing vectors + const View * view = _sfmData.getViews().at(iter->first).get(); + if(_sfmData.getIntrinsics().count(view->getIntrinsicId())) + { + const std::shared_ptr cam = _sfmData.getIntrinsics().find(view->getIntrinsicId())->second; + for(auto& iterFeatPerDesc: iter->second) + { + for (PointFeatures::iterator iterPt = iterFeatPerDesc.second.begin(); + iterPt != iterFeatPerDesc.second.end(); ++iterPt) + { + const Vec3 bearingVector = (*cam)(cam->get_ud_pixel(iterPt->coords().cast())); + iterPt->coords() << (bearingVector.head(2) / bearingVector(2)).cast(); + } + } + } + } + } +} + +void ReconstructionEngine_panorama::SetMatchesProvider(matching::PairwiseMatches* provider) +{ + _pairwiseMatches = provider; +} + +void ReconstructionEngine_panorama::SetRotationAveragingMethod(ERotationAveragingMethod eRotationAveragingMethod) +{ + _eRotationAveragingMethod = eRotationAveragingMethod; +} + +void ReconstructionEngine_panorama::SetRelativeRotationMethod(ERelativeRotationMethod eRelativeRotationMethod) +{ + _eRelativeRotationMethod = eRelativeRotationMethod; +} + +bool ReconstructionEngine_panorama::process() +{ + // keep only the largest biedge connected subgraph + { + const PairSet pairs = matching::getImagePairs(*_pairwiseMatches); + const std::set set_remainingIds = graph::CleanGraph_KeepLargestBiEdge_Nodes(pairs, _outputFolder); + if(set_remainingIds.empty()) + { + ALICEVISION_LOG_DEBUG("Invalid input image graph for panorama"); + return false; + } + KeepOnlyReferencedElement(set_remainingIds, *_pairwiseMatches); + } + + aliceVision::rotationAveraging::RelativeRotations relatives_R; + Compute_Relative_Rotations(relatives_R); + + HashMap global_rotations; + if(!Compute_Global_Rotations(relatives_R, global_rotations)) + { + ALICEVISION_LOG_WARNING("Panorama:: Rotation Averaging failure!"); + return false; + } + + // we set translation vector to zero + for(const auto& gR: global_rotations) + { + const Vec3 t(0.0, 0.0, 0.0); + const IndexT poseId = gR.first; + const Mat3 & Ri = gR.second; + _sfmData.setAbsolutePose(poseId, CameraPose(Pose3(Ri, t))); + } + + //-- Export statistics about the SfM process + if (!_loggingFile.empty()) + { + using namespace htmlDocument; + std::ostringstream os; + os << "Structure from Motion statistics."; + _htmlDocStream->pushInfo("
"); + _htmlDocStream->pushInfo(htmlMarkup("h1",os.str())); + + os.str(""); + os << "-------------------------------" << "
" + << "-- View count: " << _sfmData.getViews().size() << "
" + << "-- Intrinsic count: " << _sfmData.getIntrinsics().size() << "
" + << "-- Pose count: " << _sfmData.getPoses().size() << "
" + << "-- Track count: " << _sfmData.getLandmarks().size() << "
" + << "-------------------------------" << "
"; + _htmlDocStream->pushInfo(os.str()); + } + + return true; +} + +/// Compute from relative rotations the global rotations of the camera poses +bool ReconstructionEngine_panorama::Compute_Global_Rotations(const rotationAveraging::RelativeRotations& relatives_R, HashMap& global_rotations) +{ + if(relatives_R.empty()) { + return false; + } + + rotationAveraging::RelativeRotations local_relatives_R = relatives_R; + + + std::set set_pose_ids; + for (const auto & relative_R : local_relatives_R) + { + set_pose_ids.insert(relative_R.i); + set_pose_ids.insert(relative_R.j); + } + + ALICEVISION_LOG_INFO("Global rotations computation: " << "\n" + "\t- relative rotations: " << relatives_R.size() << "\n" + "\t- global rotations: " << set_pose_ids.size()); + + /* + If a view with a pose prior is not found in the relative rotation, + make sure we add a fake link to adjust globally everything. + */ + sfmData::Poses & poses = _sfmData.getPoses(); + if (poses.size() > 0) { + + IndexT firstViewId = *set_pose_ids.begin(); + IndexT firstPoseId = _sfmData.getView(firstViewId).getPoseId(); + + Eigen::Matrix3d i1Ro = poses[firstPoseId].getTransform().rotation(); + + for (auto & currentPose : _sfmData.getPoses()) { + + IndexT poseId = currentPose.first; + if (set_pose_ids.find(poseId) == set_pose_ids.end()) { + + set_pose_ids.insert(poseId); + + /*Add a fake relative pose between this pose and the first found pose*/ + Eigen::Matrix3d iRo = currentPose.second.getTransform().rotation(); + Eigen::Matrix3d iR1 = iRo * i1Ro.transpose(); + local_relatives_R.emplace_back(firstPoseId, currentPose.first, iR1, 1.0); + } + } + } + + // Global Rotation solver: + const ERelativeRotationInferenceMethod eRelativeRotationInferenceMethod = TRIPLET_ROTATION_INFERENCE_NONE; // TRIPLET_ROTATION_INFERENCE_COMPOSITION_ERROR; + + GlobalSfMRotationAveragingSolver rotationAveraging_solver; + //-- Rejection triplet that are 'not' identity rotation (error to identity > 50°) + const bool b_rotationAveraging = rotationAveraging_solver.Run(_eRotationAveragingMethod, eRelativeRotationInferenceMethod, local_relatives_R, 100.0, global_rotations); + + ALICEVISION_LOG_DEBUG("Found #global_rotations: " << global_rotations.size()); + + if(b_rotationAveraging) + { + // Log input graph to the HTML report + if(!_loggingFile.empty() && !_outputFolder.empty()) + { + // Log a relative pose graph + { + std::set set_pose_ids; + PairSet relative_pose_pairs; + for(const auto & view : _sfmData.getViews()) + { + const IndexT pose_id = view.second->getPoseId(); + set_pose_ids.insert(pose_id); + } + const std::string sGraph_name = "global_relative_rotation_pose_graph_final"; + graph::indexedGraph putativeGraph(set_pose_ids, rotationAveraging_solver.GetUsedPairs()); + graph::exportToGraphvizData((fs::path(_outputFolder) / (sGraph_name + ".dot")).string(), putativeGraph.g); + + /* + using namespace htmlDocument; + std::ostringstream os; + os << "
" << sGraph_name << "
" + << "\n"; + _htmlDocStream->pushInfo(os.str()); + */ + } + } + } + + return b_rotationAveraging; +} + +// Adjust the scene (& remove outliers) +bool ReconstructionEngine_panorama::Adjust() +{ + BundleAdjustmentCeres::CeresOptions options; + options.useParametersOrdering = false; + options.summary = true; + + /*Minimize only rotation first*/ + BundleAdjustmentCeres BA(options); + bool success = BA.adjust(_sfmData, BundleAdjustment::REFINE_ROTATION); + if(success) + { + ALICEVISION_LOG_INFO("Rotations successfully refined."); + } + else + { + ALICEVISION_LOG_INFO("Failed to refine the rotations only."); + } + + /*Minimize All then*/ + success = BA.adjust(_sfmData, BundleAdjustment::REFINE_ROTATION | BundleAdjustment::REFINE_INTRINSICS_ALL); + if(success) + { + ALICEVISION_LOG_INFO("Bundle successfully refined."); + } + else + { + ALICEVISION_LOG_INFO("Failed to refine Everything."); + } + + return success; +} + +void ReconstructionEngine_panorama::Compute_Relative_Rotations(rotationAveraging::RelativeRotations& vec_relatives_R) +{ + // + // Build the Relative pose graph from matches: + // + /// pairwise view relation between poseIds + typedef std::map PoseWiseMatches; + + sfmData::RotationPriors & rotationpriors = _sfmData.getRotationPriors(); + for (auto & iter_v1 :_sfmData.getViews()) { + + if (!_sfmData.isPoseAndIntrinsicDefined(iter_v1.first)) { + continue; + } + + for (auto & iter_v2 :_sfmData.getViews()) { + if (iter_v1.first == iter_v2.first) { + continue; + } + + if (!_sfmData.isPoseAndIntrinsicDefined(iter_v2.first)) { + continue; + } + + IndexT pid1 = iter_v1.second->getPoseId(); + IndexT pid2 = iter_v2.second->getPoseId(); + + CameraPose oneTo = _sfmData.getAbsolutePose(iter_v1.second->getPoseId()); + CameraPose twoTo = _sfmData.getAbsolutePose(iter_v2.second->getPoseId()); + Eigen::Matrix3d oneRo = oneTo.getTransform().rotation(); + Eigen::Matrix3d twoRo = twoTo.getTransform().rotation(); + Eigen::Matrix3d twoRone = twoRo * oneRo.transpose(); + + sfmData::RotationPrior prior(iter_v1.first, iter_v2.first, twoRone); + rotationpriors.push_back(prior); + } + } + + // List shared correspondences (pairs) between poses + PoseWiseMatches poseWiseMatches; + for (matching::PairwiseMatches::const_iterator iterMatches = _pairwiseMatches->begin(); iterMatches != _pairwiseMatches->end(); ++iterMatches) + { + const Pair pair = iterMatches->first; + const View* v1 = _sfmData.getViews().at(pair.first).get(); + const View* v2 = _sfmData.getViews().at(pair.second).get(); + poseWiseMatches[Pair(v1->getPoseId(), v2->getPoseId())].insert(pair); + } + + sfm::Constraints2D & constraints2d = _sfmData.getConstraints2D(); + std::map connection_size; + + ALICEVISION_LOG_INFO("Relative pose computation:"); + /*For each pair of views, compute the relative pose*/ + for (int i = 0; i < poseWiseMatches.size(); ++i) + { + { + PoseWiseMatches::const_iterator iter (poseWiseMatches.begin()); + std::advance(iter, i); + const auto& relative_pose_iterator(*iter); + const Pair relative_pose_pair = relative_pose_iterator.first; + const PairSet& match_pairs = relative_pose_iterator.second; + + // If a pair has the same ID, discard it + if (relative_pose_pair.first == relative_pose_pair.second) + { + continue; + } + + // Select common bearing vectors + if (match_pairs.size() > 1) + { + ALICEVISION_LOG_WARNING("Compute relative pose between more than two view is not supported"); + continue; + } + + const Pair pairIterator = *(match_pairs.begin()); + const IndexT I = pairIterator.first; + const IndexT J = 
pairIterator.second; + const View* view_I = _sfmData.views[I].get(); + const View* view_J = _sfmData.views[J].get(); + + //Check that valid cameras are existing for the pair of view + if (_sfmData.getIntrinsics().count(view_I->getIntrinsicId()) == 0 || _sfmData.getIntrinsics().count(view_J->getIntrinsicId()) == 0) { + continue; + } + + + /* Build a list of pairs in meters*/ + const matching::MatchesPerDescType & matchesPerDesc = _pairwiseMatches->at(pairIterator); + const std::size_t nbBearing = matchesPerDesc.getNbAllMatches(); + std::size_t iBearing = 0; + Mat x1(2, nbBearing), x2(2, nbBearing); + + for(const auto& matchesPerDescIt: matchesPerDesc) + { + const feature::EImageDescriberType descType = matchesPerDescIt.first; + assert(descType != feature::EImageDescriberType::UNINITIALIZED); + const matching::IndMatches & matches = matchesPerDescIt.second; + + for (const auto & match : matches) + { + x1.col(iBearing) = _normalizedFeaturesPerView->getFeatures(I, descType)[match._i].coords().cast(); + x2.col(iBearing++) = _normalizedFeaturesPerView->getFeatures(J, descType)[match._j].coords().cast(); + } + } + assert(nbBearing == iBearing); + + const IntrinsicBase* cam_I = _sfmData.getIntrinsics().at(view_I->getIntrinsicId()).get(); + const IntrinsicBase* cam_J = _sfmData.getIntrinsics().at(view_J->getIntrinsicId()).get(); + + RelativePoseInfo relativePose_info; + // Compute max authorized error as geometric mean of camera plane tolerated residual error + relativePose_info.initial_residual_tolerance = std::pow(cam_I->imagePlane_toCameraPlaneError(2.5) * cam_J->imagePlane_toCameraPlaneError(2.5), 1./2.); + + // Since we use normalized features, we will use unit image size and intrinsic matrix: + const std::pair imageSize(1., 1.); + const Mat3 K = Mat3::Identity(); + + switch(_eRelativeRotationMethod) + { + case RELATIVE_ROTATION_FROM_E: + { + if(!robustRelativeRotation_fromE(K, K, x1, x2, imageSize, imageSize, relativePose_info)) + { + ALICEVISION_LOG_INFO("Relative pose computation: i: " << i << ", (" << I << ", " << J <<") => FAILED"); + continue; + } + } + break; + case RELATIVE_ROTATION_FROM_H: + { + RelativeRotationInfo relativeRotation_info; + relativeRotation_info._initialResidualTolerance = std::pow(cam_I->imagePlane_toCameraPlaneError(2.5) * cam_J->imagePlane_toCameraPlaneError(2.5), 1./2.); + + if(!robustRelativeRotation_fromH(K, K, x1, x2, imageSize, imageSize, relativeRotation_info)) + { + ALICEVISION_LOG_INFO("Relative pose computation: i: " << i << ", (" << I << ", " << J <<") => FAILED"); + continue; + } + + relativePose_info.relativePose = geometry::Pose3(relativeRotation_info._relativeRotation, Vec3::Zero()); + relativePose_info.initial_residual_tolerance = relativeRotation_info._initialResidualTolerance; + relativePose_info.found_residual_precision = relativeRotation_info._foundResidualPrecision; + relativePose_info.vec_inliers = relativeRotation_info._inliers; + } + break; + default: + ALICEVISION_LOG_DEBUG( + "Unknown relative rotation method: " << ERelativeRotationMethod_enumToString(_eRelativeRotationMethod)); + } + + //ALICEVISION_LOG_INFO("Relative pose computation: i: " << i << ", (" << I << ", " << J <<") => SUCCESS"); + //ALICEVISION_LOG_INFO("Nb inliers: " << relativePose_info.vec_inliers.size() << ", initial_residual_tolerance: " << relativePose_info.initial_residual_tolerance << ", found_residual_precision: " << relativePose_info.found_residual_precision); + + + /* + If an existing prior on rotation exists, then make sure the found detected rotation is not stupid 
+ */ + double weight = relativePose_info.vec_inliers.size(); + if (_sfmData.isPoseAndIntrinsicDefined(view_I) && _sfmData.isPoseAndIntrinsicDefined(view_J)) { + CameraPose iTo = _sfmData.getAbsolutePose(view_I->getPoseId()); + CameraPose jTo = _sfmData.getAbsolutePose(view_J->getPoseId()); + Eigen::Matrix3d iRo = iTo.getTransform().rotation(); + Eigen::Matrix3d jRo = jTo.getTransform().rotation(); + Eigen::Matrix3d jRi = jRo * iRo.transpose(); + + Eigen::Matrix3d jRi_est = relativePose_info.relativePose.rotation(); + + Eigen::AngleAxisd checker; + checker.fromRotationMatrix(jRi_est * jRi.transpose()); + if (std::abs(radianToDegree(checker.angle())) > 5) { + relativePose_info.relativePose = geometry::Pose3(jRi, Vec3::Zero()); + relativePose_info.vec_inliers.clear(); + weight = 1.0; + } + } + + /*Add connection to find best constraints*/ + if (connection_size.find(I) == connection_size.end()) { + connection_size[I] = 0; + } + connection_size[I] += relativePose_info.vec_inliers.size(); + if (connection_size.find(J) == connection_size.end()) { + connection_size[J] = 0; + } + connection_size[J] += relativePose_info.vec_inliers.size(); + + /*Sort all inliers by increasing ids*/ + if (relativePose_info.vec_inliers.size() > 0) { + std::sort(relativePose_info.vec_inliers.begin(), relativePose_info.vec_inliers.end()); + + size_t index = 0; + size_t index_inlier = 0; + for(const auto& matchesPerDescIt: matchesPerDesc) + { + const feature::EImageDescriberType descType = matchesPerDescIt.first; + const matching::IndMatches & matches = matchesPerDescIt.second; + + for (const auto & match : matches) + { + size_t next_inlier = relativePose_info.vec_inliers[index_inlier]; + if (index == next_inlier) { + + Vec2 pt1 = _featuresPerView->getFeatures(I, descType)[match._i].coords().cast(); + Vec2 pt2 = _featuresPerView->getFeatures(J, descType)[match._j].coords().cast(); + + sfm::Constraint2D constraint(I, sfm::Observation(pt1, 0), J, sfm::Observation(pt2, 0)); + constraints2d.push_back(constraint); + + index_inlier++; + } + + index++; + } + } + } + + // #pragma omp critical + { + // Add the relative rotation to the relative 'rotation' pose graph + using namespace aliceVision::rotationAveraging; + vec_relatives_R.emplace_back(relative_pose_pair.first, relative_pose_pair.second, relativePose_info.relativePose.rotation(), 1.0); + } + } + } // for all relative pose + + /* + // Find best connection with pose prior + size_t max_val = 0; + IndexT max_index = UndefinedIndexT; + for (auto & item : connection_size) { + if (_sfmData.isPoseAndIntrinsicDefined(item.first)) { + if (item.second > max_val) { + max_index = item.first; + max_val = item.second; + } + } + } + + // If a best view is defined, lock it + sfmData::Poses & poses = _sfmData.getPoses(); + if (max_index != UndefinedIndexT) { + sfmData::View & v = _sfmData.getView(max_index); + IndexT poseid = v.getPoseId(); + if (poseid != UndefinedIndexT) { + poses[v.getPoseId()].lock(); + } + } + */ + + /*Debug result*/ + ALICEVISION_LOG_DEBUG("Compute_Relative_Rotations: vec_relatives_R.size(): " << vec_relatives_R.size()); + for(rotationAveraging::RelativeRotation& rotation: vec_relatives_R) + { + ALICEVISION_LOG_DEBUG("Relative_Rotation:\n" << "i: " << rotation.i << ", j: " << rotation.j << ", weight: " << rotation.weight << "\n" << "Rij" << rotation.Rij); + } + + // Log input graph to the HTML report + if (!_loggingFile.empty() && !_outputFolder.empty()) + { + // Log a relative view graph + { + std::set set_ViewIds; + std::transform(_sfmData.getViews().begin(), 
_sfmData.getViews().end(), std::inserter(set_ViewIds, set_ViewIds.begin()), stl::RetrieveKey()); + graph::indexedGraph putativeGraph(set_ViewIds, getImagePairs(*_pairwiseMatches)); + graph::exportToGraphvizData((fs::path(_outputFolder) / "global_relative_rotation_view_graph.dot").string(), putativeGraph.g); + } + + // Log a relative pose graph + { + std::set set_pose_ids; + PairSet relative_pose_pairs; + for(const auto& relative_R : vec_relatives_R) + { + const Pair relative_pose_indices(relative_R.i, relative_R.j); + relative_pose_pairs.insert(relative_pose_indices); + set_pose_ids.insert(relative_R.i); + set_pose_ids.insert(relative_R.j); + } + const std::string sGraph_name = "global_relative_rotation_pose_graph"; + graph::indexedGraph putativeGraph(set_pose_ids, relative_pose_pairs); + graph::exportToGraphvizData((fs::path(_outputFolder) / (sGraph_name + ".dot")).string(), putativeGraph.g); + /* + using namespace htmlDocument; + std::ostringstream os; + + os << "
" << "global_relative_rotation_pose_graph" << "
" + << "\n"; + _htmlDocStream->pushInfo(os.str()); + */ + } + } +} + +} // namespace sfm +} // namespace aliceVision + diff --git a/src/aliceVision/sfm/pipeline/panorama/ReconstructionEngine_panorama.hpp b/src/aliceVision/sfm/pipeline/panorama/ReconstructionEngine_panorama.hpp new file mode 100644 index 0000000000..64f3789b98 --- /dev/null +++ b/src/aliceVision/sfm/pipeline/panorama/ReconstructionEngine_panorama.hpp @@ -0,0 +1,176 @@ +// This file is part of the AliceVision project. +// Copyright (c) 2016 AliceVision contributors. +// Copyright (c) 2012 openMVG contributors. +// This Source Code Form is subject to the terms of the Mozilla Public License, +// v. 2.0. If a copy of the MPL was not distributed with this file, +// You can obtain one at https://mozilla.org/MPL/2.0/. + +#pragma once + +#include +#include +#include + +#include + +namespace aliceVision{ +namespace sfm{ + +enum ERelativeRotationMethod +{ + RELATIVE_ROTATION_FROM_E, + RELATIVE_ROTATION_FROM_H +}; + +inline std::string ERelativeRotationMethod_enumToString(const ERelativeRotationMethod rotationMethod) +{ + switch(rotationMethod) + { + case ERelativeRotationMethod::RELATIVE_ROTATION_FROM_E: return "essential_matrix"; + case ERelativeRotationMethod::RELATIVE_ROTATION_FROM_H: return "homography_matrix"; + } + throw std::out_of_range("Invalid method name enum"); +} + +inline ERelativeRotationMethod ERelativeRotationMethod_stringToEnum(const std::string& rotationMethodName) +{ + std::string methodName = rotationMethodName; + std::transform(methodName.begin(), methodName.end(), methodName.begin(), ::tolower); + + if(methodName == "essential_matrix") return ERelativeRotationMethod::RELATIVE_ROTATION_FROM_E; + if(methodName == "homography_matrix") return ERelativeRotationMethod::RELATIVE_ROTATION_FROM_H; + + throw std::out_of_range("Invalid method name : '" + rotationMethodName + "'"); +} + +inline std::ostream& operator<<(std::ostream& os, ERelativeRotationMethod rotationMethodName) +{ + os << ERelativeRotationMethod_enumToString(rotationMethodName); + return os; +} + +inline std::istream& operator>>(std::istream& in, ERelativeRotationMethod& rotationMethod) +{ + std::string token; + in >> token; + rotationMethod = ERelativeRotationMethod_stringToEnum(token); + return in; +} + + +/** + * @brief A struct containing the information of the relative rotation. + */ +struct RelativeRotationInfo +{ + /** + * @brief Default constructor. + */ + RelativeRotationInfo() = default; + + /// the homography. + Mat3 _homography{}; + /// the relative rotation. + Mat3 _relativeRotation{}; + /// the inliers. + std::vector _inliers{}; + /// initial threshold for the acransac process. + double _initialResidualTolerance{std::numeric_limits::infinity()}; + /// the estimated threshold found by acransac. + double _foundResidualPrecision{std::numeric_limits::infinity()}; + +}; + + +/** + * @brief Estimate the relative pose between two views. + * @param[in] K1 3x3 calibration matrix of the first view. + * @param[in] K2 3x3 calibration matrix of the second view. + * @param[in] x1 The points on the first image. + * @param[in] x2 The corresponding points on the second image. + * @param[in] imgSize1 The size of the first image. + * @param[in] imgSize2 The size of the second image. + * @param[out] relativePoseInfo Contains the result of the estimation. + * @param[in] maxIterationCount Max number of iteration for the ransac process. + * @return true if a homography has been estimated. 
+ */ +bool robustRelativeRotation_fromE(const Mat3 & K1, const Mat3 & K2, + const Mat & x1, const Mat & x2, + const std::pair & size_ima1, + const std::pair & size_ima2, + RelativePoseInfo & relativePose_info, + const size_t max_iteration_count = 4096); + +/** + * @brief Estimate the relative rotation between two views related by a pure rotation. + * @param[in] K1 3x3 calibration matrix of the first view. + * @param[in] K2 3x3 calibration matrix of the second view. + * @param[in] x1 The points on the first image. + * @param[in] x2 The corresponding points on the second image. + * @param[in] imgSize1 The size of the first image. + * @param[in] imgSize2 The size of the second image. + * @param[out] relativeRotationInfo Contains the result of the estimation. + * @param[in] maxIterationCount Max number of iteration for the ransac process. + * @return true if a homography has been estimated. + */ +bool robustRelativeRotation_fromH(const Mat3 &K1, const Mat3 &K2, + const Mat2X &x1, const Mat2X &x2, + const std::pair &imgSize1, + const std::pair &imgSize2, + RelativeRotationInfo &relativeRotationInfo, + const size_t max_iteration_count = 4096); + + +/// Panorama Pipeline Reconstruction Engine. +/// - Method: Based on Global SfM but with no translations between cameras. +class ReconstructionEngine_panorama : public ReconstructionEngine +{ +public: + + ReconstructionEngine_panorama(const sfmData::SfMData& sfmData, + const std::string& outDirectory, + const std::string& loggingFile = ""); + + ~ReconstructionEngine_panorama(); + + void SetFeaturesProvider(feature::FeaturesPerView* featuresPerView); + void SetMatchesProvider(matching::PairwiseMatches* provider); + + void SetRotationAveragingMethod(ERotationAveragingMethod eRotationAveragingMethod); + void SetRelativeRotationMethod(ERelativeRotationMethod eRelativeRotationMethod); + + void setLockAllIntrinsics(bool v) { _lockAllIntrinsics = v; } + + virtual bool process(); + +protected: + /// Compute from relative rotations the global rotations of the camera poses + bool Compute_Global_Rotations(const aliceVision::rotationAveraging::RelativeRotations& vec_relatives_R, + HashMap& map_globalR); + +public: + /// Adjust the scene (& remove outliers) + bool Adjust(); + +private: + /// Compute relative rotations + void Compute_Relative_Rotations(aliceVision::rotationAveraging::RelativeRotations& vec_relatives_R); + + // Logger + std::shared_ptr _htmlDocStream; + std::string _loggingFile; + + // Parameter + ERotationAveragingMethod _eRotationAveragingMethod; + ERelativeRotationMethod _eRelativeRotationMethod; + bool _lockAllIntrinsics = false; + + // Data provider + feature::FeaturesPerView* _featuresPerView; + matching::PairwiseMatches* _pairwiseMatches; + + std::shared_ptr _normalizedFeaturesPerView; +}; + +} // namespace sfm +} // namespace aliceVision diff --git a/src/aliceVision/sfm/pipeline/panorama/panoramaSfM_test.cpp b/src/aliceVision/sfm/pipeline/panorama/panoramaSfM_test.cpp new file mode 100644 index 0000000000..71363b6213 --- /dev/null +++ b/src/aliceVision/sfm/pipeline/panorama/panoramaSfM_test.cpp @@ -0,0 +1,121 @@ +// This file is part of the AliceVision project. +// Copyright (c) 2016 AliceVision contributors. +// Copyright (c) 2012 openMVG contributors. +// This Source Code Form is subject to the terms of the Mozilla Public License, +// v. 2.0. If a copy of the MPL was not distributed with this file, +// You can obtain one at https://mozilla.org/MPL/2.0/. 
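Before the unit test below, a minimal sketch of how the engine declared above is meant to be driven. This is hypothetical driver code (sfmData, outputFolder, featuresPerView and pairwiseMatches are assumed to exist; the real command-line wrapper is not part of this change):

    ReconstructionEngine_panorama engine(sfmData, outputFolder, outputFolder + "/report.html");
    engine.SetFeaturesProvider(&featuresPerView);   // also caches bearing-normalized features
    engine.SetMatchesProvider(&pairwiseMatches);
    engine.SetRotationAveragingMethod(ROTATION_AVERAGING_L2);
    engine.SetRelativeRotationMethod(RELATIVE_ROTATION_FROM_E);
    if(engine.process()) // rotation averaging only; every translation is set to zero
    {
      engine.Adjust();   // refine rotations first, then rotations + intrinsics
    }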
+ +#include +#include +#include +#include + +#include +#include +#include + +#define BOOST_TEST_MODULE PANORAMA_SFM +#include +#include +#include + +using namespace aliceVision; +using namespace aliceVision::camera; +using namespace aliceVision::geometry; +using namespace aliceVision::sfm; +using namespace aliceVision::sfmData; + +// Test summary: +// - Create a rotation matrix between two views +// - Create two matrices of calibration for two views + + +BOOST_AUTO_TEST_CASE(PANORAMA_SFM) +{ + // rotation between the two views + const Mat3 rotation = aliceVision::rotationXYZ(0.01, -0.001, -0.2); + ALICEVISION_LOG_INFO("Ground truth rotation:\n" << rotation); + + // some random 3D points on the unit sphere + const Mat3X pts3d = Mat3X::Random(3, 20).colwise().normalized(); + ALICEVISION_LOG_INFO("points 3d:\n" << pts3d); + + // calibration 1 + Mat3 K1; + K1 << 1200, 0, 960, + 0, 1200, 540, + 0, 0, 1; + ALICEVISION_LOG_INFO("K1:\n" << K1); + + // calibration 2 + Mat3 K2; + K2 << 1100, 0, 960, + 0, 1100, 540, + 0, 0, 1; + ALICEVISION_LOG_INFO("K2:\n" << K2); + + // 2d points on each side as projection of the 3D points + const Mat2X pts1 = (K1 * pts3d).colwise().hnormalized(); + // ALICEVISION_LOG_INFO("pts1:\n" << pts1); + const Mat2X pts2 = (K2 * rotation * pts3d).colwise().hnormalized(); + // ALICEVISION_LOG_INFO("pts2:\n" << pts2); + + // test the uncalibrated version, just pass the 2d points and compute R + + const double epsilon = 1e-4; + + // Relative Rotation from H + { + RelativeRotationInfo rotationInfo{}; + ALICEVISION_LOG_INFO("\n\n###########################################\nUncalibrated from H"); + robustRelativeRotation_fromH(K1, K2, pts1, pts2, + std::make_pair(1920, 1080), + std::make_pair(1920, 1080), + rotationInfo); + ALICEVISION_LOG_INFO("rotation.inverse() * rotationInfo._relativeRotation:\n" << rotation.inverse() * rotationInfo._relativeRotation); + + EXPECT_MATRIX_NEAR(rotation, rotationInfo._relativeRotation, epsilon); + + + // test the calibrated version, compute normalized points for each view (by multiplying by the inverse of K) and estimate H and R + ALICEVISION_LOG_INFO("\n\n###########################################\nCalibrated from H"); + robustRelativeRotation_fromH(Mat3::Identity(), Mat3::Identity(), + (K1.inverse()*pts1.colwise().homogeneous()).colwise().hnormalized(), + (K2.inverse()*pts2.colwise().homogeneous()).colwise().hnormalized(), + std::make_pair(1920, 1080), + std::make_pair(1920, 1080), + rotationInfo); + ALICEVISION_LOG_INFO("rotation.inverse() * rotationInfo._relativeRotation:\n" << rotation.inverse() * rotationInfo._relativeRotation); + + EXPECT_MATRIX_NEAR(rotation, rotationInfo._relativeRotation, epsilon); + } + + /* + // Relative Rotation from E + { + RelativePoseInfo rotationInfo{}; + ALICEVISION_LOG_INFO("\n\n###########################################\nUncalibrated from E"); + robustRelativeRotation_fromE(K1, K2, pts1, pts2, + std::make_pair(1920, 1080), + std::make_pair(1920, 1080), + rotationInfo); + ALICEVISION_LOG_INFO("rotation.inverse() * rotationInfo._relativeRotation:\n" << rotation.inverse() * rotationInfo.relativePose.rotation()); + + EXPECT_MATRIX_NEAR(rotation, rotationInfo.relativePose.rotation(), epsilon); + + + + // test the calibrated version, compute normalized points for each view (by multiplying by the inverse of K) and estimate H and R + ALICEVISION_LOG_INFO("\n\n###########################################\nCalibrated from E"); + robustRelativeRotation_fromE(Mat3::Identity(), Mat3::Identity(), + 
(K1.inverse()*pts1.colwise().homogeneous()).colwise().hnormalized(), + (K2.inverse()*pts2.colwise().homogeneous()).colwise().hnormalized(), + std::make_pair(1920, 1080), + std::make_pair(1920, 1080), + rotationInfo); + ALICEVISION_LOG_INFO("rotation.inverse() * rotationInfo._relativeRotation:\n" << rotation.inverse() * rotationInfo.relativePose.rotation()); + + EXPECT_MATRIX_NEAR(rotation, rotationInfo.relativePose.rotation(), epsilon); + } + */ +} diff --git a/src/aliceVision/sfm/sfm.hpp b/src/aliceVision/sfm/sfm.hpp index 697d4487ee..438b4a1235 100644 --- a/src/aliceVision/sfm/sfm.hpp +++ b/src/aliceVision/sfm/sfm.hpp @@ -25,6 +25,7 @@ #include #include #include +#include #include #include #include diff --git a/src/aliceVision/sfm/utils/alignment.cpp b/src/aliceVision/sfm/utils/alignment.cpp index c9b3327b2c..e32202537b 100644 --- a/src/aliceVision/sfm/utils/alignment.cpp +++ b/src/aliceVision/sfm/utils/alignment.cpp @@ -145,9 +145,8 @@ void computeNewCoordinateSystemFromSingleCamera(const sfmData::SfMData& sfmData, double& out_S, Mat3& out_R, Vec3& out_t) -{ +{ IndexT viewId = -1; - sfmData::EEXIFOrientation orientation = sfmData::EEXIFOrientation::UNKNOWN; try { @@ -166,7 +165,6 @@ void computeNewCoordinateSystemFromSingleCamera(const sfmData::SfMData& sfmData, { std::string path = view.second->getImagePath(); std::size_t found = path.find(camName); - orientation = view.second->getMetadataOrientation(); if (found!=std::string::npos) { viewId = view.second->getViewId(); @@ -175,31 +173,43 @@ void computeNewCoordinateSystemFromSingleCamera(const sfmData::SfMData& sfmData, } } - if(viewId == -1) throw std::invalid_argument("The camera name \"" + camName + "\" is not found in the sfmData."); else if(!sfmData.isPoseAndIntrinsicDefined(viewId)) throw std::invalid_argument("The camera \"" + camName + "\" exists in the sfmData but is not reconstructed."); + sfmData::EEXIFOrientation orientation = sfmData.getView(viewId).getMetadataOrientation(); + ALICEVISION_LOG_TRACE("computeNewCoordinateSystemFromSingleCamera orientation: " << int(orientation)); + switch(orientation) { case sfmData::EEXIFOrientation::RIGHT: - out_R = Eigen::AngleAxisd(degreeToRadian(180.0), Vec3(0,1,0)) * Eigen::AngleAxisd(degreeToRadian(90.0), Vec3(0,0,1)) * sfmData.getAbsolutePose(viewId).getTransform().rotation(); + ALICEVISION_LOG_TRACE("computeNewCoordinateSystemFromSingleCamera orientation: RIGHT"); + out_R = Eigen::AngleAxisd(degreeToRadian(180.0), Vec3(0,1,0)) + * Eigen::AngleAxisd(degreeToRadian(90.0), Vec3(0,0,1)) + * sfmData.getAbsolutePose(viewId).getTransform().rotation(); break; case sfmData::EEXIFOrientation::LEFT: - out_R = Eigen::AngleAxisd(degreeToRadian(180.0), Vec3(0,1,0)) * Eigen::AngleAxisd(degreeToRadian(270.0), Vec3(0,0,1)) * sfmData.getAbsolutePose(viewId).getTransform().rotation(); + ALICEVISION_LOG_TRACE("computeNewCoordinateSystemFromSingleCamera orientation: LEFT"); + out_R = Eigen::AngleAxisd(degreeToRadian(180.0), Vec3(0,1,0)) + * Eigen::AngleAxisd(degreeToRadian(270.0), Vec3(0,0,1)) + * sfmData.getAbsolutePose(viewId).getTransform().rotation(); break; case sfmData::EEXIFOrientation::UPSIDEDOWN: - out_R = Eigen::AngleAxisd(degreeToRadian(180.0), Vec3(0,1,0)) * sfmData.getAbsolutePose(viewId).getTransform().rotation(); + ALICEVISION_LOG_TRACE("computeNewCoordinateSystemFromSingleCamera orientation: UPSIDEDOWN"); + out_R = Eigen::AngleAxisd(degreeToRadian(180.0), Vec3(0,1,0)) + * sfmData.getAbsolutePose(viewId).getTransform().rotation(); break; case sfmData::EEXIFOrientation::NONE: - out_R = 
Eigen::AngleAxisd(degreeToRadian(180.0), Vec3(0,1,0)) * Eigen::AngleAxisd(degreeToRadian(180.0), Vec3(0,0,1)) * sfmData.getAbsolutePose(viewId).getTransform().rotation();
-      break;
+      ALICEVISION_LOG_TRACE("computeNewCoordinateSystemFromSingleCamera orientation: NONE");
     default:
-      out_R = Eigen::AngleAxisd(degreeToRadian(180.0), Vec3(0,1,0)) * Eigen::AngleAxisd(degreeToRadian(180.0), Vec3(0,0,1)) * sfmData.getAbsolutePose(viewId).getTransform().rotation();
+      ALICEVISION_LOG_TRACE("computeNewCoordinateSystemFromSingleCamera orientation: default");
+      out_R = Eigen::AngleAxisd(degreeToRadian(180.0), Vec3(0,1,0))
+            * Eigen::AngleAxisd(degreeToRadian(180.0), Vec3(0,0,1))
+            * sfmData.getAbsolutePose(viewId).getTransform().rotation();
       break;
   }
-
+
   out_t = - out_R * sfmData.getAbsolutePose(viewId).getTransform().center();
   out_S = 1;
 }
diff --git a/src/aliceVision/sfmData/Constraint2D.hpp b/src/aliceVision/sfmData/Constraint2D.hpp
new file mode 100644
index 0000000000..306802d864
--- /dev/null
+++ b/src/aliceVision/sfmData/Constraint2D.hpp
@@ -0,0 +1,45 @@
+// This file is part of the AliceVision project.
+// Copyright (c) 2016 AliceVision contributors.
+// Copyright (c) 2012 openMVG contributors.
+// This Source Code Form is subject to the terms of the Mozilla Public License,
+// v. 2.0. If a copy of the MPL was not distributed with this file,
+// You can obtain one at https://mozilla.org/MPL/2.0/.
+
+#pragma once
+
+#include <aliceVision/sfmData/Landmark.hpp>
+
+namespace aliceVision {
+namespace sfmData {
+
+/**
+ * @brief Constraint2D is a 2D-2D correspondence constraint: the same point
+ * observed in two views, without an associated 3D point.
+ */
+struct Constraint2D
+{
+  Constraint2D() = default;
+
+  // Note: no default arguments here; they would make a zero-argument call
+  // ambiguous with the defaulted constructor above.
+  Constraint2D(IndexT view_first, const Observation & observation_first,
+               IndexT view_second, const Observation & observation_second)
+  : ViewFirst(view_first)
+  , ObservationFirst(observation_first)
+  , ViewSecond(view_second)
+  , ObservationSecond(observation_second)
+  {}
+
+  IndexT ViewFirst = UndefinedIndexT;
+  IndexT ViewSecond = UndefinedIndexT;
+  Observation ObservationFirst;
+  Observation ObservationSecond;
+
+  bool operator==(const Constraint2D& other) const
+  {
+    return (ViewFirst == other.ViewFirst) &&
+           (ViewSecond == other.ViewSecond) &&
+           (ObservationFirst == other.ObservationFirst) &&
+           (ObservationSecond == other.ObservationSecond);
+  }
+};
+
+} // namespace sfmData
+} // namespace aliceVision
diff --git a/src/aliceVision/sfmData/RotationPrior.hpp b/src/aliceVision/sfmData/RotationPrior.hpp
new file mode 100644
index 0000000000..9fe8573ac3
--- /dev/null
+++ b/src/aliceVision/sfmData/RotationPrior.hpp
@@ -0,0 +1,43 @@
+// This file is part of the AliceVision project.
+// Copyright (c) 2016 AliceVision contributors.
+// Copyright (c) 2012 openMVG contributors.
+// This Source Code Form is subject to the terms of the Mozilla Public License,
+// v. 2.0. If a copy of the MPL was not distributed with this file,
+// You can obtain one at https://mozilla.org/MPL/2.0/.
+
+#pragma once
+
+#include
+
+namespace aliceVision {
+namespace sfmData {
+
+/**
+ * @brief RotationPrior is a prior on the relative rotation between two views.
+ */ +struct RotationPrior +{ + RotationPrior() = default; + + RotationPrior(IndexT view_first = UndefinedIndexT, + IndexT view_second = UndefinedIndexT, + Eigen::Matrix3d second_R_first = Eigen::Matrix3d::Identity()) + : ViewFirst(view_first), + ViewSecond(view_second), + _second_R_first(second_R_first) + {} + + IndexT ViewFirst; + IndexT ViewSecond; + Eigen::Matrix3d _second_R_first; + + bool operator==(const RotationPrior& other) const + { + return (ViewFirst == other.ViewFirst) && + (ViewSecond == other.ViewSecond) && + (_second_R_first == other._second_R_first); + } +}; + +} // namespace sfmData +} // namespace aliceVision diff --git a/src/aliceVision/sfmData/SfMData.cpp b/src/aliceVision/sfmData/SfMData.cpp index 7c5052e475..63d057c6a5 100644 --- a/src/aliceVision/sfmData/SfMData.cpp +++ b/src/aliceVision/sfmData/SfMData.cpp @@ -83,6 +83,15 @@ bool SfMData::operator==(const SfMData& other) const { if(control_points != other.control_points) return false; + + Constraints2D::const_iterator constraint2dIt = constraints2d.begin(); + Constraints2D::const_iterator otherconstraint2dIt = other.constraints2d.begin(); + for(; constraint2dIt != constraints2d.end() && otherconstraint2dIt != other.constraints2d.end(); ++constraint2dIt, ++otherconstraint2dIt) + { + if(!(*constraint2dIt == *otherconstraint2dIt)) + return false; + } + // Root path can be reseted during exports return true; @@ -248,6 +257,9 @@ void SfMData::combine(const SfMData& sfmData) // control points control_points.insert(sfmData.control_points.begin(), sfmData.control_points.end()); + + // constraints + constraints2d.insert(constraints2d.end(), sfmData.constraints2d.begin(), sfmData.constraints2d.end()); } } // namespace sfmData diff --git a/src/aliceVision/sfmData/SfMData.hpp b/src/aliceVision/sfmData/SfMData.hpp index c4cc864626..972c13c9d6 100644 --- a/src/aliceVision/sfmData/SfMData.hpp +++ b/src/aliceVision/sfmData/SfMData.hpp @@ -9,6 +9,8 @@ #include #include +#include +#include #include #include #include @@ -41,6 +43,12 @@ using PosesUncertainty = HashMap; /// Define uncertainty per landmark using LandmarksUncertainty = HashMap; +///Define a collection of constraints +using Constraints2D = std::vector; + +///Define a collection of rotation priors +using RotationPriors = std::vector; + /** * @brief SfMData container * Store structure and camera properties @@ -60,6 +68,10 @@ class SfMData PosesUncertainty _posesUncertainty; /// Uncertainty per landmark LandmarksUncertainty _landmarksUncertainty; + /// 2D Constraints + Constraints2D constraints2d; + /// Rotation priors + RotationPriors rotationpriors; // Operators @@ -102,6 +114,20 @@ class SfMData const Landmarks& getLandmarks() const {return structure;} Landmarks& getLandmarks() {return structure;} + /** + * @brief Get Constraints2D + * @return Constraints2D + */ + const Constraints2D& getConstraints2D() const {return constraints2d;} + Constraints2D& getConstraints2D() {return constraints2d;} + + /** + * @brief Get RotationPriors + * @return RotationPriors + */ + const RotationPriors& getRotationPriors() const {return rotationpriors;} + RotationPriors& getRotationPriors() {return rotationpriors;} + /** * @brief Get control points * @return control points @@ -298,29 +324,29 @@ class SfMData } /** - * @brief Get the median Exposure Value (Ev) of + * @brief Get the median Camera Exposure Setting * @return */ - float getMedianEv() const + float getMedianCameraExposureSetting() const { - std::vector evList; - evList.reserve(views.size()); + std::vector cameraExposureList; + 
cameraExposureList.reserve(views.size()); for(const auto& view : views) { - float ev = view.second->getEv(); - if(ev != -1.0f) + float ce = view.second->getCameraExposureSetting(); + if(ce != -1.0f) { - auto find = std::find(std::begin(evList), std::end(evList), ev); - if(find == std::end(evList)) - evList.emplace_back(ev); + auto find = std::find(std::begin(cameraExposureList), std::end(cameraExposureList), ce); + if(find == std::end(cameraExposureList)) + cameraExposureList.emplace_back(ce); } } - std::nth_element(evList.begin(), evList.begin() + evList.size()/2, evList.end()); - float evMedian = evList[evList.size()/2]; + std::nth_element(cameraExposureList.begin(), cameraExposureList.begin() + cameraExposureList.size()/2, cameraExposureList.end()); + float ceMedian = cameraExposureList[cameraExposureList.size()/2]; - return evMedian; + return ceMedian; } /** diff --git a/src/aliceVision/sfmData/View.hpp b/src/aliceVision/sfmData/View.hpp index e5afcfedb9..716e554167 100644 --- a/src/aliceVision/sfmData/View.hpp +++ b/src/aliceVision/sfmData/View.hpp @@ -191,34 +191,27 @@ class View return (!isPartOfRig() || _isIndependantPose); } - float getEv() const + /** + * + */ + float getCameraExposureSetting() const { - const float shutter = getMetadataShutter(); - const float aperture = getMetadataAperture(); - const float iso = static_cast(getMetadataISO()); + const float shutter = getMetadataShutter(); + const float fnumber = getMetadataFNumber(); + if (shutter < 0 || fnumber < 0) + return -1.f; - if(shutter < 0 || aperture < 0 || iso < 0) - return -1; + const float iso = getMetadataISO(); + const float isoRatio = (iso < 0.f) ? 1.0 : (iso / 100.f); - // WIKIPEDIA : + log2f(iso/100.f) - float ev = log2f(std::pow(aperture, 2.0f) / shutter) - log2f(iso/100.f); - return ev; + float cameraExposure = (shutter * isoRatio) / (fnumber * fnumber); + return cameraExposure; } - /** - * @brief Get the value of the gap bewteen the view's exposition and a reference exposition - * @param [refEv] the median exposition of all views - * @return the exposure compensation - */ - float getEvCompensation(float refEv) const - { - const float ev = getEv(); - if(ev == -1) - return 1.0f; - - return std::pow(2.0f, ev - refEv); - } - + float getEv() const + { + return std::log2(1.f/getCameraExposureSetting()); + } /** * @brief Return true if the given metadata name exists @@ -360,13 +353,21 @@ class View } /** - * @brief Get the corresponding "FNumber" (aperture) metadata value - * @return the metadata value float or -1 if no corresponding value - */ - float getMetadataAperture() const + * @brief Get the corresponding "FNumber" (relative aperture) metadata value + * @return the metadata value float or -1 if no corresponding value + */ + float getMetadataFNumber() const { if(hasDigitMetadata("FNumber")) + { return std::stof(getMetadata("FNumber")); + } + if (hasDigitMetadata("ApertureValue")) + { + const float aperture = std::stof(getMetadata("ApertureValue")); + // fnumber = 2^(aperture/2) + return std::pow(2.0f, aperture / 2.0f); + } return -1; } @@ -389,9 +390,18 @@ class View { if(hasDigitMetadata("Orientation")) return static_cast(std::stoi(getMetadata("Orientation"))); + if(hasDigitMetadata("Exif:Orientation")) + return static_cast(std::stoi(getMetadata("Exif:Orientation"))); return EEXIFOrientation::UNKNOWN; } + std::string getMetadataDateTimeOriginal() const + { + if(hasMetadata("Exif:DateTimeOriginal")) + return getMetadata("Exif:DateTimeOriginal"); + return ""; + } + /** * @brief Get the view metadata structure 
* @return the view metadata diff --git a/src/aliceVision/sfmDataIO/sfmDataIO.hpp b/src/aliceVision/sfmDataIO/sfmDataIO.hpp index 77535e22f5..cb9ad51a23 100644 --- a/src/aliceVision/sfmDataIO/sfmDataIO.hpp +++ b/src/aliceVision/sfmDataIO/sfmDataIO.hpp @@ -23,10 +23,11 @@ enum ESfMData CONTROL_POINTS = 64, LANDMARKS_UNCERTAINTY = 128, POSES_UNCERTAINTY = 256, + CONSTRAINTS2D = 512, UNCERTAINTY = LANDMARKS_UNCERTAINTY | POSES_UNCERTAINTY, - ALL_DENSE = VIEWS | EXTRINSICS | INTRINSICS | STRUCTURE | OBSERVATIONS | CONTROL_POINTS, - ALL = VIEWS | EXTRINSICS | INTRINSICS | STRUCTURE | OBSERVATIONS_WITH_FEATURES | CONTROL_POINTS | UNCERTAINTY + ALL_DENSE = VIEWS | EXTRINSICS | INTRINSICS | STRUCTURE | OBSERVATIONS | CONTROL_POINTS | CONSTRAINTS2D, + ALL = VIEWS | EXTRINSICS | INTRINSICS | STRUCTURE | OBSERVATIONS_WITH_FEATURES | CONTROL_POINTS | UNCERTAINTY | CONSTRAINTS2D }; /// check that each pose have a valid intrinsic and pose id in the existing View ids diff --git a/src/aliceVision/sfmDataIO/viewIO.cpp b/src/aliceVision/sfmDataIO/viewIO.cpp index 7f1e00fcd6..2001a67fad 100644 --- a/src/aliceVision/sfmDataIO/viewIO.cpp +++ b/src/aliceVision/sfmDataIO/viewIO.cpp @@ -153,12 +153,15 @@ std::shared_ptr getViewIntrinsic(const sfmData::View& vie { intrinsicType = camera::EINTRINSIC_stringToEnum(cameraModel); } + /* + // Warning: This resize heuristic is disabled as RAW images have a different size in metadata. else if(isResized) { // if the image has been resized, we assume that it has been undistorted // and we use a camera without lens distortion. intrinsicType = camera::PINHOLE_CAMERA; } + */ else if((focalLengthIn35mm > 0.0 && focalLengthIn35mm < 18.0) || (defaultFieldOfView > 100.0)) { // if the focal lens is short, the fisheye model should fit better. 
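For reference, the exposure model introduced above in View.hpp can be condensed into a small standalone sketch. It mirrors getCameraExposureSetting() (shutter scaled by ISO/100 over the squared f-number), getEv(), and the APEX fallback in getMetadataFNumber(); the function names below are local to the sketch, not part of the patch.

#include <cmath>
#include <cstdio>

// Camera exposure setting: proportional to the light gathered by a shot.
// Doubling it corresponds to one EV less.
float cameraExposureSetting(float shutter, float fnumber, float iso)
{
    if (shutter < 0.f || fnumber < 0.f)
        return -1.f; // missing metadata
    const float isoRatio = (iso < 0.f) ? 1.0f : (iso / 100.f);
    return (shutter * isoRatio) / (fnumber * fnumber);
}

// EV is the base-2 log of the inverse setting, as in View::getEv().
float ev(float cameraExposure)
{
    return std::log2(1.f / cameraExposure);
}

// APEX conversion used when only "ApertureValue" is available: N = 2^(AV/2).
float fnumberFromApertureValue(float av)
{
    return std::pow(2.0f, av / 2.0f);
}

int main()
{
    const float ce = cameraExposureSetting(1.f / 100.f, 2.8f, 100.f);
    std::printf("exposure setting = %g, EV = %g, f-number(AV=3) = %g\n",
                ce, ev(ce), fnumberFromApertureValue(3.f));
    // prints roughly: exposure setting = 0.00127551, EV = 9.61471, f-number(AV=3) = 2.82843
    return 0;
}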
diff --git a/src/dependencies/htmlDoc/htmlDoc.hpp b/src/dependencies/htmlDoc/htmlDoc.hpp index 624703c6f9..9ff773980a 100644 --- a/src/dependencies/htmlDoc/htmlDoc.hpp +++ b/src/dependencies/htmlDoc/htmlDoc.hpp @@ -150,7 +150,7 @@ namespace htmlDocument << range.first.second << ","<< range.second.first <<"]);\n"; } - void addLine(double x0, double y0, double x1, double y1, std::string color ="00ff00") + void addLine(double x0, double y0, double x1, double y1, const std::string& color ="00ff00") { size_t index0 = cpt++; size_t index1 = cpt++; @@ -163,7 +163,7 @@ namespace htmlDocument template void addXYChart(const std::vector & vec_x, const std::vector & vec_y, - std::string stype) + const std::string& stype, const std::string& strokeColor = "0000ff") { size_t index0 = cpt++; size_t index1 = cpt++; @@ -182,11 +182,11 @@ namespace htmlDocument stream << "board.createElement('chart', " < - void addYChart(const std::vector & vec_y, std::string stype) + void addYChart(const std::vector & vec_y, const std::string& stype, const std::string& strokeColor = "0000ff") { size_t index0 = cpt++; @@ -195,7 +195,7 @@ namespace htmlDocument stream << "];\n"; stream << "board.createElement('chart', " <<"data"< - void pushXYChart(const std::vector& vec_x, const std::vector& vec_y,std::string name) + void pushXYChart(const std::vector& vec_x, const std::vector& vec_y, const std::string& name, const std::string& color = "0000ff") { std::pair< std::pair, std::pair > range = autoJSXGraphViewport(vec_x, vec_y); htmlDocument::JSXGraphWrapper jsxGraph; jsxGraph.init(name,600,300); - jsxGraph.addXYChart(vec_x, vec_y, "line,point"); + jsxGraph.addXYChart(vec_x, vec_y, "line,point", color); jsxGraph.UnsuspendUpdate(); jsxGraph.setViewport(range); jsxGraph.close(); pushInfo(jsxGraph.toStr()); } - + + template + void pushXYChart(const std::vector& vec_x, const std::vector>& vec_y, const std::string& name, const std::vector& colors, const std::string& sType= "line,point") + { + std::pair< std::pair, std::pair > range = autoJSXGraphViewport(vec_x, vec_y.front()); + + htmlDocument::JSXGraphWrapper jsxGraph; + jsxGraph.init(name, 600, 300); + for (int i = 0; i < vec_y.size(); ++i) + { + jsxGraph.addXYChart(vec_x, vec_y[i], sType, colors[i]); + } + jsxGraph.UnsuspendUpdate(); + jsxGraph.setViewport(range); + jsxGraph.close(); + pushInfo(jsxGraph.toStr()); + } + std::string getDoc() { return htmlMarkup("html", htmlStream.str()); diff --git a/src/samples/texturing/main_evcorrection.cpp b/src/samples/texturing/main_evcorrection.cpp index 949af45003..b693fdb5e2 100644 --- a/src/samples/texturing/main_evcorrection.cpp +++ b/src/samples/texturing/main_evcorrection.cpp @@ -108,13 +108,13 @@ int main(int argc, char **argv) } - float evMedian = sfm_data.getMedianEv(); - ALICEVISION_LOG_INFO(" EV Median :" << evMedian); + float cameraExposureMedian = sfm_data.getMedianCameraExposureSetting(); + ALICEVISION_LOG_INFO(" Camera Exposure Median: " << cameraExposureMedian); for(int i = 0; i < sfm_data.views.size(); ++i) { - sfmData::View view = *(sfm_data.views[i]); - float evComp = view.getEvCompensation(evMedian); + sfmData::View view = *(sfm_data.views[i]); + float evComp = cameraExposureMedian / view.getCameraExposureSetting(); image::Image img; image::readImage(view.getImagePath(), img, image::EImageColorSpace::LINEAR); @@ -123,9 +123,9 @@ int main(int argc, char **argv) img(pix) *= evComp; ALICEVISION_LOG_INFO(fs::path(view.getImagePath()).stem()); - ALICEVISION_LOG_INFO(" EV :" << view.getEv()); - ALICEVISION_LOG_INFO(" EV Comp:" << 
view.getEvCompensation(evMedian)); - + ALICEVISION_LOG_INFO(" EV: " << view.getEv()); + ALICEVISION_LOG_INFO(" EV Compensation: " << evComp); + std::string outputPath = outputFilePath + fs::path(view.getImagePath()).stem().string() + ".EXR"; oiio::ParamValueList metadata = image::getMetadataFromMap(view.getMetadata()); image::writeImage(outputPath, img, image::EImageColorSpace::LINEAR, metadata); @@ -167,8 +167,8 @@ int main(int argc, char **argv) // calculate median EV if necessary std::sort(evList.begin(), evList.end()); - evMedian = evList[evList.size()/2]; - ALICEVISION_LOG_INFO("Median EV: " << evMedian); + cameraExposureMedian = evList[evList.size()/2]; + ALICEVISION_LOG_INFO("Median EV: " << cameraExposureMedian); // write corrected images, not over exposed images for(int i = 0; i < imgList.size(); ++i) @@ -176,7 +176,7 @@ int main(int argc, char **argv) Image& img = imgList[i]; ALICEVISION_LOG_INFO(img.getName()); - float evComp = std::pow(2.0f, img.getEv() - evMedian); + float evComp = std::pow(2.0f, img.getEv() - cameraExposureMedian); ALICEVISION_LOG_INFO(" EV Compensation: " << evComp); for(int pix = 0; pix < img.width() * img.height(); ++pix) diff --git a/src/software/convert/CMakeLists.txt b/src/software/convert/CMakeLists.txt index 0571598eed..9a084047a8 100644 --- a/src/software/convert/CMakeLists.txt +++ b/src/software/convert/CMakeLists.txt @@ -43,6 +43,8 @@ alicevision_add_software(aliceVision_convertLDRToHDR LINKS aliceVision_system aliceVision_image aliceVision_hdr + aliceVision_sfmData + aliceVision_sfmDataIO ${Boost_LIBRARIES} ) endif() diff --git a/src/software/convert/main_convertLDRToHDR.cpp b/src/software/convert/main_convertLDRToHDR.cpp index 68204b6157..868e019e6a 100644 --- a/src/software/convert/main_convertLDRToHDR.cpp +++ b/src/software/convert/main_convertLDRToHDR.cpp @@ -8,19 +8,24 @@ #include #include #include + +/*SFMData*/ +#include +#include + +/*HDR Related*/ #include #include #include #include #include #include +#include +/*Command line parameters*/ #include #include - -#include -#include -#include +#include // These constants define the current software version. // They must be updated when the command line is changed. 
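The two compensation forms used in main_evcorrection.cpp above are interchangeable: scaling by the ratio of camera exposure settings equals scaling by two to the power of the EV difference, since EV = log2(1/exposure). A minimal check (illustrative values, not taken from the sample):

#include <cassert>
#include <cmath>

int main()
{
    const float ceRef = 0.0025f;  // reference (median) camera exposure
    const float ce    = 0.00125f; // this view gathered half the light

    // Form 1: ratio of camera exposure settings.
    const float comp1 = ceRef / ce;

    // Form 2: power of two of the EV difference.
    const float evRef = std::log2(1.f / ceRef);
    const float evImg = std::log2(1.f / ce);
    const float comp2 = std::pow(2.0f, evImg - evRef);

    assert(std::fabs(comp1 - comp2) < 1e-5f); // both equal 2.0
    return 0;
}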
@@ -33,317 +38,130 @@ namespace po = boost::program_options; namespace fs = boost::filesystem; + enum class ECalibrationMethod { - LINEAR, - ROBERTSON, - DEBEVEC, - GROSSBERG + LINEAR, + ROBERTSON, + DEBEVEC, + GROSSBERG, + LAGUERRE, }; /** - * @brief convert an enum ECalibrationMethod to its corresponding string - * @param ECalibrationMethod - * @return String - */ +* @brief convert an enum ECalibrationMethod to its corresponding string +* @param calibrationMethod the calibration method enum value +* @return the corresponding method name string +*/ inline std::string ECalibrationMethod_enumToString(const ECalibrationMethod calibrationMethod) { - switch(calibrationMethod) - { + switch (calibrationMethod) + { case ECalibrationMethod::LINEAR: return "linear"; case ECalibrationMethod::ROBERTSON: return "robertson"; case ECalibrationMethod::DEBEVEC: return "debevec"; case ECalibrationMethod::GROSSBERG: return "grossberg"; - } - throw std::out_of_range("Invalid method name enum"); + case ECalibrationMethod::LAGUERRE: return "laguerre"; + } + throw std::out_of_range("Invalid method name enum"); } /** - * @brief convert a string calibration method name to its corresponding enum ECalibrationMethod - * @param ECalibrationMethod - * @return String - */ +* @brief convert a string calibration method name to its corresponding enum ECalibrationMethod +* @param calibrationMethodName the calibration method name +* @return the corresponding ECalibrationMethod +*/ inline ECalibrationMethod ECalibrationMethod_stringToEnum(const std::string& calibrationMethodName) { - std::string methodName = calibrationMethodName; - std::transform(methodName.begin(), methodName.end(), methodName.begin(), ::tolower); + std::string methodName = calibrationMethodName; + std::transform(methodName.begin(), methodName.end(), methodName.begin(), ::tolower); - if(methodName == "linear") return ECalibrationMethod::LINEAR; - if(methodName == "robertson") return ECalibrationMethod::ROBERTSON; - if(methodName == "debevec") return ECalibrationMethod::DEBEVEC; - if(methodName == "grossberg") return ECalibrationMethod::GROSSBERG; + if (methodName == "linear") return ECalibrationMethod::LINEAR; + if (methodName == "robertson") return ECalibrationMethod::ROBERTSON; + if (methodName == "debevec") return ECalibrationMethod::DEBEVEC; + if (methodName == "grossberg") return ECalibrationMethod::GROSSBERG; + if (methodName == "laguerre") return ECalibrationMethod::LAGUERRE; - throw std::out_of_range("Invalid method name : '" + calibrationMethodName + "'"); + throw std::out_of_range("Invalid method name : '" + calibrationMethodName + "'"); } inline std::ostream& operator<<(std::ostream& os, ECalibrationMethod calibrationMethodName) { - os << ECalibrationMethod_enumToString(calibrationMethodName); - return os; + os << ECalibrationMethod_enumToString(calibrationMethodName); + return os; } inline std::istream& operator>>(std::istream& in, ECalibrationMethod& calibrationMethod) { - std::string token; - in >> token; - calibrationMethod = ECalibrationMethod_stringToEnum(token); - return in; -} - - -/** - * @brief check if given file is regular and matches with authorized extensions - * @param[in] filepath - * @return true if valid file - */ -bool isValidImageFile(const fs::path& filepath) -{ - if(!fs::is_regular_file(filepath)) - false; - - const std::string validExtensions( - // Basic image file extensions - "jpg|jpeg|png|tiff|tif|" - // RAW image file extensions: - "3fr|" // Hasselblad - "arw|" // Sony - "crw|cr2|cr3|" // Canon - "dng|" // Adobe - "kdc|" // Kodak - "mrw|" // Minolta - "nef|nrw|" // Nikon - "orf|" // Olympus - "ptx|pef|" // Pentax - "raf|" // Fuji - "R3D|" 
// RED - "rw2|" // Panasonic - "srw|" // Samsung - "x3f" // Sigma - ); - const std::regex validExtensionsExp("\\.(?:" + validExtensions + ")"); - - std::string extension = filepath.extension().string(); - std::transform(extension.begin(), extension.end(), extension.begin(), ::tolower); - - return std::regex_match(extension, validExtensionsExp); -} - - -/** - * @brief get image file paths in folder and add it to output vector if file is valid - * @param[in] folder - input folder path - * @param[out] out_imageFiles - vector of images paths - */ -void getImagesGroup(const fs::path& folder, std::vector& out_imageFiles) -{ - - for(const fs::directory_entry& file: fs::directory_iterator(folder)) - { - if(isValidImageFile(file.path())) - out_imageFiles.push_back(file.path().string()); - } -} - - -/** - * @brief get all input images paths from input provided by user - * @param[in] imagesFolder - input folder or list provided by user - * @param[out] ldrImagesPaths - vector of vector of images paths sorted by group - */ -void getInputPaths(const std::vector& imagesFolder, std::vector>& ldrImagesPaths) -{ - std::vector singleFiles; - bool sub_folders = false; - - for(const std::string& entry: imagesFolder) - { - fs::path entryPath = fs::path(entry); - if(fs::is_directory(entryPath)) - { - std::vector group; - getImagesGroup(entryPath, group); - - if(group.empty()) - { - // folder without image, assume it is a folder with sub-folders - sub_folders = true; - for(const fs::directory_entry& subEntry: fs::directory_iterator(entryPath)) - { - fs::path subEntryPath = fs::path(subEntry); - if(fs::is_directory(subEntryPath)); - { - std::vector subGroup; - getImagesGroup(subEntryPath, subGroup); - ldrImagesPaths.push_back(subGroup); - } - } - } - else - { - // folder with images - ldrImagesPaths.push_back(group); - } - } - // list of images - else if(fs::is_regular_file(entryPath)) - { - if(isValidImageFile(entryPath)) - singleFiles.push_back(entry); - } - else // is an expression - { - // extract folder / expression - std::string imagesExp = entryPath.stem().string(); - std::string extension = entryPath.extension().string(); - std::size_t pos = imagesExp.find("*"); - if(pos != std::string::npos) - { - imagesExp.insert(pos, std::string(".")); - } - else - { - throw std::runtime_error("[ldrToHdr] Invalid expression of input files."); - } - std::regex exp("\\.(?:" + imagesExp + "\\" + extension + ")"); - - - for(const fs::directory_entry& file: fs::directory_iterator(entryPath.parent_path())) - { - if(fs::is_regular_file(file) && std::regex_match(file.path().string(), exp)) - if(isValidImageFile(file.path())) - singleFiles.push_back(file.path().string()); - } - - if(singleFiles.empty()) - throw std::runtime_error("[ldrToHdr] Invalid expression of input files."); - } - } - - if(!singleFiles.empty()) - { - if(!ldrImagesPaths.empty()) - { - throw std::runtime_error("[ldrToHdr] Cannot mix files and folders in input."); - } - ldrImagesPaths.push_back(singleFiles); - } - - // if folder with sub-folders we need to sort sub-folders in alphabetic order - if(sub_folders) - { - std::vector> ldrImagesPaths_copy = ldrImagesPaths; - std::vector subFoldersNames; - - for(const std::vector& group: ldrImagesPaths) - { - fs::path firstPath = fs::path(group.front()); - subFoldersNames.push_back(firstPath.parent_path().string()); - } - - std::vector subFoldersNames_sorted = subFoldersNames; - std::sort(subFoldersNames_sorted.begin(), subFoldersNames_sorted.end()); - - for(std::size_t g = 0; g < ldrImagesPaths.size(); ++g) - { - 
std::vector::iterator it = std::find(subFoldersNames.begin(), subFoldersNames.end(), subFoldersNames_sorted[g]); - if(it == subFoldersNames.end()) - ALICEVISION_LOG_ERROR("Cannot sort folers, please store them in alphabetic order."); - - ldrImagesPaths[g] = ldrImagesPaths_copy.at(std::distance(subFoldersNames.begin(), it)); - } - } -} - - -/** - * @brief recreate the source image at the target exposure by applying the inverse camera response function to HDR image - * and calculate the offset between the mean value of target source image and the recovered image - * @param[in] hdrImage - * @param[in] response - * @param[in] channelQuantization - * @param[in] path to write the output image - */ -void recoverSourceImage(const image::Image& hdrImage, hdr::rgbCurve& response, float channelQuantization, float meanVal[], image::Image &targetRecover) -{ - float meanRecovered[3] = {0.f, 0.f, 0.f}; - for(std::size_t channel = 0; channel < 3; ++channel) - { - std::vector::iterator first = response.getCurve(channel).begin(); - std::vector::iterator last = response.getCurve(channel).end(); - for(std::size_t y = 0; y < hdrImage.Height(); ++y) - { - for(std::size_t x = 0; x < hdrImage.Width(); ++x) - { - const float &pixelValue = hdrImage(y, x)(channel); - std::vector::iterator it = std::lower_bound(first, last, pixelValue); - float value = float(std::distance(response.getCurve(channel).begin(), it)) / (channelQuantization - 1.f); - targetRecover(y, x)(channel) = value; - - meanRecovered[channel] += value; - - } - } - meanRecovered[channel] /= hdrImage.size(); - } - float offset[3]; - for(int i=0; i<3; ++i) - offset[i] = std::abs(meanRecovered[i] - meanVal[i]); - ALICEVISION_LOG_INFO("Offset between target source image and recovered from hdr = " << offset); + std::string token; + in >> token; + calibrationMethod = ECalibrationMethod_stringToEnum(token); + return in; } -int main(int argc, char** argv) +int main(int argc, char * argv[]) { - // command-line parameters - std::string verboseLevel = system::EVerboseLevel_enumToString(system::Logger::getDefaultVerboseLevel()); - std::vector imagesFolder; - std::vector outputHDRImagesPath; - std::string outputResponsePath; + std::string verboseLevel = system::EVerboseLevel_enumToString(system::Logger::getDefaultVerboseLevel()); + std::string sfmInputDataFilename = ""; + std::string sfmOutputDataFilename = ""; + int nbBrackets = 0; ECalibrationMethod calibrationMethod = ECalibrationMethod::LINEAR; - std::string inputResponsePath; + float highlightCorrectionFactor = 1.0f; + float highlightTargetLux = 120000.0f; + bool fisheye = false; + int channelQuantizationPower = 10; + int calibrationNbPoints = 0; + int calibrationDownscale = 4; + bool refineExposures = false; + bool byPass = false; + std::string calibrationWeightFunction = "default"; hdr::EFunctionType fusionWeightFunction = hdr::EFunctionType::GAUSSIAN; - std::vector targets; - float clampedValueCorrection = 1.f; - bool fisheye = true; - std::string recoverSourcePath; - po::options_description allParams("AliceVision convertLDRToHDR"); + // Command line parameters + po::options_description allParams( + "Merge LDR images into HDR images.\n" + "AliceVision convertLDRToHDR"); - po::options_description requiredParams("Required Parameters"); + po::options_description requiredParams("Required parameters"); requiredParams.add_options() - ("input,i", po::value>(&imagesFolder)->required()->multitoken(), - "List of LDR images or a folder containing them (accepted formats are: .jpg .jpeg .png .tif .tiff or RAW image file extensions: .3fr .arw .crw .cr2 .cr3 .dng .kdc .mrw .nef .nrw .orf .ptx 
.pef .raf .R3D .rw2 .srw .x3f") - ("output,o", po::value>(&outputHDRImagesPath)->required()->multitoken(), - "Output HDR image folders or complete paths."); + ("input,i", po::value(&sfmInputDataFilename)->required(), "SfMData file input.") + ("outSfMDataFilename,o", po::value(&sfmOutputDataFilename)->required(), "SfMData file output.") + ; - po::options_description optionalParams("Optional Parameters"); + po::options_description optionalParams("Optional parameters"); optionalParams.add_options() - ("calibrationMethod,m", po::value(&calibrationMethod )->default_value(calibrationMethod), - "Name of method used for camera calibration (linear, robertson -> slow !, debevec, grossberg).") - ("expandDynamicRange,e", po::value(&clampedValueCorrection)->default_value(clampedValueCorrection), - "float value between 0 and 1 to correct clamped high values in dynamic range: use 0 for no correction, 0.5 for interior lighting and 1 for outdoor lighting.") - ("targetExposureImage,t", po::value>(&targets)->multitoken(), - "Name of LDR image to center your HDR exposure.") + ("nbBrackets,b", po::value(&nbBrackets)->default_value(nbBrackets), "bracket count per HDR image (0 means automatic).") + ("calibrationMethod,m", po::value(&calibrationMethod)->default_value(calibrationMethod), + "Name of method used for camera calibration: linear, robertson (slow), debevec, grossberg, laguerre.") + ("highlightTargetLux", po::value(&highlightTargetLux)->default_value(highlightTargetLux), + "Highlights maximum luminance.") + ("highlightCorrectionFactor", po::value(&highlightCorrectionFactor)->default_value(highlightCorrectionFactor), + "float value between 0 and 1 to correct clamped highlights in dynamic range: use 0 for no correction, 1 for full correction to maxLuminance.") ("fisheyeLens,f", po::value(&fisheye)->default_value(fisheye), - "Set to 1 if images are taken with a fisheye lens and to 0 if not. Default value is set to 1.") - ("inputResponse,r", po::value(&inputResponsePath), - "External camera response file to fuse all LDR images together.") + "Set to 1 if images are taken with a fisheye lens and to 0 if not. 
Default value is set to 1.") + ("byPass", po::value(&byPass)->default_value(byPass), + "bypass HDR creation and use medium bracket as input for next steps") + ("channelQuantizationPower", po::value(&channelQuantizationPower)->default_value(channelQuantizationPower), + "Quantization level like 8 bits or 10 bits.") ("calibrationWeight,w", po::value(&calibrationWeightFunction)->default_value(calibrationWeightFunction), - "Weight function used to calibrate camera response (default depends on the calibration method, gaussian, triangle, plateau).") + "Weight function used to calibrate camera response (default depends on the calibration method, gaussian, triangle, plateau).") ("fusionWeight,W", po::value(&fusionWeightFunction)->default_value(fusionWeightFunction), - "Weight function used to fuse all LDR images together (gaussian, triangle, plateau).") - ("outputResponse", po::value(&outputResponsePath), - "(For debug) Output camera response function folder or complete path.") - ("recoverPath", po::value(&recoverSourcePath)->default_value(recoverSourcePath), - "(For debug) Folder path for recovering LDR images at the target exposures by applying inverse response on HDR images."); + "Weight function used to fuse all LDR images together (gaussian, triangle, plateau).") + ("calibrationNbPoints", po::value(&calibrationNbPoints)->default_value(calibrationNbPoints), + "Number of points used to calibrate (Use 0 for automatic selection based on the calibration method).") + ("calibrationDownscale", po::value(&calibrationDownscale)->default_value(calibrationDownscale), + "Image downscale used to calibration the response function.") + ("calibrationRefineExposures", po::value(&refineExposures)->default_value(refineExposures), + "Refine exposures provided by metadata (shutter speed, f-number, iso). Only available for 'laguerre' calibration method. 
Default value is set to 0.") + ; po::options_description logParams("Log parameters"); logParams.add_options() ("verboseLevel,v", po::value(&verboseLevel)->default_value(verboseLevel), - "verbosity level (fatal, error, warning, info, debug, trace)."); - + "verbosity level (fatal, error, warning, info, debug, trace)."); + allParams.add(requiredParams).add(optionalParams).add(logParams); po::variables_map vm; @@ -374,312 +192,498 @@ int main(int argc, char** argv) ALICEVISION_COUT("Program called with the following parameters:"); ALICEVISION_COUT(vm); - // set verbose level system::Logger::get()->setLogLevel(verboseLevel); - std::vector> imagesPaths; - getInputPaths(imagesFolder, imagesPaths); - std::size_t nbGroups = imagesPaths.size(); - std::size_t nbImages = imagesPaths.front().size(); - for(int g = 0; g < nbGroups; ++g) + // Analyze path + boost::filesystem::path path(sfmOutputDataFilename); + std::string outputPath = path.parent_path().string(); + + // Read sfm data + sfmData::SfMData sfmData; + if(!sfmDataIO::Load(sfmData, sfmInputDataFilename, sfmDataIO::ESfMData::ALL)) { - if(imagesPaths[g].size() != nbImages) - { - ALICEVISION_LOG_ERROR("All groups should have the same number of LDR images."); - return EXIT_FAILURE; - } + ALICEVISION_LOG_ERROR("The input SfMData file '" << sfmInputDataFilename << "' cannot be read."); + return EXIT_FAILURE; } - ALICEVISION_LOG_INFO(nbGroups << " group(s) of LDR images found"); - - std::vector< std::vector< image::Image > > ldrImageGroups(nbGroups); - std::vector< std::vector< image::Image > > ldrImageGroups_sorted(nbGroups); - std::vector< std::vector > times(nbGroups); - std::vector< std::vector > times_sorted(nbGroups); - std::vector targetTimes(nbGroups); - std::vector targetMetadatas(nbGroups); - - hdr::rgbCurve response(0); - std::size_t channelQuantization; - image::EImageColorSpace loadColorSpace; - - targets.resize(nbGroups); - outputHDRImagesPath.resize(nbGroups); - - std::string outputPath = outputHDRImagesPath.front(); - - if(fs::is_directory(outputPath)) + size_t countImages = sfmData.getViews().size(); + if (countImages == 0) { - if(nbGroups == 1) - outputHDRImagesPath.front() = (fs::path(outputPath) / ("hdr.exr")).string(); - else - for(int g = 0; g < nbGroups; ++g) - outputHDRImagesPath[g] = (fs::path(outputPath) / ("hdr_" + std::to_string(g) + ".exr")).string(); + ALICEVISION_LOG_ERROR("The input SfMData contains no input !"); + return EXIT_FAILURE; } - else if(outputHDRImagesPath.size() != nbGroups) + + if (nbBrackets > 0 && countImages % nbBrackets != 0) { - ALICEVISION_LOG_ERROR("Please provide one output path for each group of LDR images."); + ALICEVISION_LOG_ERROR("The input SfMData file is not compatible with the number of brackets."); return EXIT_FAILURE; } - // read input response file or set the correct channel quantization according to the calibration method used - if(!inputResponsePath.empty()) + // Make sure there is only one kind of image in dataset + if (sfmData.getIntrinsics().size() > 2) { - response = hdr::rgbCurve(inputResponsePath); // use the camera response function set in "responseCalibration", calibrationMethod is set to "none" - channelQuantization = response.getSize(); + ALICEVISION_LOG_ERROR("Multiple intrinsics : Different kind of images in dataset"); + return EXIT_FAILURE; } - else + + // If two intrinsics, may be some images are simply rotated + if (sfmData.getIntrinsics().size() == 2) { - channelQuantization = std::pow(2, 10); //RAW 10 bit precision, 2^10 values between black and white point - 
response.resize(channelQuantization); - } + const sfmData::Intrinsics & intrinsics = sfmData.getIntrinsics(); + + unsigned int w = intrinsics.begin()->second->w(); + unsigned int h = intrinsics.begin()->second->h(); + unsigned int rw = intrinsics.rbegin()->second->w(); + unsigned int rh = intrinsics.rbegin()->second->h(); + + if (w != rh || h != rw) + { + ALICEVISION_LOG_ERROR("Multiple intrinsics : Different kind of images in dataset"); + return EXIT_FAILURE; + } - // set correct color space according to calibration method - if(calibrationMethod == ECalibrationMethod::LINEAR) - loadColorSpace = image::EImageColorSpace::LINEAR; - else - loadColorSpace = image::EImageColorSpace::SRGB; + IndexT firstId = intrinsics.begin()->first; + IndexT secondId = intrinsics.rbegin()->first; - // force clamped value correction between 0 and 1 - clampedValueCorrection = clamp(clampedValueCorrection, 0.f, 1.f); + size_t first = 0; + size_t second = 0; + sfmData::Views & views = sfmData.getViews(); + for (const auto & v : views) + { + if (v.second->getIntrinsicId() == firstId) + { + first++; + } + else + { + second++; + } + } - // set the correct weight functions corresponding to the string parameter - hdr::rgbCurve fusionWeight(channelQuantization); - fusionWeight.setFunction(fusionWeightFunction); + // Set all view with the majority intrinsics + if (first > second) + { + for (const auto & v : views) + { + v.second->setIntrinsicId(firstId); + } - hdr::rgbCurve calibrationWeight(channelQuantization); - std::transform(calibrationWeightFunction.begin(), calibrationWeightFunction.end(), calibrationWeightFunction.begin(), ::tolower); - if(calibrationWeightFunction == "default") - { - switch(calibrationMethod) + sfmData.getIntrinsics().erase(secondId); + } + else { - case ECalibrationMethod::LINEAR: break; - case ECalibrationMethod::DEBEVEC: calibrationWeight.setTriangular(); break; - case ECalibrationMethod::ROBERTSON: calibrationWeight.setRobertsonWeight(); break; - case ECalibrationMethod::GROSSBERG: break; + for (const auto & v : views) + { + v.second->setIntrinsicId(secondId); + } + + sfmData.getIntrinsics().erase(firstId); } } - else - calibrationWeight.setFunction(hdr::EFunctionType_stringToEnum(calibrationWeightFunction)); - - for(int g = 0; g < nbGroups; ++g) + // Rotate needed images { - if(nbGroups > 1) - ALICEVISION_LOG_INFO("Group " << g << " : "); - - std::vector> &ldrImages = ldrImageGroups[g]; - ldrImages.resize(nbImages); - std::vector &ldrTimes = times[g]; - // std::vector &ldrEv = ev.at(0); - std::vector &inputImagesNames = imagesPaths[g]; - - std::vector stemImages(nbImages); - std::vector nameImages(nbImages); - std::vector metadatas(nbImages); - std::vector aperture; - std::vector colorSpace; - - for(int i=0; isecond->w(); + unsigned int h = intrinsics.begin()->second->h(); - ALICEVISION_LOG_INFO("Reading " << imagePath); + sfmData::Views & views = sfmData.getViews(); + for (auto & v : views) + { + if (v.second->getWidth() == h && v.second->getHeight() == w) + { + ALICEVISION_LOG_INFO("Create intermediate rotated image !"); - image::readImage(imagePath, ldrImages[i], loadColorSpace); + // Read original image + image::Image originalImage; + image::readImage(v.second->getImagePath(), originalImage, image::EImageColorSpace::LINEAR); - metadatas[i] = image::readImageMetadata(imagePath); + // Create a rotated image + image::Image rotated(w, h); + for (int y = 0; y < h; ++y) + { + for (int x = 0; x < w; x++) + { + rotated(y, x) = originalImage(x, rotated.Height() - 1 - y); + } + } - // Debevec and 
Robertson algorithms use shutter speed as ev value - // TODO: in the future, we should use EVs instead of just shutter speed. - try - { - // const float iso = std::stof(metadata.at("Exif:PhotographicSensitivity")); - // const float iso = std::stof(metadata.at("Exif:ISOSpeedRatings")); + boost::filesystem::path origImgPath(v.second->getImagePath()); + std::string origFilename = origImgPath.stem().string(); - aperture.emplace_back(metadatas[i].get_float("FNumber")); - ldrTimes.emplace_back(metadatas[i].get_float("ExposureTime")); - colorSpace.emplace_back(metadatas[i].get_string("oiio:ColorSpace")); + // Save this image + std::string rotatedImagePath = (fs::path(outputPath) / (origFilename + ".exr")).string(); + oiio::ParamValueList metadata = image::readImageMetadata(v.second->getImagePath()); + image::writeImage(rotatedImagePath, rotated, image::EImageColorSpace::AUTO, metadata); - // ldrEv.push_back(std::log2(pow(aperture, 2) / shutter) + std::log2(iso/100)); + // Update view for this modification + v.second->setWidth(w); + v.second->setHeight(h); + v.second->setImagePath(rotatedImagePath); } - catch(std::exception& e) + } + } + + // Order views by their image names (without path and extension to make sure we handle rotated images) + std::vector> viewsOrderedByName; + for (auto & viewIt: sfmData.getViews()) + { + viewsOrderedByName.push_back(viewIt.second); + } + std::sort(viewsOrderedByName.begin(), viewsOrderedByName.end(), [](const std::shared_ptr & a, const std::shared_ptr & b) -> bool { + if (a == nullptr || b == nullptr) return true; + + boost::filesystem::path path_a(a->getImagePath()); + boost::filesystem::path path_b(b->getImagePath()); + + return (path_a.stem().string() < path_b.stem().string()); + }); + + { + // Print a warning if the aperture changes. + std::set fnumbers; + for (auto & view : viewsOrderedByName) + { + fnumbers.insert(view->getMetadataFNumber()); + } + if(fnumbers.size() != 1) + { + ALICEVISION_LOG_WARNING("Different apertures amongst the dataset. For correct HDR, you should only change the shutter speed (and possibly the ISO)."); + ALICEVISION_LOG_WARNING("Used f-numbers:"); + for (auto f : fnumbers) { - ALICEVISION_LOG_ERROR("no metadata in images : " << imagePath); - return EXIT_FAILURE; + ALICEVISION_LOG_WARNING(" * " << f); } } + } + + // Make groups + std::vector>> groupedViews; + { + std::vector> group; + std::vector exposures; + for (auto & view : viewsOrderedByName) + { + if (nbBrackets > 0) + { + group.push_back(view); + if (group.size() == nbBrackets) + { + groupedViews.push_back(group); + group.clear(); + } + } + else + { + // Automatically determine the number of brackets + float exp = view->getCameraExposureSetting(); + if (!exposures.empty() && exp != exposures.back() && exp == exposures.front()) + { + groupedViews.push_back(group); + group.clear(); + exposures.clear(); + } + exposures.push_back(exp); + group.push_back(view); + } + } + if (!group.empty()) + groupedViews.push_back(group); + } + if (nbBrackets <= 0) + { + std::set sizeOfGroups; + for (auto & group : groupedViews) + { + sizeOfGroups.insert(group.size()); + } + if (sizeOfGroups.size() == 1) + { + ALICEVISION_LOG_INFO("Number of brackets automatically detected: " << *sizeOfGroups.begin() << ". 
It will generate " << groupedViews.size() << " hdr images."); + } + else + { + ALICEVISION_LOG_ERROR("Exposure groups do not have a consistent number of brackets."); + } + } + + std::vector> targetViews; + for (auto & group : groupedViews) + { + // Sort all images by exposure time + std::sort(group.begin(), group.end(), [](const std::shared_ptr & a, const std::shared_ptr & b) -> bool { + if (a == nullptr || b == nullptr) return true; + return (a->getCameraExposureSetting() < b->getCameraExposureSetting()); + }); + // Target views are the middle exposed views + int middleIndex = group.size() / 2; - // assert that all images have the same aperture and same color space - if(std::adjacent_find(aperture.begin(), aperture.end(), std::not_equal_to()) != aperture.end()) + // If odd size, choose the more exposed view + if (group.size() % 2 && group.size() > 1) { - ALICEVISION_LOG_ERROR("Input images have different apertures."); - return EXIT_FAILURE; + middleIndex++; } - if(std::adjacent_find(colorSpace.begin(), colorSpace.end(), std::not_equal_to()) != colorSpace.end()) + + targetViews.push_back(group[middleIndex]); + } + + // Build camera exposure table + std::vector> groupedExposures; + for (int i = 0; i < groupedViews.size(); i++) + { + const std::vector> & group = groupedViews[i]; + std::vector exposures; + + for (int j = 0; j < group.size(); j++) { - ALICEVISION_LOG_ERROR("Input images have different color spaces."); - return EXIT_FAILURE; + float etime = group[j]->getCameraExposureSetting(); + exposures.push_back(etime); } + groupedExposures.push_back(exposures); + } + // Build table of file names + std::vector> groupedFilenames; + for (int i = 0; i < groupedViews.size(); i++) + { + const std::vector> & group = groupedViews[i]; - std::vector &ldrTimes_sorted = times_sorted[g]; - ldrTimes_sorted = ldrTimes; - std::sort(ldrTimes_sorted.begin(), ldrTimes_sorted.end()); + std::vector filenames; - // we sort the images according to their exposure time - ldrImageGroups_sorted[g].resize(nbImages); - for(int i=0; i::iterator it = std::find(ldrTimes.begin(), ldrTimes.end(), ldrTimes_sorted.at(i)); - if(it == ldrTimes.end()) - { - ALICEVISION_LOG_ERROR("sorting failed"); - return EXIT_FAILURE; - } + filenames.push_back(group[j]->getImagePath()); + } + + groupedFilenames.push_back(filenames); + } + + + sfmData::SfMData outputSfm; + sfmData::Views & vs = outputSfm.getViews(); + outputSfm.getIntrinsics() = sfmData.getIntrinsics(); - ldrImageGroups_sorted[g].at(i) = ldrImages.at(std::distance(ldrTimes.begin(), it)); + /*If bypass, simply use central bracket*/ + if (byPass) { + for(int g = 0; g < groupedFilenames.size(); ++g) + { + vs[targetViews[g]->getViewId()] = targetViews[g]; } - // find target exposure time corresponding to the image name given by user - if(!targets[g].empty()) + // Export output sfmData + if (!sfmDataIO::Save(outputSfm, sfmOutputDataFilename, sfmDataIO::ESfMData::ALL)) { - nameImages.insert(nameImages.end(), stemImages.begin(), stemImages.end()); - nameImages.insert(nameImages.end(), inputImagesNames.begin(), inputImagesNames.end()); //search target for filenames only, filenames + extensions and complete paths - std::vector::iterator it = std::find(nameImages.begin(), nameImages.end(), targets[g]); - if(it == nameImages.end()) - { - ALICEVISION_CERR("Invalid target name: \"" << targets[g] << "\""); - return EXIT_FAILURE; - } + ALICEVISION_LOG_ERROR("Can not save output sfm file at " << sfmOutputDataFilename); + return EXIT_FAILURE; + } - std::size_t targetIndex = 
std::distance(nameImages.begin(), it) % nbImages; - targetMetadatas[g] = metadatas[targetIndex]; - targetTimes[g] = ldrTimes.at(targetIndex); - } - else + return EXIT_SUCCESS; + } + + + size_t channelQuantization = std::pow(2, channelQuantizationPower); + // set the correct weight functions corresponding to the string parameter + hdr::rgbCurve calibrationWeight(channelQuantization); + std::transform(calibrationWeightFunction.begin(), calibrationWeightFunction.end(), calibrationWeightFunction.begin(), ::tolower); + std::string calibrationWeightFunctionV = calibrationWeightFunction; + if (calibrationWeightFunction == "default") + { + switch (calibrationMethod) { - targetTimes[g] = ldrTimes_sorted.at(ldrTimes_sorted.size()/2); - std::size_t targetIndex = std::distance(ldrTimes.begin(), std::find(ldrTimes.begin(), ldrTimes.end(), targetTimes[g])); - targets[g] = nameImages.at(targetIndex); - targetMetadatas[g] = metadatas[targetIndex]; + case ECalibrationMethod::LINEAR: break; + case ECalibrationMethod::DEBEVEC: calibrationWeight.setTriangular(); calibrationWeightFunctionV="triangular"; break; + case ECalibrationMethod::ROBERTSON: calibrationWeight.setRobertsonWeight(); calibrationWeightFunctionV = "robertsonWeight"; break; + case ECalibrationMethod::GROSSBERG: break; + case ECalibrationMethod::LAGUERRE: break; } } + else + { + calibrationWeight.setFunction(hdr::EFunctionType_stringToEnum(calibrationWeightFunction)); + } + { + std::string methodName = calibrationWeightFunctionV; + std::string outputHtmlPath = (fs::path(outputPath) / (std::string("calibration_weight_") + methodName + std::string(".html"))).string(); + + calibrationWeight.writeHtml(outputHtmlPath, "Calibration weight: " + methodName); + } + + hdr::rgbCurve response(channelQuantization); + + const float lambda = channelQuantization * 1.f; // calculate the response function according to the method given in argument or take the response provided by the user - if(inputResponsePath.empty()) { - switch(calibrationMethod) - { + switch (calibrationMethod) + { case ECalibrationMethod::LINEAR: { - // set the response function to linear - response.setLinear(); + // set the response function to linear + response.setLinear(); + + { + hdr::rgbCurve r = response; + r.applyGamma(2.2); + std::string methodName = ECalibrationMethod_enumToString(calibrationMethod); + std::string outputResponsePath = (fs::path(outputPath) / (std::string("response_log_") + methodName + std::string(".csv"))).string(); + std::string outputResponsePathHtml = (fs::path(outputPath) / (std::string("response_log_") + methodName + std::string(".html"))).string(); + + r.write(outputResponsePath); + r.writeHtml(outputResponsePathHtml, "Camera Response Curve " + methodName); + ALICEVISION_LOG_INFO("Camera response function written as " << outputResponsePath); + } } break; - case ECalibrationMethod::DEBEVEC: { - ALICEVISION_LOG_INFO("Debevec calibration"); - const float lambda = channelQuantization * 1.f; - const int nbPoints = 10000; - hdr::DebevecCalibrate calibration; - calibration.process(ldrImageGroups_sorted, channelQuantization, times_sorted, nbPoints, fisheye, calibrationWeight, lambda, response); - - response.exponential(); - response.scale(); + ALICEVISION_LOG_INFO("Debevec calibration"); + const float lambda = channelQuantization * 1.f; + if(calibrationNbPoints <= 0) + calibrationNbPoints = 10000; + hdr::DebevecCalibrate calibration; + calibration.process(groupedFilenames, channelQuantization, groupedExposures, calibrationNbPoints, calibrationDownscale, fisheye, 
calibrationWeight, lambda, response); + + { + std::string methodName = ECalibrationMethod_enumToString(calibrationMethod); + std::string outputResponsePath = (fs::path(outputPath) / (std::string("response_log_") + methodName + std::string(".csv"))).string(); + std::string outputResponsePathHtml = (fs::path(outputPath) / (std::string("response_log_") + methodName + std::string(".html"))).string(); + + response.write(outputResponsePath); + response.writeHtml(outputResponsePathHtml, "Camera Response Curve " + methodName); + ALICEVISION_LOG_INFO("Camera response function written as " << outputResponsePath); + } + + { + hdr::rgbCurve r = response; + r.applyGammaInv(2.2); + std::string methodName = ECalibrationMethod_enumToString(calibrationMethod); + std::string outputResponsePath = (fs::path(outputPath) / (std::string("response_invGamma_") + methodName + std::string(".csv"))).string(); + std::string outputResponsePathHtml = (fs::path(outputPath) / (std::string("response_invGamma_") + methodName + std::string(".html"))).string(); + + r.write(outputResponsePath); + r.writeHtml(outputResponsePathHtml, "Camera Response Curve " + methodName); + ALICEVISION_LOG_INFO("Camera response function written as " << outputResponsePath); + } + + response.exponential(); + response.scale(); } break; - case ECalibrationMethod::ROBERTSON: { - ALICEVISION_LOG_INFO("Robertson calibration"); - hdr::RobertsonCalibrate calibration(10); - const int nbPoints = 1000000; - calibration.process(ldrImageGroups_sorted, channelQuantization, times_sorted, nbPoints, fisheye, calibrationWeight, response); - response.scale(); + /* + ALICEVISION_LOG_INFO("Robertson calibration"); + hdr::RobertsonCalibrate calibration(10); + if(calibrationNbPoints <= 0) + calibrationNbPoints = 1000000; + calibration.process(groupedFilenames, channelQuantization, groupedExposures, calibrationNbPoints, fisheye, calibrationWeight, response); + response.scale(); + */ } break; - case ECalibrationMethod::GROSSBERG: { - ALICEVISION_LOG_INFO("Grossberg calibration"); - const int nbPoints = 1000000; - hdr::GrossbergCalibrate calibration(3); - calibration.process(ldrImageGroups_sorted, channelQuantization, times_sorted, nbPoints, fisheye, response); + ALICEVISION_LOG_INFO("Grossberg calibration"); + if (calibrationNbPoints <= 0) + calibrationNbPoints = 1000000; + hdr::GrossbergCalibrate calibration(3); + calibration.process(groupedFilenames, channelQuantization, groupedExposures, calibrationNbPoints, fisheye, response); } break; - } + case ECalibrationMethod::LAGUERRE: + { + ALICEVISION_LOG_INFO("Laguerre calibration"); + if (calibrationNbPoints <= 0) + calibrationNbPoints = 1000000; + hdr::LaguerreBACalibration calibration; + calibration.process(groupedFilenames, channelQuantization, groupedExposures, calibrationNbPoints, calibrationDownscale, fisheye, refineExposures, response); + } + break; + } } - if(!outputResponsePath.empty()) + ALICEVISION_LOG_INFO("Calibration done."); + { - std::string methodName = ECalibrationMethod_enumToString(calibrationMethod); - if(fs::is_directory(fs::path(outputResponsePath))) - { - outputResponsePath = (fs::path(outputResponsePath) / (std::string("response_") + methodName + std::string(".csv"))).string(); - } + std::string methodName = ECalibrationMethod_enumToString(calibrationMethod); + std::string outputResponsePath = (fs::path(outputPath) / (std::string("response_") + methodName + std::string(".csv"))).string(); + std::string outputResponsePathHtml = (fs::path(outputPath) / (std::string("response_") + methodName + 
std::string(".html"))).string(); - response.write(outputResponsePath); - ALICEVISION_LOG_INFO("Camera response function written as " << outputResponsePath); + response.write(outputResponsePath); + response.writeHtml(outputResponsePathHtml, "Camera Response Curve " + methodName); + ALICEVISION_LOG_INFO("Camera response function written as " << outputResponsePath); } + // HDR Fusion - for(int g = 0; g < nbGroups; ++g) - { - image::Image HDRimage(ldrImageGroups[g].front().Width(), ldrImageGroups[g].front().Height(), false); - - hdr::hdrMerge merge; - merge.process(ldrImageGroups_sorted[g], times_sorted[g], fusionWeight, response, HDRimage, targetTimes[g], false, clampedValueCorrection); + hdr::rgbCurve fusionWeight(channelQuantization); + fusionWeight.setFunction(fusionWeightFunction); - image::writeImage(outputHDRImagesPath[g], HDRimage, image::EImageColorSpace::AUTO, targetMetadatas[g]); + { + std::string methodName = EFunctionType_enumToString(fusionWeightFunction); + std::string outputHtmlPath = (fs::path(outputPath) / (std::string("fusion_weight_") + methodName + std::string(".html"))).string(); - ALICEVISION_LOG_INFO("Successfull HDR fusion of " << nbImages << " LDR images centered on " << targets[g]); - ALICEVISION_LOG_INFO("HDR image written as " << outputHDRImagesPath[g]); + calibrationWeight.writeHtml(outputHtmlPath, "Fusion weight: " + methodName); } + image::EImageColorSpace mergeColorspace = image::EImageColorSpace::LINEAR; - // test of recovery of source target image from HDR - if(!recoverSourcePath.empty()) + switch (calibrationMethod) + { + case ECalibrationMethod::LINEAR: + case ECalibrationMethod::GROSSBERG: + case ECalibrationMethod::LAGUERRE: + mergeColorspace = image::EImageColorSpace::LINEAR; + break; + case ECalibrationMethod::DEBEVEC: + case ECalibrationMethod::ROBERTSON: + mergeColorspace = image::EImageColorSpace::SRGB; + break; + } + for(int g = 0; g < groupedFilenames.size(); ++g) { - fs::path recoverPath = fs::path(recoverSourcePath); + std::vector> images(groupedViews[g].size()); + std::shared_ptr targetView = targetViews[g]; - for(int g = 0; g < nbGroups; ++g) + // Load all images of the group + for (int i = 0; i < images.size(); i++) { - image::Image HDRimage; - image::readImage(outputHDRImagesPath[g], HDRimage, image::EImageColorSpace::NO_CONVERSION); - - // calcul of mean value of target images - std::size_t targetIndex = std::distance(times[g].begin(), std::find(times[g].begin(), times[g].end(), targetTimes[g])); - float meanVal[3] = {0.f, 0.f, 0.f}; - image::Image &targetImage = ldrImageGroups[g].at(targetIndex); - image::Image targetRecover(targetImage.Width(), targetImage.Height(), false); - for(std::size_t channel=0; channel<3; ++channel) - { - for(std::size_t y = 0; y < targetImage.Height(); ++y) - { - for(std::size_t x = 0; x < targetImage.Width(); ++x) - { - meanVal[channel] += targetImage(y, x)(channel); - } - } - meanVal[channel] /= targetImage.size(); - } - recoverSourceImage(HDRimage, response, channelQuantization, meanVal, targetRecover); + ALICEVISION_LOG_INFO("Load " << groupedFilenames[g][i]); + image::readImage(groupedFilenames[g][i], images[i], mergeColorspace); + } - if(nbGroups == 1) - recoverSourcePath = (recoverPath / (std::string("recovered.exr"))).string(); - else - recoverSourcePath = (recoverPath / (std::string("recovered_") + std::to_string(g) + std::string(".exr"))).string(); + // Merge HDR images + hdr::hdrMerge merge; + float targetCameraExposure = targetView->getCameraExposureSetting(); + image::Image HDRimage; + 
merge.process(images, groupedExposures[g], fusionWeight, response, HDRimage, targetCameraExposure); - image::writeImage(recoverSourcePath, targetRecover, image::EImageColorSpace::AUTO); - ALICEVISION_LOG_INFO("Recovered target source image written as " << recoverSourcePath); + if(highlightCorrectionFactor > 0.0) + { + merge.postProcessHighlight(images, groupedExposures[g], fusionWeight, response, HDRimage, targetCameraExposure, highlightCorrectionFactor, highlightTargetLux); } + + // Output image file path + std::stringstream sstream; + sstream << "hdr_" << std::setfill('0') << std::setw(4) << g << ".exr"; + std::string hdrImagePath = (fs::path(outputPath) / sstream.str()).string(); + + // Write an image with parameters from the target view + oiio::ParamValueList targetMetadata = image::readImageMetadata(targetView->getImagePath()); + image::writeImage(hdrImagePath, HDRimage, image::EImageColorSpace::AUTO, targetMetadata); + + targetViews[g]->setImagePath(hdrImagePath); + vs[targetViews[g]->getViewId()] = targetViews[g]; } + // Export output sfmData + if (!sfmDataIO::Save(outputSfm, sfmOutputDataFilename, sfmDataIO::ESfMData::ALL)) + { + ALICEVISION_LOG_ERROR("Can not save output sfm file at " << sfmOutputDataFilename); + return EXIT_FAILURE; + } + return EXIT_SUCCESS; -} +} \ No newline at end of file diff --git a/src/software/pipeline/CMakeLists.txt b/src/software/pipeline/CMakeLists.txt index d916cffb03..323a5d46a3 100644 --- a/src/software/pipeline/CMakeLists.txt +++ b/src/software/pipeline/CMakeLists.txt @@ -88,6 +88,63 @@ if(ALICEVISION_BUILD_SFM) ${Boost_LIBRARIES} ) + # Panorama + alicevision_add_software(aliceVision_panoramaEstimation + SOURCE main_panoramaEstimation.cpp + FOLDER ${FOLDER_SOFTWARE_PIPELINE} + LINKS aliceVision_system + aliceVision_image + aliceVision_feature + aliceVision_sfm + aliceVision_sfmData + aliceVision_sfmDataIO + ${Boost_LIBRARIES} + ) + alicevision_add_software(aliceVision_panoramaWarping + SOURCE main_panoramaWarping.cpp + FOLDER ${FOLDER_SOFTWARE_PIPELINE} + LINKS aliceVision_system + aliceVision_image + aliceVision_feature + aliceVision_sfm + aliceVision_sfmData + aliceVision_sfmDataIO + ${Boost_LIBRARIES} + ) + alicevision_add_software(aliceVision_panoramaCompositing + SOURCE main_panoramaCompositing.cpp + FOLDER ${FOLDER_SOFTWARE_PIPELINE} + LINKS aliceVision_system + aliceVision_image + aliceVision_feature + aliceVision_sfm + aliceVision_sfmData + aliceVision_sfmDataIO + ${Boost_LIBRARIES} + ) + alicevision_add_software(aliceVision_panoramaExternalInfo + SOURCE main_panoramaExternalInfo.cpp + FOLDER ${FOLDER_SOFTWARE_PIPELINE} + LINKS aliceVision_system + aliceVision_image + aliceVision_feature + aliceVision_sfm + aliceVision_sfmData + aliceVision_sfmDataIO + ${Boost_LIBRARIES} + ) + alicevision_add_software(aliceVision_cameraDownscale + SOURCE main_cameraDownscale.cpp + FOLDER ${FOLDER_SOFTWARE_PIPELINE} + LINKS aliceVision_system + aliceVision_image + aliceVision_feature + aliceVision_sfm + aliceVision_sfmData + aliceVision_sfmDataIO + ${Boost_LIBRARIES} + ) + # Compute structure from known camera poses alicevision_add_software(aliceVision_computeStructureFromKnownPoses SOURCE main_computeStructureFromKnownPoses.cpp diff --git a/src/software/pipeline/main_cameraDownscale.cpp b/src/software/pipeline/main_cameraDownscale.cpp new file mode 100644 index 0000000000..f27753712f --- /dev/null +++ b/src/software/pipeline/main_cameraDownscale.cpp @@ -0,0 +1,157 @@ +#include +#include +#include +#include +#include +#include + +#include +#include +#include 
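Conceptually, the fusion performed by hdr::hdrMerge::process in main_convertLDRToHDR.cpp above combines, for every pixel, all brackets weighted by how well-exposed each sample is. A simplified single-channel sketch (the response() and weight() helpers are hypothetical placeholders, not the actual hdrMerge API):

#include <cstddef>
#include <vector>

// Merge one pixel across brackets: each LDR sample z_i taken with camera
// exposure t_i contributes response(z_i) / t_i to the radiance estimate,
// weighted to favour mid-range samples, then the result is rescaled to the
// target bracket's exposure.
float mergePixel(const std::vector<float>& samples,   // LDR values in [0,1]
                 const std::vector<float>& exposures, // camera exposure settings
                 float targetExposure,
                 float (*response)(float),            // inverse camera response
                 float (*weight)(float))              // e.g. gaussian, triangle
{
    float num = 0.f;
    float den = 0.f;
    for (std::size_t i = 0; i < samples.size(); ++i)
    {
        const float w = weight(samples[i]);
        num += w * response(samples[i]) / exposures[i];
        den += w;
    }
    const float radiance = (den > 0.f) ? (num / den) : 0.f;
    return radiance * targetExposure; // express relative to the target view
}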
+ + // These constants define the current software version. // They must be updated when the command line is changed. +#define ALICEVISION_SOFTWARE_VERSION_MAJOR 2 +#define ALICEVISION_SOFTWARE_VERSION_MINOR 0 + +using namespace aliceVision; +namespace po = boost::program_options; +namespace fs = boost::filesystem; + +int main(int argc, char * argv[]) { + std::string verboseLevel = system::EVerboseLevel_enumToString(system::Logger::getDefaultVerboseLevel()); + std::string sfmInputDataFilename = ""; + std::string sfmOutputDataFilename = ""; + float rescaleFactor = 1.0f; + + /***** + * DESCRIBE COMMAND LINE PARAMETERS + */ + po::options_description allParams( + "Downscale the images of an SfMData dataset.\n" + "AliceVision cameraDownscale"); + + po::options_description requiredParams("Required parameters"); + requiredParams.add_options() + ("rescalefactor,r", po::value(&rescaleFactor)->default_value(rescaleFactor), "Rescale Factor ]0.0, 1.0].") + ("input,i", po::value(&sfmInputDataFilename)->required(), "SfMData file input.") + ("outSfMDataFilename,o", po::value(&sfmOutputDataFilename)->required(), "SfMData file output.") + ; + + po::options_description optionalParams("Optional parameters"); + + po::options_description logParams("Log parameters"); + logParams.add_options() + ("verboseLevel,v", po::value(&verboseLevel)->default_value(verboseLevel), + "verbosity level (fatal, error, warning, info, debug, trace)."); + + allParams.add(requiredParams).add(optionalParams).add(logParams); + + + /** + * READ COMMAND LINE + */ + po::variables_map vm; + try + { + po::store(po::parse_command_line(argc, argv, allParams), vm); + + if(vm.count("help") || (argc == 1)) + { + ALICEVISION_COUT(allParams); + return EXIT_SUCCESS; + } + po::notify(vm); + } + catch(boost::program_options::required_option& e) + { + ALICEVISION_CERR("ERROR: " << e.what()); + ALICEVISION_COUT("Usage:\n\n" << allParams); + return EXIT_FAILURE; + } + catch(boost::program_options::error& e) + { + ALICEVISION_CERR("ERROR: " << e.what()); + ALICEVISION_COUT("Usage:\n\n" << allParams); + return EXIT_FAILURE; + } + + ALICEVISION_COUT("Program called with the following parameters:"); + ALICEVISION_COUT(vm); + + /** + * set verbose level + **/ + system::Logger::get()->setLogLevel(verboseLevel); + + if (rescaleFactor < 0.0001f || rescaleFactor > 1.0f) { + ALICEVISION_LOG_ERROR("Invalid rescale factor (must be in ]0, 1])."); + return EXIT_FAILURE; + } + + + /*Analyze path*/ + boost::filesystem::path path(sfmOutputDataFilename); + std::string output_path = path.parent_path().string(); + + /** + * Read input + */ + sfmData::SfMData sfmData; + if(!sfmDataIO::Load(sfmData, sfmInputDataFilename, sfmDataIO::ESfMData(sfmDataIO::ALL))) + { + ALICEVISION_LOG_ERROR("The input SfMData file '" << sfmInputDataFilename << "' cannot be read."); + return EXIT_FAILURE; + } + + for (auto & v : sfmData.getViews()) { + + /*Read original image*/ + image::Image originalImage; + image::readImage(v.second->getImagePath(), originalImage, image::EImageColorSpace::LINEAR); + + unsigned int w = v.second->getWidth(); + unsigned int h = v.second->getHeight(); + unsigned int nw = (unsigned int)(floor(float(w) * rescaleFactor)); + unsigned int nh = (unsigned int)(floor(float(h) * rescaleFactor)); + + /*Create a rescaled image*/ + image::Image rescaled(nw, nh); + + oiio::ImageSpec imageSpecResized(nw, nh, 3, oiio::TypeDesc::FLOAT); + oiio::ImageSpec imageSpecOrigin(w, h, 3, oiio::TypeDesc::FLOAT); + + oiio::ImageBuf bufferOrigin(imageSpecOrigin, originalImage.data()); 
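+ // Note: oiio::ImageBufAlgo::resample fills the destination by interpolated per-pixel sampling (no reconstruction filter); it is fast, but strong downscales may alias compared to a filtered resize.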
+ oiio::ImageBuf bufferResized(imageSpecResized, rescaled.data()); + + oiio::ImageBufAlgo::resample(bufferResized, bufferOrigin); + + boost::filesystem::path old_path(v.second->getImagePath()); + std::string old_filename = old_path.stem().string(); + + /*Save this image*/ + std::stringstream sstream; + sstream << output_path << "/" << old_filename << "_rescaled.exr"; + image::writeImage(sstream.str(), rescaled, image::EImageColorSpace::AUTO); + + /*Update view for this modification*/ + v.second->setWidth(nw); + v.second->setHeight(nh); + v.second->setImagePath(sstream.str()); + } + + for (auto & i : sfmData.getIntrinsics()) { + i.second->rescale(rescaleFactor); + } + + /*** + * Save downscaled sfmData + */ + if (!sfmDataIO::Save(sfmData, sfmOutputDataFilename, sfmDataIO::ESfMData(sfmDataIO::ALL))) { + ALICEVISION_LOG_ERROR("The output SfMData file '" << sfmOutputDataFilename << "' cannot be written."); + return EXIT_FAILURE; + } + + return EXIT_SUCCESS; +} \ No newline at end of file diff --git a/src/software/pipeline/main_globalSfM.cpp b/src/software/pipeline/main_globalSfM.cpp index 3fed48e3a2..524b27555e 100644 --- a/src/software/pipeline/main_globalSfM.cpp +++ b/src/software/pipeline/main_globalSfM.cpp @@ -43,8 +43,8 @@ int main(int argc, char **argv) std::string outSfMDataFilename = "SfmData.json"; std::string describerTypesName = feature::EImageDescriberType_enumToString(feature::EImageDescriberType::SIFT); - int rotationAveragingMethod = static_cast(sfm::ROTATION_AVERAGING_L2); - int translationAveragingMethod = static_cast(sfm::TRANSLATION_AVERAGING_SOFTL1); + sfm::ERotationAveragingMethod rotationAveragingMethod = sfm::ROTATION_AVERAGING_L2; + sfm::ETranslationAveragingMethod translationAveragingMethod = sfm::TRANSLATION_AVERAGING_SOFTL1; bool lockAllIntrinsics = false; po::options_description allParams("Implementation of the paper\n" @@ -70,12 +70,13 @@ int main(int argc, char **argv) "Filename of the output SfMData file.") ("describerTypes,d", po::value(&describerTypesName)->default_value(describerTypesName), feature::EImageDescriberType_informations().c_str()) - ("rotationAveraging", po::value(&rotationAveragingMethod)->default_value(rotationAveragingMethod), + ("rotationAveraging", po::value(&rotationAveragingMethod)->default_value(rotationAveragingMethod), "* 1: L1 minimization\n" "* 2: L2 minimization") - ("translationAveraging", po::value(&translationAveragingMethod)->default_value(translationAveragingMethod), + ("translationAveraging", po::value(&translationAveragingMethod)->default_value(translationAveragingMethod), "* 1: L1 minimization\n" - "* 2: L2 minimization of sum of squared Chordal distances") + "* 2: L2 minimization of sum of squared Chordal distances\n" + "* 3: L1 soft minimization") ("lockAllIntrinsics", po::value(&lockAllIntrinsics)->default_value(lockAllIntrinsics), "Force lock of all camera intrinsic parameters, so they will not be refined during Bundle Adjustment."); diff --git a/src/software/pipeline/main_panoramaCompositing.cpp b/src/software/pipeline/main_panoramaCompositing.cpp new file mode 100644 index 0000000000..05cea4508b --- /dev/null +++ b/src/software/pipeline/main_panoramaCompositing.cpp @@ -0,0 +1,1066 @@ +/** + * Input and geometry +*/ +#include +#include + +/** + * Image stuff + */ +#include +#include + +/*Logging stuff*/ +#include + +/*Reading command line options*/ +#include +#include + +/*IO*/ +#include +#include +#include +#include + +// These constants define the current software version. +// They must be updated when the command line is changed. 
+#define ALICEVISION_SOFTWARE_VERSION_MAJOR 1 +#define ALICEVISION_SOFTWARE_VERSION_MINOR 0 + +using namespace aliceVision; + +namespace po = boost::program_options; +namespace bpt = boost::property_tree; + +typedef struct { + size_t offset_x; + size_t offset_y; + std::string img_path; + std::string mask_path; + std::string weights_path; +} ConfigView; + + +void getMaskFromLabels(aliceVision::image::Image<float> & mask, aliceVision::image::Image<unsigned char> & labels, unsigned char index, size_t offset_x, size_t offset_y) { + + for (int i = 0; i < mask.Height(); i++) { + + int di = i + offset_y; + + for (int j = 0; j < mask.Width(); j++) { + + int dj = j + offset_x; + if (dj >= labels.Width()) { + dj = dj - labels.Width(); + } + + + if (labels(di, dj) == index) { + mask(i, j) = 1.0f; + } + else { + mask(i, j) = 0.0f; + } + } + } +} + +template <class T> +bool makeImagePyramidCompatible(image::Image<T> & output, size_t & out_offset_x, size_t & out_offset_y, const image::Image<T> & input, size_t offset_x, size_t offset_y, size_t num_levels) { + + if (num_levels == 0) { + return false; + } + + double max_scale = 1.0 / pow(2.0, num_levels - 1); + + double low_offset_x = double(offset_x) * max_scale; + double low_offset_y = double(offset_y) * max_scale; + + /*Make sure offset is integer even at the lowest level*/ + double corrected_low_offset_x = floor(low_offset_x); + double corrected_low_offset_y = floor(low_offset_y); + + /*Add some borders on the top and left to make sure mask can be smoothed*/ + corrected_low_offset_x = std::max(0.0, corrected_low_offset_x - 3.0); + corrected_low_offset_y = std::max(0.0, corrected_low_offset_y - 3.0); + + /*Compute offset at largest level*/ + out_offset_x = size_t(corrected_low_offset_x / max_scale); + out_offset_y = size_t(corrected_low_offset_y / max_scale); + + /*Compute difference*/ + double doffset_x = double(offset_x) - double(out_offset_x); + double doffset_y = double(offset_y) - double(out_offset_y); + + /* update size with border update */ + double large_width = double(input.Width()) + doffset_x; + double large_height = double(input.Height()) + doffset_y; + + /* compute size at largest scale */ + double low_width = large_width * max_scale; + double low_height = large_height * max_scale; + + /*Make sure width is integer even at the lowest level*/ + double corrected_low_width = ceil(low_width); + double corrected_low_height = ceil(low_height); + + /*Add some borders on the right and bottom to make sure mask can be smoothed*/ + corrected_low_width = corrected_low_width + 3; + corrected_low_height = corrected_low_height + 3; + + /*Compute size at largest level*/ + size_t width = size_t(corrected_low_width / max_scale); + size_t height = size_t(corrected_low_height / max_scale); + + output = image::Image<T>(width, height, true, T(0.0f)); + output.block(doffset_y, doffset_x, input.Height(), input.Width()) = input; + + return true; +} + + +class Compositer { +public: + Compositer(size_t outputWidth, size_t outputHeight) : + _panorama(outputWidth, outputHeight, true, image::RGBAfColor(0.0f, 0.0f, 0.0f, 0.0f)) + { + } + + virtual bool append(const aliceVision::image::Image<image::RGBfColor> & color, const aliceVision::image::Image<unsigned char> & inputMask, const aliceVision::image::Image<float> & inputWeights, size_t offset_x, size_t offset_y) { + + for (size_t i = 0; i < color.Height(); i++) { + + size_t pano_i = offset_y + i; + if (pano_i >= _panorama.Height()) { + continue; + } + + for (size_t j = 0; j < color.Width(); j++) { + + if (!inputMask(i, j)) { + continue; + } + + size_t pano_j = offset_x + j; + if (pano_j >=
_panorama.Width()) { + pano_j = pano_j - _panorama.Width(); + } + + _panorama(pano_i, pano_j).r() = color(i, j).r(); + _panorama(pano_i, pano_j).g() = color(i, j).g(); + _panorama(pano_i, pano_j).b() = color(i, j).b(); + _panorama(pano_i, pano_j).a() = 1.0f; + } + } + + return true; + } + + virtual bool terminate() { + return true; + } + + const aliceVision::image::Image & getPanorama() const { + return _panorama; + } + +protected: + aliceVision::image::Image _panorama; +}; + +class AlphaCompositer : public Compositer { +public: + + AlphaCompositer(size_t outputWidth, size_t outputHeight) : + Compositer(outputWidth, outputHeight) { + + } + + virtual bool append(const aliceVision::image::Image & color, const aliceVision::image::Image & inputMask, const aliceVision::image::Image & inputWeights, size_t offset_x, size_t offset_y) { + + for (size_t i = 0; i < color.Height(); i++) { + + size_t pano_i = offset_y + i; + if (pano_i >= _panorama.Height()) { + continue; + } + + for (size_t j = 0; j < color.Width(); j++) { + + if (!inputMask(i, j)) { + continue; + } + + size_t pano_j = offset_x + j; + if (pano_j >= _panorama.Width()) { + pano_j = pano_j - _panorama.Width(); + } + + float wc = inputWeights(i, j); + + _panorama(pano_i, pano_j).r() += wc * color(i, j).r(); + _panorama(pano_i, pano_j).g() += wc * color(i, j).g(); + _panorama(pano_i, pano_j).b() += wc * color(i, j).b(); + _panorama(pano_i, pano_j).a() += wc; + } + } + + return true; + } + + virtual bool terminate() { + + for (int i = 0; i < _panorama.Height(); i++) { + for (int j = 0; j < _panorama.Width(); j++) { + + if (_panorama(i, j).a() < 1e-6) { + _panorama(i, j).r() = 1.0f; + _panorama(i, j).g() = 0.0f; + _panorama(i, j).b() = 0.0f; + _panorama(i, j).a() = 0.0f; + } + else { + _panorama(i, j).r() = _panorama(i, j).r() / _panorama(i, j).a(); + _panorama(i, j).g() = _panorama(i, j).g() / _panorama(i, j).a(); + _panorama(i, j).b() = _panorama(i, j).b() / _panorama(i, j).a(); + _panorama(i, j).a() = 1.0f; + } + } + } + + return true; + } +}; + +template +inline void convolveRow(typename image::Image::RowXpr output_row, typename image::Image::ConstRowXpr input_row, const Eigen::Matrix & kernel, bool loop) { + + const int radius = 2; + + for (int j = 0; j < input_row.cols(); j++) { + + T sum = T(); + float sumw = 0.0f; + + for (int k = 0; k < kernel.size(); k++) { + + float w = kernel(k); + int col = j + k - radius; + + /* mirror 5432 | 123456 | 5432 */ + + if (!loop) { + if (col < 0) { + col = - col; + } + + if (col >= input_row.cols()) { + col = input_row.cols() - 1 - (col + 1 - input_row.cols()); + } + } + else { + if (col < 0) { + col = input_row.cols() + col; + } + + if (col >= input_row.cols()) { + col = col - input_row.cols(); + } + } + + sum += w * input_row(col); + sumw += w; + } + + output_row(j) = sum / sumw; + } +} + +template +inline void convolveColumns(typename image::Image::RowXpr output_row, const image::Image & input_rows, const Eigen::Matrix & kernel) { + + for (int j = 0; j < output_row.cols(); j++) { + + T sum = T(); + float sumw = 0.0f; + + for (int k = 0; k < kernel.size(); k++) { + + float w = kernel(k); + sum += w * input_rows(k, j); + sumw += w; + } + + output_row(j) = sum / sumw; + } +} + +template +bool convolveGaussian5x5(image::Image & output, const image::Image & input, bool loop = false) { + + if (output.size() != input.size()) { + return false; + } + + Eigen::Matrix kernel; + kernel[0] = 1.0f; + kernel[1] = 4.0f; + kernel[2] = 6.0f; + kernel[3] = 4.0f; + kernel[4] = 1.0f; + kernel = kernel / kernel.sum(); 
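+ /* The kernel [1 4 6 4 1] / 16 holds binomial coefficients, a classic separable approximation of a Gaussian: rows are filtered first, then columns, so only the 5-row rolling buffer below is needed. */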
+ + image::Image buf(output.Width(), 5); + + int radius = 2; + + convolveRow(buf.row(0), input.row(2), kernel, loop); + convolveRow(buf.row(1), input.row(1), kernel, loop); + convolveRow(buf.row(2), input.row(0), kernel, loop); + convolveRow(buf.row(3), input.row(1), kernel, loop); + convolveRow(buf.row(4), input.row(2), kernel, loop); + + for (int i = 0; i < output.Height() - 3; i++) { + + convolveColumns(output.row(i), buf, kernel); + + + buf.row(0) = buf.row(1); + buf.row(1) = buf.row(2); + buf.row(2) = buf.row(3); + buf.row(3) = buf.row(4); + convolveRow(buf.row(4), input.row(i + 3), kernel, loop); + } + + /** + current row : -5 -4 -3 -2 -1 + next 1 : -4 -3 -2 -1 -2 + next 2 : -3 -2 -1 -2 -3 + */ + convolveColumns(output.row(output.Height() - 3), buf, kernel); + + buf.row(0) = buf.row(1); + buf.row(1) = buf.row(2); + buf.row(2) = buf.row(3); + buf.row(3) = buf.row(4); + convolveRow(buf.row(4), input.row(output.Height() - 2), kernel, loop); + convolveColumns(output.row(output.Height() - 2), buf, kernel); + + buf.row(0) = buf.row(1); + buf.row(1) = buf.row(2); + buf.row(2) = buf.row(3); + buf.row(3) = buf.row(4); + convolveRow(buf.row(4), input.row(output.Height() - 3), kernel, loop); + convolveColumns(output.row(output.Height() - 1), buf, kernel); + + return true; +} + + +template +bool downscale(aliceVision::image::Image & outputColor, const aliceVision::image::Image & inputColor) { + + size_t output_width = inputColor.Width() / 2; + size_t output_height = inputColor.Height() / 2; + + for (int i = 0; i < output_height; i++) { + for (int j = 0; j < output_width; j++) { + outputColor(i, j) = inputColor(i * 2, j * 2); + } + } + + return true; +} + +template +bool upscale(aliceVision::image::Image & outputColor, const aliceVision::image::Image & inputColor) { + + size_t width = inputColor.Width(); + size_t height = inputColor.Height(); + + for (int i = 0; i < height; i++) { + + int di = i * 2; + + for (int j = 0; j < width; j++) { + int dj = j * 2; + + outputColor(di, dj) = T(); + outputColor(di, dj + 1) = T(); + outputColor(di + 1, dj) = T(); + outputColor(di + 1, dj + 1) = inputColor(i, j); + } + } + + return true; +} + +template +bool substract(aliceVision::image::Image & AminusB, const aliceVision::image::Image & A, const aliceVision::image::Image & B) { + + size_t width = AminusB.Width(); + size_t height = AminusB.Height(); + + if (AminusB.size() != A.size()) { + return false; + } + + if (AminusB.size() != B.size()) { + return false; + } + + for (int i = 0; i < height; i++) { + + for (int j = 0; j < width; j++) { + + AminusB(i, j) = A(i, j) - B(i, j); + } + } + + return true; +} + +template +bool addition(aliceVision::image::Image & AplusB, const aliceVision::image::Image & A, const aliceVision::image::Image & B) { + + size_t width = AplusB.Width(); + size_t height = AplusB.Height(); + + if (AplusB.size() != A.size()) { + return false; + } + + if (AplusB.size() != B.size()) { + return false; + } + + for (int i = 0; i < height; i++) { + + for (int j = 0; j < width; j++) { + + AplusB(i, j) = A(i, j) + B(i, j); + } + } + + return true; +} + +void removeNegativeValues(aliceVision::image::Image & img) { + for (int i = 0; i < img.Height(); i++) { + for (int j = 0; j < img.Width(); j++) { + image::RGBfColor & pix = img(i, j); + image::RGBfColor rpix; + rpix.r() = std::exp(pix.r()); + rpix.g() = std::exp(pix.g()); + rpix.b() = std::exp(pix.b()); + + if (rpix.r() < 0.0) { + pix.r() = 0.0; + } + + if (rpix.g() < 0.0) { + pix.g() = 0.0; + } + + if (rpix.b() < 0.0) { + pix.b() = 0.0; + } + } + } 
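+ /* Note: during multi-band blending the pyramid stores log-space values (see LaplacianCompositer), so exp() above converts a pixel back to linear space before its sign is tested. */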
+} + +class LaplacianPyramid { +public: + LaplacianPyramid(size_t base_width, size_t base_height, size_t max_levels) { + + size_t width = base_width; + size_t height = base_height; + + /*Make sure pyramid size can be divided by 2 on each levels*/ + double max_scale = 1.0 / pow(2.0, max_levels - 1); + width = size_t(ceil(double(width) * max_scale) / max_scale); + height = size_t(ceil(double(height) * max_scale) / max_scale); + + /*Prepare pyramid*/ + for (int lvl = 0; lvl < max_levels; lvl++) { + + _levels.push_back(aliceVision::image::Image(width, height, true, image::RGBfColor(0.0f,0.0f,0.0f))); + _weights.push_back(aliceVision::image::Image(width, height, true, 0.0f)); + + height /= 2; + width /= 2; + } + } + + bool apply(const aliceVision::image::Image & source, const aliceVision::image::Image & weights, size_t offset_x, size_t offset_y) { + + int width = source.Width(); + int height = source.Height(); + + image::Image current_color = source; + image::Image next_color; + image::Image current_weights = weights; + image::Image next_weights; + + for (int l = 0; l < _levels.size() - 1; l++) { + + aliceVision::image::Image buf(width, height); + aliceVision::image::Image buf2(width, height); + aliceVision::image::Image bufw(width, height); + + next_color = aliceVision::image::Image(width / 2, height / 2); + next_weights = aliceVision::image::Image(width / 2, height / 2); + + convolveGaussian5x5(buf, current_color); + downscale(next_color, buf); + + convolveGaussian5x5(bufw, current_weights); + downscale(next_weights, bufw); + + upscale(buf, next_color); + convolveGaussian5x5(buf2, buf); + + for (int i = 0; i < buf2.Height(); i++) { + for (int j = 0; j < buf2.Width(); j++) { + buf2(i,j) *= 4.0f; + } + } + + substract(current_color, current_color, buf2); + + merge(current_color, current_weights, l, offset_x, offset_y); + + current_color = next_color; + current_weights = next_weights; + width /= 2; + height /= 2; + offset_x /= 2; + offset_y /= 2; + } + + merge(current_color, current_weights, _levels.size() - 1, offset_x, offset_y); + + return true; + } + + bool merge(const aliceVision::image::Image & oimg, const aliceVision::image::Image & oweight, size_t level, size_t offset_x, size_t offset_y) { + + image::Image & img = _levels[level]; + image::Image & weight = _weights[level]; + + for (int i = 0; i < oimg.Height(); i++) { + + int di = i + offset_y; + if (di >= img.Height()) continue; + + for (int j = 0; j < oimg.Width(); j++) { + + int dj = j + offset_x; + if (dj >= weight.Width()) { + dj = dj - weight.Width(); + } + + img(di, dj).r() += oimg(i, j).r() * oweight(i, j); + img(di, dj).g() += oimg(i, j).g() * oweight(i, j); + img(di, dj).b() += oimg(i, j).b() * oweight(i, j); + weight(di, dj) += oweight(i, j); + } + } + + return true; + } + + bool rebuild(image::Image & output) { + + for (int l = 0; l < _levels.size(); l++) { + for (int i = 0; i < _levels[l].Height(); i++) { + for (int j = 0; j < _levels[l].Width(); j++) { + if (_weights[l](i, j) < 1e-6) { + _levels[l](i, j) = image::RGBfColor(0.0); + continue; + } + + _levels[l](i, j).r() = _levels[l](i, j).r() / _weights[l](i, j); + _levels[l](i, j).g() = _levels[l](i, j).g() / _weights[l](i, j); + _levels[l](i, j).b() = _levels[l](i, j).b() / _weights[l](i, j); + } + } + } + + removeNegativeValues(_levels[_levels.size() - 1]); + + for (int l = _levels.size() - 2; l >= 0; l--) { + + aliceVision::image::Image buf(_levels[l].Width(), _levels[l].Height()); + aliceVision::image::Image buf2(_levels[l].Width(), _levels[l].Height()); + + upscale(buf, 
_levels[l + 1]); + convolveGaussian5x5(buf2, buf, true); + + for (int i = 0; i < buf2.Height(); i++) { + for (int j = 0; j < buf2.Width(); j++) { + buf2(i,j) *= 4.0f; + } + } + + addition(_levels[l], _levels[l], buf2); + removeNegativeValues(_levels[l]); + } + + /*Write output to RGBA*/ + for (int i = 0; i < output.Height(); i++) { + for (int j = 0; j < output.Width(); j++) { + output(i, j).r() = _levels[0](i, j).r(); + output(i, j).g() = _levels[0](i, j).g(); + output(i, j).b() = _levels[0](i, j).b(); + + if (_weights[0](i, j) < 1e-6) { + output(i, j).a() = 0.0f; + } + else { + output(i, j).a() = 1.0f; + } + } + } + + return true; + } + +private: + std::vector> _levels; + std::vector> _weights; +}; + +class DistanceSeams { +public: + DistanceSeams(size_t outputWidth, size_t outputHeight) : + _weights(outputWidth, outputHeight, true, 0.0f), + _labels(outputWidth, outputHeight, true, 255), + currentIndex(0) + { + } + + virtual bool append(const aliceVision::image::Image & inputMask, const aliceVision::image::Image & inputWeights, size_t offset_x, size_t offset_y) { + + if (inputMask.size() != inputWeights.size()) { + return false; + } + + for (int i = 0; i < inputMask.Height(); i++) { + + int di = i + offset_y; + + for (int j = 0; j < inputMask.Width(); j++) { + + if (!inputMask(i, j)) { + continue; + } + + int dj = j + offset_x; + if (dj >= _weights.Width()) { + dj = dj - _weights.Width(); + } + + if (inputWeights(i, j) > _weights(di, dj)) { + _labels(di, dj) = currentIndex; + _weights(di, dj) = inputWeights(i, j); + } + } + } + + currentIndex++; + + return true; + } + + const image::Image & getLabels() { + return _labels; + } + +private: + image::Image _weights; + image::Image _labels; + unsigned char currentIndex; +}; + +class LaplacianCompositer : public Compositer { +public: + + LaplacianCompositer(size_t outputWidth, size_t outputHeight, size_t bands) : + Compositer(outputWidth, outputHeight), + _pyramid_panorama(outputWidth, outputHeight, bands), + _bands(bands) { + + } + + bool feathering(aliceVision::image::Image & output, const aliceVision::image::Image & color, const aliceVision::image::Image & inputMask) { + + std::vector> feathering; + std::vector> feathering_mask; + feathering.push_back(color); + feathering_mask.push_back(inputMask); + + int lvl = 0; + int width = color.Width(); + int height = color.Height(); + + while (1) { + const image::Image & src = feathering[lvl]; + const image::Image & src_mask = feathering_mask[lvl]; + + image::Image half(width / 2, height / 2); + image::Image half_mask(width / 2, height / 2); + + for (int i = 0; i < half.Height(); i++) { + + int di = i * 2; + for (int j = 0; j < half.Width(); j++) { + int dj = j * 2; + + int count = 0; + half(i, j) = image::RGBfColor(0.0,0.0,0.0); + + if (src_mask(di, dj)) { + half(i, j) += src(di, dj); + count++; + } + + if (src_mask(di, dj + 1)) { + half(i, j) += src(di, dj + 1); + count++; + } + + if (src_mask(di + 1, dj)) { + half(i, j) += src(di + 1, dj); + count++; + } + + if (src_mask(di + 1, dj + 1)) { + half(i, j) += src(di + 1, dj + 1); + count++; + } + + if (count > 0) { + half(i, j) /= float(count); + half_mask(i, j) = 1; + } + else { + half_mask(i, j) = 0; + } + } + + + } + + feathering.push_back(half); + feathering_mask.push_back(half_mask); + + + width = half.Width(); + height = half.Height(); + + if (width < 2 || height < 2) break; + + lvl++; + } + + + for (int lvl = feathering.size() - 2; lvl >= 0; lvl--) { + + image::Image & src = feathering[lvl]; + image::Image & src_mask = feathering_mask[lvl]; + 
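+ /* The next coarser level acts as a fallback: pixels masked out at this level inherit the color found at the coarser scale, i.e. push-pull style hole filling. */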
image::Image<image::RGBfColor> & ref = feathering[lvl + 1]; + image::Image<unsigned char> & ref_mask = feathering_mask[lvl + 1]; + + for (int i = 0; i < src_mask.Height(); i++) { + for (int j = 0; j < src_mask.Width(); j++) { + if (!src_mask(i, j)) { + int mi = i / 2; + int mj = j / 2; + + if (mi >= ref_mask.Height()) { + mi = ref_mask.Height() - 1; + } + + if (mj >= ref_mask.Width()) { + mj = ref_mask.Width() - 1; + } + + src_mask(i, j) = ref_mask(mi, mj); + src(i, j) = ref(mi, mj); + } + } + } + } + + output = feathering[0]; + + return true; + } + + virtual bool append(const aliceVision::image::Image<image::RGBfColor> & color, const aliceVision::image::Image<unsigned char> & inputMask, const aliceVision::image::Image<float> & inputWeights, size_t offset_x, size_t offset_y) { + + aliceVision::image::Image<image::RGBfColor> color_big_feathered; + + size_t new_offset_x, new_offset_y; + aliceVision::image::Image<image::RGBfColor> color_pot; + aliceVision::image::Image<unsigned char> mask_pot; + aliceVision::image::Image<float> weights_pot; + makeImagePyramidCompatible(color_pot, new_offset_x, new_offset_y, color, offset_x, offset_y, _bands); + makeImagePyramidCompatible(mask_pot, new_offset_x, new_offset_y, inputMask, offset_x, offset_y, _bands); + makeImagePyramidCompatible(weights_pot, new_offset_x, new_offset_y, inputWeights, offset_x, offset_y, _bands); + + + + aliceVision::image::Image<image::RGBfColor> feathered; + feathering(feathered, color_pot, mask_pot); + + /*To log space for HDR*/ + for (int i = 0; i < feathered.Height(); i++) { + for (int j = 0; j < feathered.Width(); j++) { + + feathered(i, j).r() = std::log(std::max(1e-8f, feathered(i, j).r())); + feathered(i, j).g() = std::log(std::max(1e-8f, feathered(i, j).g())); + feathered(i, j).b() = std::log(std::max(1e-8f, feathered(i, j).b())); + } + } + + _pyramid_panorama.apply(feathered, weights_pot, new_offset_x, new_offset_y); + + return true; + } + + virtual bool terminate() { + + _pyramid_panorama.rebuild(_panorama); + + /*Go back to linear space from log space*/ + for (int i = 0; i < _panorama.Height(); i++) { + for (int j = 0; j < _panorama.Width(); j++) { + _panorama(i, j).r() = std::exp(_panorama(i, j).r()); + _panorama(i, j).g() = std::exp(_panorama(i, j).g()); + _panorama(i, j).b() = std::exp(_panorama(i, j).b()); + } + } + + return true; + } + +protected: + LaplacianPyramid _pyramid_panorama; + size_t _bands; +}; + +int main(int argc, char **argv) { + + /** + * Program description + */ + po::options_description allParams ( + "Perform panorama stitching of cameras around a nodal point for 360° panorama creation.
\n" + "AliceVision PanoramaCompositing" + ); + + /** + * Description of mandatory parameters + */ + std::string inputDirectory; + std::string outputPanorama; + po::options_description requiredParams("Required parameters"); + requiredParams.add_options() + ("input,i", po::value(&inputDirectory)->required(), "Warping directory.") + ("output,o", po::value(&outputPanorama)->required(), "Path of the output panorama."); + allParams.add(requiredParams); + + /** + * Description of optional parameters + */ + std::string compositerType = "multiband"; + po::options_description optionalParams("Optional parameters"); + optionalParams.add_options() + ("compositerType,c", po::value(&compositerType)->required(), "Compositer Type [replace, alpha, multiband]."); + allParams.add(optionalParams); + + /** + * Setup log level given command line + */ + std::string verboseLevel = system::EVerboseLevel_enumToString(system::Logger::getDefaultVerboseLevel()); + po::options_description logParams("Log parameters"); + logParams.add_options() + ("verboseLevel,v", po::value(&verboseLevel)->default_value(verboseLevel), "verbosity level (fatal, error, warning, info, debug, trace)."); + allParams.add(logParams); + + + /** + * Effectively parse command line given parse options + */ + po::variables_map vm; + try + { + po::store(po::parse_command_line(argc, argv, allParams), vm); + + if(vm.count("help") || (argc == 1)) + { + ALICEVISION_COUT(allParams); + return EXIT_SUCCESS; + } + po::notify(vm); + } + catch(boost::program_options::required_option& e) + { + ALICEVISION_CERR("ERROR: " << e.what()); + ALICEVISION_COUT("Usage:\n\n" << allParams); + return EXIT_FAILURE; + } + catch(boost::program_options::error& e) + { + ALICEVISION_CERR("ERROR: " << e.what()); + ALICEVISION_COUT("Usage:\n\n" << allParams); + return EXIT_FAILURE; + } + + ALICEVISION_COUT("Program called with the following parameters:"); + ALICEVISION_COUT(vm); + + + /** + * Set verbose level given command line + */ + system::Logger::get()->setLogLevel(verboseLevel); + + + /*Load output from previous warping step*/ + std::stringstream ss; + ss << inputDirectory << "/config_views.json"; + std::ifstream file_config(ss.str()); + if (file_config.is_open() == false) { + ALICEVISION_LOG_INFO("Config file for warping can't be found at " << ss.str()); + return EXIT_FAILURE; + } + + bpt::ptree configTree; + bpt::read_json(ss.str(), configTree); + + + std::pair panoramaSize; + panoramaSize.first = configTree.get("panoramaWidth"); + panoramaSize.second = configTree.get("panoramaHeight"); + + ALICEVISION_LOG_INFO("Output panorama size set to " << panoramaSize.first << "x" << panoramaSize.second); + + std::vector configViews; + size_t pos = 0; + for (auto & item : configTree.get_child("views")) { + ConfigView cv; + + /*if (pos == 32 || pos == 33 || pos == 34)*/ + { + cv.img_path = item.second.get("filename_view"); + cv.mask_path = item.second.get("filename_mask"); + cv.weights_path = item.second.get("filename_weights"); + cv.offset_x = item.second.get("offsetx"); + cv.offset_y = item.second.get("offsety"); + configViews.push_back(cv); + } + pos++; + } + + std::unique_ptr compositer; + bool isMultiBand = false; + if (compositerType == "multiband") { + compositer = std::unique_ptr(new LaplacianCompositer(panoramaSize.first, panoramaSize.second, 8)); + isMultiBand = true; + } + else if (compositerType == "alpha") { + compositer = std::unique_ptr(new AlphaCompositer(panoramaSize.first, panoramaSize.second)); + } + else { + compositer = std::unique_ptr(new 
Compositer(panoramaSize.first, panoramaSize.second)); + } + + /*Compute seams*/ + std::unique_ptr distanceseams(new DistanceSeams(panoramaSize.first, panoramaSize.second)); + if (isMultiBand) { + for (const ConfigView & cv : configViews) { + + /** + * Load mask + */ + std::string maskPath = cv.mask_path; + ALICEVISION_LOG_INFO("Load mask with path " << maskPath); + image::Image mask; + image::readImage(maskPath, mask, image::EImageColorSpace::NO_CONVERSION); + + /** + * Load Weights + */ + std::string weightsPath = cv.weights_path; + ALICEVISION_LOG_INFO("Load weights with path " << weightsPath); + image::Image weights; + image::readImage(weightsPath, weights, image::EImageColorSpace::NO_CONVERSION); + + distanceseams->append(mask, weights, cv.offset_x, cv.offset_y); + } + } + image::Image labels = distanceseams->getLabels(); + distanceseams.reset(); + distanceseams = nullptr; + + /*Do compositing*/ + pos = 0; + for (const ConfigView & cv : configViews) { + + /** + * Load image and convert it to linear colorspace + */ + std::string imagePath = cv.img_path; + ALICEVISION_LOG_INFO("Load image with path " << imagePath); + image::Image source; + image::readImage(imagePath, source, image::EImageColorSpace::NO_CONVERSION); + + /** + * Load mask + */ + std::string maskPath = cv.mask_path; + ALICEVISION_LOG_INFO("Load mask with path " << maskPath); + image::Image mask; + image::readImage(maskPath, mask, image::EImageColorSpace::NO_CONVERSION); + + /** + * Load Weights + */ + std::string weightsPath = cv.weights_path; + ALICEVISION_LOG_INFO("Load weights with path " << weightsPath); + image::Image weights; + image::readImage(weightsPath, weights, image::EImageColorSpace::NO_CONVERSION); + + + /*Build weight map*/ + if (isMultiBand) { + image::Image seams(weights.Width(), weights.Height()); + getMaskFromLabels(seams, labels, pos, cv.offset_x, cv.offset_y); + + /* Composite image into panorama */ + compositer->append(source, mask, seams, cv.offset_x, cv.offset_y); + } + else { + compositer->append(source, mask, weights, cv.offset_x, cv.offset_y); + } + + pos++; + } + + /* Build image */ + compositer->terminate(); + + + /* Store output */ + ALICEVISION_LOG_INFO("Write output panorama to file " << outputPanorama); + const aliceVision::image::Image & panorama = compositer->getPanorama(); + image::writeImage(outputPanorama, panorama, image::EImageColorSpace::SRGB); + + + return EXIT_SUCCESS; +} diff --git a/src/software/pipeline/main_panoramaEstimation.cpp b/src/software/pipeline/main_panoramaEstimation.cpp new file mode 100644 index 0000000000..84d67e6be1 --- /dev/null +++ b/src/software/pipeline/main_panoramaEstimation.cpp @@ -0,0 +1,375 @@ +// This file is part of the AliceVision project. +// Copyright (c) 2017 AliceVision contributors. +// Copyright (c) 2012 openMVG contributors. +// This Source Code Form is subject to the terms of the Mozilla Public License, +// v. 2.0. If a copy of the MPL was not distributed with this file, +// You can obtain one at https://mozilla.org/MPL/2.0/. + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include + +#include +#include +#include + +#include + +// These constants define the current software version. +// They must be updated when the command line is changed. 
+#define ALICEVISION_SOFTWARE_VERSION_MAJOR 1 +#define ALICEVISION_SOFTWARE_VERSION_MINOR 0 + +using namespace aliceVision; + +namespace po = boost::program_options; +namespace fs = boost::filesystem; + +inline std::istream& operator>>(std::istream& in, std::pair& out) +{ + in >> out.first; + in >> out.second; + return in; +} + +int main(int argc, char **argv) +{ + // command-line parameters + + std::string verboseLevel = system::EVerboseLevel_enumToString(system::Logger::getDefaultVerboseLevel()); + std::string sfmDataFilename; + std::vector featuresFolders; + std::vector matchesFolders; + std::string outDirectory; + + // user optional parameters + + std::string outSfMDataFilename = "sfmData.json"; + std::string describerTypesName = feature::EImageDescriberType_enumToString(feature::EImageDescriberType::SIFT); + sfm::ERotationAveragingMethod rotationAveragingMethod = sfm::ROTATION_AVERAGING_L2; + sfm::ERelativeRotationMethod relativeRotationMethod = sfm::RELATIVE_ROTATION_FROM_E; + bool refine = true; + bool lockAllIntrinsics = false; + int orientation = 0; + float offsetLongitude = 0.0f; + float offsetLatitude = 0.0f; + + po::options_description allParams( + "Perform estimation of cameras orientation around a nodal point for 360° panorama.\n" + "AliceVision PanoramaEstimation"); + + po::options_description requiredParams("Required parameters"); + requiredParams.add_options() + ("input,i", po::value(&sfmDataFilename)->required(), + "SfMData file.") + ("output,o", po::value(&outDirectory)->required(), + "Path of the output folder.") + ("featuresFolders,f", po::value>(&featuresFolders)->multitoken()->required(), + "Path to folder(s) containing the extracted features.") + ("matchesFolders,m", po::value>(&matchesFolders)->multitoken()->required(), + "Path to folder(s) in which computed matches are stored."); + + po::options_description optionalParams("Optional parameters"); + optionalParams.add_options() + ("outSfMDataFilename", po::value(&outSfMDataFilename)->default_value(outSfMDataFilename), + "Filename of the output SfMData file.") + ("describerTypes,d", po::value(&describerTypesName)->default_value(describerTypesName), + feature::EImageDescriberType_informations().c_str()) + ("rotationAveraging", po::value(&rotationAveragingMethod)->default_value(rotationAveragingMethod), + "* 1: L1 minimization\n" + "* 2: L2 minimization") + ("relativeRotation", po::value(&relativeRotationMethod)->default_value(relativeRotationMethod), + "* from essential matrix" + "* from homography matrix") + ("orientation", po::value(&orientation)->default_value(orientation), + "Orientation") + ("offsetLongitude", po::value(&offsetLongitude)->default_value(offsetLongitude), + "offset to camera longitude") + ("offsetLatitude", po::value(&offsetLatitude)->default_value(offsetLatitude), + "offset to camera latitude") + ("refine", po::value(&refine)->default_value(refine), + "Refine cameras with a Bundle Adjustment") + ("lockAllIntrinsics", po::value(&lockAllIntrinsics)->default_value(lockAllIntrinsics), + "Force lock of all camera intrinsic parameters, so they will not be refined during Bundle Adjustment."); + + po::options_description logParams("Log parameters"); + logParams.add_options() + ("verboseLevel,v", po::value(&verboseLevel)->default_value(verboseLevel), + "verbosity level (fatal, error, warning, info, debug, trace)."); + + allParams.add(requiredParams).add(optionalParams).add(logParams); + + po::variables_map vm; + try + { + po::store(po::parse_command_line(argc, argv, allParams), vm); + + if(vm.count("help") 
|| (argc == 1)) + { + ALICEVISION_COUT(allParams); + return EXIT_SUCCESS; + } + po::notify(vm); + } + catch(boost::program_options::required_option& e) + { + ALICEVISION_CERR("ERROR: " << e.what()); + ALICEVISION_COUT("Usage:\n\n" << allParams); + return EXIT_FAILURE; + } + catch(boost::program_options::error& e) + { + ALICEVISION_CERR("ERROR: " << e.what()); + ALICEVISION_COUT("Usage:\n\n" << allParams); + return EXIT_FAILURE; + } + + ALICEVISION_COUT("Program called with the following parameters:"); + ALICEVISION_COUT(vm); + + // set verbose level + system::Logger::get()->setLogLevel(verboseLevel); + + if (rotationAveragingMethod < sfm::ROTATION_AVERAGING_L1 || + rotationAveragingMethod > sfm::ROTATION_AVERAGING_L2 ) + { + ALICEVISION_LOG_ERROR("Rotation averaging method is invalid"); + return EXIT_FAILURE; + } + + if (relativeRotationMethod < sfm::RELATIVE_ROTATION_FROM_E || + relativeRotationMethod > sfm::RELATIVE_ROTATION_FROM_H ) + { + ALICEVISION_LOG_ERROR("Relative rotation method is invalid"); + return EXIT_FAILURE; + } + + // load input SfMData scene + sfmData::SfMData inputSfmData; + if(!sfmDataIO::Load(inputSfmData, sfmDataFilename, sfmDataIO::ESfMData(sfmDataIO::VIEWS|sfmDataIO::INTRINSICS|sfmDataIO::EXTRINSICS))) + { + ALICEVISION_LOG_ERROR("The input SfMData file '" << sfmDataFilename << "' cannot be read."); + return EXIT_FAILURE; + } + + if(!inputSfmData.structure.empty()) + { + ALICEVISION_LOG_ERROR("Part computed SfMData are not currently supported in Global SfM." << std::endl << "Please use Incremental SfM. Aborted"); + return EXIT_FAILURE; + } + + if(!inputSfmData.getRigs().empty()) + { + ALICEVISION_LOG_ERROR("Rigs are not currently supported in Global SfM." << std::endl << "Please use Incremental SfM. Aborted"); + return EXIT_FAILURE; + } + + sfmData::Poses & initial_poses = inputSfmData.getPoses(); + Eigen::Matrix3d ref_R_base = Eigen::Matrix3d::Identity(); + if (!initial_poses.empty()) { + + ref_R_base = initial_poses.begin()->second.getTransform().rotation(); + } + + // get describerTypes + const std::vector describerTypes = feature::EImageDescriberType_stringToEnums(describerTypesName); + + // features reading + feature::FeaturesPerView featuresPerView; + if(!sfm::loadFeaturesPerView(featuresPerView, inputSfmData, featuresFolders, describerTypes)) + { + ALICEVISION_LOG_ERROR("Invalid features"); + return EXIT_FAILURE; + } + + // matches reading + // Load the match file (try to read the two matches file formats). 
+ matching::PairwiseMatches pairwiseMatches; + if(!sfm::loadPairwiseMatches(pairwiseMatches, inputSfmData, matchesFolders, describerTypes)) + { + ALICEVISION_LOG_ERROR("Unable to load matches files from: " << matchesFolders); + return EXIT_FAILURE; + } + + if(outDirectory.empty()) + { + ALICEVISION_LOG_ERROR("It is an invalid output folder"); + return EXIT_FAILURE; + } + + if(!fs::exists(outDirectory)) + fs::create_directory(outDirectory); + + // Panorama reconstruction process + aliceVision::system::Timer timer; + sfm::ReconstructionEngine_panorama sfmEngine( + inputSfmData, + outDirectory, + (fs::path(outDirectory) / "sfm_log.html").string()); + + // configure the featuresPerView & the matches_provider + sfmEngine.SetFeaturesProvider(&featuresPerView); + sfmEngine.SetMatchesProvider(&pairwiseMatches); + + // configure reconstruction parameters + sfmEngine.setLockAllIntrinsics(lockAllIntrinsics); // TODO: rename param + + // configure motion averaging method + sfmEngine.SetRotationAveragingMethod(sfm::ERotationAveragingMethod(rotationAveragingMethod)); + + // configure relative rotation method (from essential or from homography matrix) + sfmEngine.SetRelativeRotationMethod(sfm::ERelativeRotationMethod(relativeRotationMethod)); + + + if(!sfmEngine.process()) + return EXIT_FAILURE; + + // set featuresFolders and matchesFolders relative paths + { + sfmEngine.getSfMData().addFeaturesFolders(featuresFolders); + sfmEngine.getSfMData().addMatchesFolders(matchesFolders); + sfmEngine.getSfMData().setAbsolutePath(outSfMDataFilename); + } + + if(refine) + { + sfmDataIO::Save(sfmEngine.getSfMData(), (fs::path(outDirectory) / "BA_before.abc").string(), sfmDataIO::ESfMData::ALL); + + sfmEngine.Adjust(); + + sfmDataIO::Save(sfmEngine.getSfMData(), (fs::path(outDirectory) / "BA_after.abc").string(), sfmDataIO::ESfMData::ALL); + } + + + sfmData::SfMData& outSfmData = sfmEngine.getSfMData(); + + + /** + * If an initial set of poses was available, make sure at least one pose is aligned with it + */ + sfmData::Poses & final_poses = outSfmData.getPoses(); + if (!final_poses.empty()) { + + Eigen::Matrix3d ref_R_current = final_poses.begin()->second.getTransform().rotation(); + Eigen::Matrix3d R_restore = ref_R_current.transpose() * ref_R_base; + + for (auto & pose : outSfmData.getPoses()) { + geometry::Pose3 p = pose.second.getTransform(); + Eigen::Matrix3d newR = p.rotation() * R_restore ; + p.rotation() = newR; + pose.second.setTransform(p); + } + } + + + ALICEVISION_LOG_INFO("Panorama solve took (s): " << timer.elapsed()); + ALICEVISION_LOG_INFO("Generating HTML report..."); + + sfm::generateSfMReport(outSfmData, (fs::path(outDirectory) / "sfm_report.html").string()); + + ALICEVISION_LOG_INFO("Panorama results:" << std::endl + << "\t- # input images: " << outSfmData.getViews().size() << std::endl + << "\t- # cameras calibrated: " << outSfmData.getPoses().size()); + + auto validViews = outSfmData.getValidViews(); + int nbCameras = outSfmData.getValidViews().size(); + if(nbCameras == 0) + { + ALICEVISION_LOG_ERROR("Failed to get valid cameras from input images."); + return -1; + } + + if (initial_poses.empty()) + { + std::string firstShot_datetime; + IndexT firstShot_viewId = 0; + + for(auto& viewIt: outSfmData.getViews()) + { + IndexT viewId = viewIt.first; + const sfmData::View& view = *viewIt.second.get(); + if(!outSfmData.isPoseAndIntrinsicDefined(&view)) + continue; + std::string datetime = view.getMetadataDateTimeOriginal(); + ALICEVISION_LOG_TRACE("Shot datetime candidate: " << datetime << "."); + 
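+ /* Datetime strings are assumed to follow the EXIF "YYYY:MM:DD HH:MM:SS" layout, for which lexicographic order matches chronological order. */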
if(firstShot_datetime.empty() || datetime < firstShot_datetime) + { + firstShot_datetime = datetime; + firstShot_viewId = viewId; + ALICEVISION_LOG_TRACE("Update shot datetime: " << firstShot_datetime << "."); + } + } + ALICEVISION_LOG_INFO("First shot datetime: " << firstShot_datetime << "."); + ALICEVISION_LOG_TRACE("Reset orientation to view: " << firstShot_viewId << "."); + + double S; + Mat3 R = Mat3::Identity(); + Vec3 t; + + ALICEVISION_LOG_INFO("orientation: " << orientation); + if(orientation == 0) + { + ALICEVISION_LOG_INFO("Orientation: FROM IMAGES"); + sfm::computeNewCoordinateSystemFromSingleCamera(outSfmData, std::to_string(firstShot_viewId), S, R, t); + } + else if(orientation == 1) + { + ALICEVISION_LOG_INFO("Orientation: RIGHT"); + R = Eigen::AngleAxisd(degreeToRadian(180.0), Vec3(0,1,0)) + * Eigen::AngleAxisd(degreeToRadian(90.0), Vec3(0,0,1)) + * outSfmData.getAbsolutePose(firstShot_viewId).getTransform().rotation(); + } + else if(orientation == 2) + { + ALICEVISION_LOG_INFO("Orientation: LEFT"); + R = Eigen::AngleAxisd(degreeToRadian(180.0), Vec3(0,1,0)) + * Eigen::AngleAxisd(degreeToRadian(270.0), Vec3(0,0,1)) + * outSfmData.getAbsolutePose(firstShot_viewId).getTransform().rotation(); + } + else if(orientation == 3) + { + ALICEVISION_LOG_INFO("Orientation: UPSIDEDOWN"); + R = Eigen::AngleAxisd(degreeToRadian(180.0), Vec3(0,1,0)) + * outSfmData.getAbsolutePose(firstShot_viewId).getTransform().rotation(); + } + else if(orientation == 4) + { + ALICEVISION_LOG_INFO("Orientation: NONE"); + R = Eigen::AngleAxisd(degreeToRadian(180.0), Vec3(0,1,0)) + * Eigen::AngleAxisd(degreeToRadian(180.0), Vec3(0,0,1)) + * outSfmData.getAbsolutePose(firstShot_viewId).getTransform().rotation(); + } + + // We only need to correct the rotation + S = 1.0; + t = Vec3::Zero(); + + sfm::applyTransform(outSfmData, S, R, t); + } + + /*Add offsets to rotations*/ + for (auto& pose: outSfmData.getPoses()) { + + geometry::Pose3 p = pose.second.getTransform(); + Eigen::Matrix3d newR = p.rotation() * Eigen::AngleAxisd(degreeToRadian(offsetLongitude), Vec3(0,1,0)) * Eigen::AngleAxisd(degreeToRadian(offsetLatitude), Vec3(1,0,0)); + p.rotation() = newR; + pose.second.setTransform(p); + } + + // export to disk computed scene (data & visualizable results) + ALICEVISION_LOG_INFO("Export SfMData to disk"); + sfmDataIO::Save(outSfmData, outSfMDataFilename, sfmDataIO::ESfMData::ALL); + sfmDataIO::Save(outSfmData, (fs::path(outDirectory) / "cloud_and_poses.ply").string(), sfmDataIO::ESfMData::ALL); + + return EXIT_SUCCESS; +} diff --git a/src/software/pipeline/main_panoramaExternalInfo.cpp b/src/software/pipeline/main_panoramaExternalInfo.cpp new file mode 100644 index 0000000000..ee87cad4e5 --- /dev/null +++ b/src/software/pipeline/main_panoramaExternalInfo.cpp @@ -0,0 +1,191 @@ +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include + +// These constants define the current software version. +// They must be updated when the command line is changed. 
+#define ALICEVISION_SOFTWARE_VERSION_MAJOR 2 +#define ALICEVISION_SOFTWARE_VERSION_MINOR 0 + +using namespace aliceVision; +namespace po = boost::program_options; +namespace fs = boost::filesystem; +namespace pt = boost::property_tree; + +int main(int argc, char * argv[]) { + std::string verboseLevel = system::EVerboseLevel_enumToString(system::Logger::getDefaultVerboseLevel()); + std::string externalInfoFilename = ""; + std::string sfmInputDataFilename = ""; + std::string sfmOutputDataFilename = ""; + + /***** + * DESCRIBE COMMAND LINE PARAMETERS + */ + po::options_description allParams( + "Parse external information about cameras used in a panorama.\n" + "AliceVision PanoramaExternalInfo"); + + po::options_description requiredParams("Required parameters"); + requiredParams.add_options() + ("config,c", po::value(&externalInfoFilename)->required(), "External info xml file.") + ("input,i", po::value(&sfmInputDataFilename)->required(), "SfMData file input.") + ("outSfMDataFilename,o", po::value(&sfmOutputDataFilename)->required(), "SfMData file output.") + ; + + po::options_description optionalParams("Optional parameters"); + + po::options_description logParams("Log parameters"); + logParams.add_options() + ("verboseLevel,v", po::value(&verboseLevel)->default_value(verboseLevel), + "verbosity level (fatal, error, warning, info, debug, trace)."); + + allParams.add(requiredParams).add(optionalParams).add(logParams); + + + /** + * READ COMMAND LINE + */ + po::variables_map vm; + try + { + po::store(po::parse_command_line(argc, argv, allParams), vm); + + if(vm.count("help") || (argc == 1)) + { + ALICEVISION_COUT(allParams); + return EXIT_SUCCESS; + } + po::notify(vm); + } + catch(boost::program_options::required_option& e) + { + ALICEVISION_CERR("ERROR: " << e.what()); + ALICEVISION_COUT("Usage:\n\n" << allParams); + return EXIT_FAILURE; + } + catch(boost::program_options::error& e) + { + ALICEVISION_CERR("ERROR: " << e.what()); + ALICEVISION_COUT("Usage:\n\n" << allParams); + return EXIT_FAILURE; + } + + ALICEVISION_COUT("Program called with the following parameters:"); + ALICEVISION_COUT(vm); + + /** + * set verbose level + **/ + system::Logger::get()->setLogLevel(verboseLevel); + + + pt::ptree tree; + + try { + pt::read_xml(externalInfoFilename, tree); + } + catch (...) 
{ + ALICEVISION_CERR("Error parsing input file"); + return EXIT_FAILURE; + } + + pt::ptree lens = tree.get_child("papywizard.header.lens"); + pt::ptree shoot = tree.get_child("papywizard.shoot"); + + std::string lensType = lens.get<std::string>("<xmlattr>.type"); + // double lensFocal = lens.get<double>("focal"); + + /*Make sure we control everything for debug purpose*/ + if (lensType != "rectilinear") { + ALICEVISION_CERR("Lens type not supported: " << lensType); + return EXIT_FAILURE; + } + + + std::map<int, Eigen::Matrix3d> rotations; + + for (auto it : shoot) { + + int id = it.second.get<int>("<xmlattr>.id"); + int bracket = it.second.get<int>("<xmlattr>.bracket"); + + if (rotations.find(id) != rotations.end()) { + ALICEVISION_CERR("Multiple xml attributes with the same id: " << id); + return EXIT_FAILURE; + } + + double yaw_degree = it.second.get<double>("position.<xmlattr>.yaw"); + double pitch_degree = it.second.get<double>("position.<xmlattr>.pitch"); + double roll_degree = it.second.get<double>("position.<xmlattr>.roll"); + + double yaw = degreeToRadian(yaw_degree); + double pitch = degreeToRadian(pitch_degree); + double roll = degreeToRadian(roll_degree); + + Eigen::AngleAxis<double> Myaw(yaw, Eigen::Vector3d::UnitY()); + Eigen::AngleAxis<double> Mpitch(pitch, Eigen::Vector3d::UnitX()); + Eigen::AngleAxis<double> Mroll(roll, Eigen::Vector3d::UnitZ()); + Eigen::AngleAxis<double> Mimage(-M_PI_2, Eigen::Vector3d::UnitZ()); + + Eigen::Matrix3d cRo = Myaw.toRotationMatrix() * Mpitch.toRotationMatrix() * Mroll.toRotationMatrix() * Mimage.toRotationMatrix() ; + + rotations[id] = cRo.transpose(); + } + + + /** + * Update sfm accordingly + */ + sfmData::SfMData sfmData; + if(!sfmDataIO::Load(sfmData, sfmInputDataFilename, sfmDataIO::ESfMData(sfmDataIO::VIEWS|sfmDataIO::INTRINSICS|sfmDataIO::EXTRINSICS))) + { + ALICEVISION_LOG_ERROR("The input SfMData file '" << sfmInputDataFilename << "' cannot be read."); + return EXIT_FAILURE; + } + + if (sfmData.getViews().size() != rotations.size()) { + ALICEVISION_LOG_ERROR("The input SfMData has an incorrect number of views."); + return EXIT_FAILURE; + } + + + /** + * HEURISTIC : + * The xml file describes rotations for view ids which are not correlated with AliceVision view ids. + * Let's assume that the order of the xml view ids is the lexicographic order of the image names. + */ + std::vector<std::pair<std::string, IndexT>> names_with_id; + for (auto v : sfmData.getViews()) { + names_with_id.push_back(std::make_pair(v.second->getImagePath(), v.first)); + } + std::sort(names_with_id.begin(), names_with_id.end()); + + size_t index = 0; + for (auto &item_rotation: rotations) { + + IndexT viewIdx = names_with_id[index].second; + + + if (item_rotation.second.trace() != 0) { + sfmData::CameraPose pose(geometry::Pose3 (item_rotation.second, Eigen::Vector3d::Zero())); + sfmData.setAbsolutePose(viewIdx, pose); + } + + index++; + } + + + if (!sfmDataIO::Save(sfmData, sfmOutputDataFilename, sfmDataIO::ESfMData(sfmDataIO::VIEWS | sfmDataIO::INTRINSICS | sfmDataIO::EXTRINSICS))) { + ALICEVISION_LOG_ERROR("The output SfMData file '" << sfmOutputDataFilename << "' cannot be written."); + return EXIT_FAILURE; + } + + return 0; +} diff --git a/src/software/pipeline/main_panoramaWarping.cpp b/src/software/pipeline/main_panoramaWarping.cpp new file mode 100644 index 0000000000..b858182dcd --- /dev/null +++ b/src/software/pipeline/main_panoramaWarping.cpp @@ -0,0 +1,1326 @@ +/** + * Input and geometry +*/ +#include +#include + +/** + * Image stuff + */ +#include +#include + +/*Logging stuff*/ +#include + +/*Reading command line options*/ +#include +#include + +/*IO*/ +#include +#include +#include +#include + +// These constants define the current software version.
+// They must be updated when the command line is changed. +#define ALICEVISION_SOFTWARE_VERSION_MAJOR 1 +#define ALICEVISION_SOFTWARE_VERSION_MINOR 0 + +using namespace aliceVision; + +namespace po = boost::program_options; +namespace bpt = boost::property_tree; + + +Eigen::VectorXf gaussian_kernel_vector(size_t kernel_length, float sigma) { + + Eigen::VectorXd x; + x.setLinSpaced(kernel_length + 1, -sigma, +sigma); + + Eigen::VectorXd cdf(kernel_length + 1); + for (int i = 0; i < kernel_length + 1; i++) { + cdf(i) = 0.5 * (1 + std::erf(x(i)/sqrt(2.0))); + } + + Eigen::VectorXd k1d(kernel_length); + for (int i = 0; i < kernel_length; i++) { + k1d(i) = cdf(i + 1) - cdf(i); + } + + double sum = k1d.sum(); + k1d = k1d / sum; + + return k1d.cast(); +} + +Eigen::MatrixXf gaussian_kernel(size_t kernel_length, float sigma) { + + Eigen::VectorXf k1d = gaussian_kernel_vector(kernel_length, sigma); + Eigen::MatrixXf K = k1d * k1d.transpose(); + + + double sum = K.sum(); + K = K / sum; + + return K; +} + + + +bool convolve(image::Image & output, const image::Image & input, const image::Image & mask, const Eigen::MatrixXf & kernel) { + + if (output.size() != input.size()) { + return false; + } + + if (output.size() != mask.size()) { + return false; + } + + if (kernel.size() % 2 == 0) { + return false; + } + + if (kernel.rows() != kernel.cols()) { + return false; + } + + int radius = kernel.rows() / 2; + + Eigen::MatrixXf kernel_scaled = kernel; + + for (int i = 0; i < output.Height(); i++) { + for (int j = 0; j < output.Width(); j++) { + + float sum = 0.0f; + float sum_mask = 0.0f; + + if (!mask(i, j)) { + output(i, j) = 0.0f; + continue; + } + + for (int k = 0; k < kernel.rows(); k++) { + float ni = i + k - radius; + if (ni < 0 || ni >= output.Height()) { + continue; + } + + for (int l = 0; l < kernel.cols(); l++) { + float nj = j + l - radius; + if (nj < 0 || nj >= output.Width()) { + continue; + } + + if (!mask(ni, nj)) { + continue; + } + + float val = kernel(k, l) * input(ni, nj); + sum += val; + sum_mask += kernel(k, l); + } + } + + output(i, j) = sum / sum_mask; + } + } + + return true; +} + +bool convolve(image::Image & output, const image::Image & input, const image::Image & mask, const Eigen::MatrixXf & kernel) { + + if (output.size() != input.size()) { + return false; + } + + if (output.size() != mask.size()) { + return false; + } + + if (kernel.size() % 2 == 0) { + return false; + } + + if (kernel.rows() != kernel.cols()) { + return false; + } + + int radius = kernel.rows() / 2; + + Eigen::MatrixXf kernel_scaled = kernel; + + for (int i = 0; i < output.Height(); i++) { + for (int j = 0; j < output.Width(); j++) { + + image::RGBfColor sum(0.0f); + float sum_mask = 0.0f; + + if (!mask(i, j)) { + output(i, j) = sum; + continue; + } + + for (int k = 0; k < kernel.rows(); k++) { + float ni = i + k - radius; + if (ni < 0 || ni >= output.Height()) { + continue; + } + + for (int l = 0; l < kernel.cols(); l++) { + float nj = j + l - radius; + if (nj < 0 || nj >= output.Width()) { + continue; + } + + if (!mask(ni, nj)) { + continue; + } + + sum.r() += kernel(k, l) * input(ni, nj).r(); + sum.g() += kernel(k, l) * input(ni, nj).g(); + sum.b() += kernel(k, l) * input(ni, nj).b(); + sum_mask += kernel(k, l); + } + } + + output(i, j) = sum / sum_mask; + } + } + + return true; +} + +bool computeDistanceMap(image::Image & distance, const image::Image & mask) { + + int m = mask.Height(); + int n = mask.Width(); + + int maxval = m * n; + + distance = image::Image (n, m, false); + for(int x = 0; x < n; ++x) 
{ + + //A corner is when mask becomes 0 + bool b = !mask(0, x); + if (b) { + distance(0, x) = 0; + } + else { + distance(0, x) = maxval * maxval; + } + + for (int y = 1; y < m; y++) { + bool b = !mask(y, x); + if (b) { + distance(y, x) = 0; + } + else { + distance(y, x) = 1 + distance(y - 1, x); + } + } + + for (int y = m - 2; y >= 0; y--) { + if (distance(y + 1, x) < distance(y, x)) { + distance(y, x) = 1 + distance(y + 1, x); + } + } + } + + for (int y = 0; y < m; y++) { + int q; + std::map s; + std::map t; + + q = 0; + s[0] = 0; + t[0] = 0; + + std::function f = [distance, y](int x, int i) { + int gi = distance(y, i); + return (x - i)*(x - i) + gi * gi; + }; + + std::function sep = [distance, y](int i, int u) { + int gu = distance(y, u); + int gi = distance(y, i); + + int nom = (u * u) - (i * i) + (gu * gu) - (gi * gi); + int denom = 2 * (u - i); + + return nom / denom; + }; + + for (int u = 1; u < n; u++) { + + while (q >= 0 && (f(t[q], s[q]) > f(t[q], u))) { + q = q - 1; + } + + if (q < 0) { + q = 0; + s[0] = u; + } + else { + int w = 1 + sep(s[q], u); + if (w < n) { + q = q + 1; + s[q] = u; + t[q] = w; + } + } + } + + for (int u = n - 1; u >= 0; u--) { + distance(y, u) = f(u, s[q]); + if (u == t[q]) { + q = q - 1; + } + } + } + + return true; +} + + +namespace SphericalMapping +{ + /** + * Map from equirectangular to spherical coordinates + * @param equirectangular equirectangular coordinates + * @param width number of pixels used to represent longitude + * @param height number of pixels used to represent latitude + * @return spherical coordinates + */ + Vec3 fromEquirectangular(const Vec2 & equirectangular, int width, int height) + { + const double latitude = (equirectangular(1) / double(height)) * M_PI - M_PI_2; + const double longitude = ((equirectangular(0) / double(width)) * 2.0 * M_PI) - M_PI; + + const double Px = cos(latitude) * sin(longitude); + const double Py = sin(latitude); + const double Pz = cos(latitude) * cos(longitude); + + return Vec3(Px, Py, Pz); + } + + /** + * Map from Spherical to equirectangular coordinates + * @param spherical spherical coordinates + * @param width number of pixels used to represent longitude + * @param height number of pixels used to represent latitude + * @return equirectangular coordinates + */ + Vec2 toEquirectangular(const Vec3 & spherical, int width, int height) { + + double vertical_angle = asin(spherical(1)); + double horizontal_angle = atan2(spherical(0), spherical(2)); + + double latitude = ((vertical_angle + M_PI_2) / M_PI) * height; + double longitude = ((horizontal_angle + M_PI) / (2.0 * M_PI)) * width; + + return Vec2(longitude, latitude); + } + + /** + * Map from Spherical to equirectangular coordinates in radians + * @param spherical spherical coordinates + * @return equirectangular coordinates + */ + Vec2 toLongitudeLatitude(const Vec3 & spherical) { + + double latitude = asin(spherical(1)); + double longitude = atan2(spherical(0), spherical(2)); + + return Vec2(longitude, latitude); + } +} + +class GaussianPyramidNoMask { +public: + GaussianPyramidNoMask(const size_t width_base, const size_t height_base, const size_t limit_scales = 64) : + _width_base(width_base), + _height_base(height_base) + { + /** + * Compute optimal scale + * The smallest level will be at least of size min_size + */ + size_t min_dim = std::min(_width_base, _height_base); + size_t min_size = 32; + _scales = std::min(limit_scales, static_cast(floor(log2(double(min_dim) / float(min_size))))); + + + /** + * Create pyramid + **/ + size_t new_width = 
_width_base; + size_t new_height = _height_base; + for (int i = 0; i < _scales; i++) { + + _pyramid_color.push_back(image::Image(new_width, new_height, true, image::RGBfColor(0))); + _filter_buffer.push_back(image::Image(new_width, new_height, true, image::RGBfColor(0))); + new_height /= 2; + new_width /= 2; + } + } + + bool process(const image::Image & input) { + + if (input.Height() != _pyramid_color[0].Height()) return false; + if (input.Width() != _pyramid_color[0].Width()) return false; + + + /** + * Kernel + */ + oiio::ImageBuf K; + oiio::ImageBufAlgo::make_kernel(K, "gaussian", 5, 5); + + /** + * Build pyramid + */ + _pyramid_color[0] = input; + for (int lvl = 0; lvl < _scales - 1; lvl++) { + + const image::Image & source = _pyramid_color[lvl]; + image::Image & dst = _filter_buffer[lvl]; + + oiio::ImageSpec spec(source.Width(), source.Height(), 3, oiio::TypeDesc::FLOAT); + + const oiio::ImageBuf inBuf(spec, const_cast(source.data())); + oiio::ImageBuf outBuf(spec, dst.data()); + oiio::ImageBufAlgo::convolve(outBuf, inBuf, K); + + downscale(_pyramid_color[lvl + 1], _filter_buffer[lvl]); + } + + return true; + } + + + bool downscale(image::Image & output, const image::Image & input) { + + for (int i = 0; i < output.Height(); i++) { + int ui = i * 2; + + for (int j = 0; j < output.Width(); j++) { + int uj = j * 2; + + output(i, j) = input(ui, uj); + } + } + + return true; + } + + const size_t getScalesCount() const { + return _scales; + } + + const std::vector> & getPyramidColor() const { + return _pyramid_color; + } + + std::vector> & getPyramidColor() { + return _pyramid_color; + } + +protected: + std::vector> _pyramid_color; + std::vector> _filter_buffer; + size_t _width_base; + size_t _height_base; + size_t _scales; +}; + +class CoordinatesMap { +private: + struct BBox { + int left; + int top; + int width; + int height; + }; + +public: + /** + * Build coordinates map given camera properties + * @param panoramaSize desired output panoramaSize + * @param pose the camera pose wrt an arbitrary reference frame + * @param intrinsics the camera intrinsics + */ + bool build(const std::pair & panoramaSize, const geometry::Pose3 & pose, const aliceVision::camera::IntrinsicBase & intrinsics) { + + BBox coarse_bbox; + if (!computeCoarseBB(coarse_bbox, panoramaSize, pose, intrinsics)) { + return false; + } + + + /* Effectively compute the warping map */ + aliceVision::image::Image buffer_coordinates(coarse_bbox.width, coarse_bbox.height, false); + aliceVision::image::Image buffer_mask(coarse_bbox.width, coarse_bbox.height, true, 0); + + size_t max_x = 0; + size_t max_y = 0; + size_t min_x = panoramaSize.first; + size_t min_y = panoramaSize.second; + +#ifdef _MSC_VER + // TODO + // no support for reduction min in MSVC implementation of openmp +#else + #pragma omp parallel for reduction(min: min_x, min_y) reduction(max: max_x, max_y) +#endif + for (size_t y = 0; y < coarse_bbox.height; y++) { + + size_t cy = y + coarse_bbox.top; + + size_t row_max_x = 0; + size_t row_max_y = 0; + size_t row_min_x = panoramaSize.first; + size_t row_min_y = panoramaSize.second; + + + for (size_t x = 0; x < coarse_bbox.width; x++) { + + size_t cx = x + coarse_bbox.left; + + Vec3 ray = SphericalMapping::fromEquirectangular(Vec2(cx, cy), panoramaSize.first, panoramaSize.second); + + /** + * Check that this ray should be visible. 
+ * This test is camera type dependent + */ + Vec3 transformedRay = pose(ray); + if (!intrinsics.isVisibleRay(transformedRay)) { + continue; + } + + /** + * Project this ray to camera pixel coordinates + */ + const Vec2 pix_disto = intrinsics.project(pose, ray, true); + + /** + * Ignore invalid coordinates + */ + if (!intrinsics.isVisible(pix_disto)) { + continue; + } + + + buffer_coordinates(y, x) = pix_disto; + buffer_mask(y, x) = 1; + + row_min_x = std::min(x, row_min_x); + row_min_y = std::min(y, row_min_y); + row_max_x = std::max(x, row_max_x); + row_max_y = std::max(y, row_max_y); + } + + min_x = std::min(row_min_x, min_x); + min_y = std::min(row_min_y, min_y); + max_x = std::max(row_max_x, max_x); + max_y = std::max(row_max_y, max_y); + } + + _offset_x = coarse_bbox.left + min_x; + if (_offset_x > panoramaSize.first) { + /*The coarse bounding box may cross the borders where as the true coordinates may not*/ + int ox = int(_offset_x) - int(panoramaSize.first); + _offset_x = ox; + } + _offset_y = coarse_bbox.top + min_y; + + size_t real_width = max_x - min_x + 1; + size_t real_height = max_y - min_y + 1; + + /* Resize buffers */ + _coordinates = aliceVision::image::Image(real_width, real_height, false); + _mask = aliceVision::image::Image(real_width, real_height, true, 0); + + _coordinates.block(0, 0, real_height, real_width) = buffer_coordinates.block(min_y, min_x, real_height, real_width); + _mask.block(0, 0, real_height, real_width) = buffer_mask.block(min_y, min_x, real_height, real_width); + + return true; + } + + bool computeScale(double & result) { + + std::vector scales; + size_t real_height = _coordinates.Height(); + size_t real_width = _coordinates.Width(); + + for (int i = 0; i < real_height - 1; i++) { + for (int j = 0; j < real_width - 1; j++) { + if (!_mask(i, j) || !_mask(i, j + 1) || !_mask(i + 1, j)) { + continue; + } + + double dxx = _coordinates(i, j + 1).x() - _coordinates(i, j).x(); + double dxy = _coordinates(i + 1, j).x() - _coordinates(i, j).x(); + double dyx = _coordinates(i, j + 1).y() - _coordinates(i, j).y(); + double dyy = _coordinates(i + 1, j).y() - _coordinates(i, j).y(); + + double det = std::abs(dxx*dyy - dxy*dyx); + scales.push_back(det); + } + } + + if (scales.size() <= 1) return false; + + std::nth_element(scales.begin(), scales.begin() + scales.size() / 2, scales.end()); + result = sqrt(scales[scales.size() / 2]); + + + return true; + } + + size_t getOffsetX() const { + return _offset_x; + } + + size_t getOffsetY() const { + return _offset_y; + } + + const aliceVision::image::Image & getCoordinates() const { + return _coordinates; + } + + const aliceVision::image::Image & getMask() const { + return _mask; + } + +private: + + bool computeCoarseBB(BBox & coarse_bbox, const std::pair & panoramaSize, const geometry::Pose3 & pose, const aliceVision::camera::IntrinsicBase & intrinsics) { + + coarse_bbox.left = 0; + coarse_bbox.top = 0; + coarse_bbox.width = panoramaSize.first; + coarse_bbox.height = panoramaSize.second; + + int bbox_left, bbox_top; + int bbox_right, bbox_bottom; + int bbox_width, bbox_height; + + /*Estimate distorted maximal distance from optical center*/ + Vec2 pts[] = {{0.0f, 0.0f}, {intrinsics.w(), 0.0f}, {intrinsics.w(), intrinsics.h()}, {0.0f, intrinsics.h()}}; + float max_radius = 0.0; + for (int i = 0; i < 4; i++) { + + Vec2 ptmeter = intrinsics.ima2cam(pts[i]); + float radius = ptmeter.norm(); + max_radius = std::max(max_radius, radius); + } + + /* Estimate undistorted maximal distance from optical center */ + float 
+    /* Estimate the maximal distorted radius over this range (distortion is assumed to be a function of radius) */
+    float max_radius_distorted = intrinsics.getMaximalDistortion(0.0, max_radius);
+
+    /*
+    Coarse rectangular bounding box in camera space.
+    We add intermediate points to ensure the arc length between 2 consecutive points is never more than 180°.
+    */
+    Vec2 pts_radius[] = {
+      {-max_radius_distorted, -max_radius_distorted},
+      {0, -max_radius_distorted},
+      {max_radius_distorted, -max_radius_distorted},
+      {max_radius_distorted, 0},
+      {max_radius_distorted, max_radius_distorted},
+      {0, max_radius_distorted},
+      {-max_radius_distorted, max_radius_distorted},
+      {-max_radius_distorted, 0}
+    };
+
+    /*
+    Transform the bounding box into the panorama frame.
+    Points are on a unit sphere.
+    */
+    Vec3 rotated_pts[8];
+    for (int i = 0; i < 8; i++) {
+      Vec3 pt3d = pts_radius[i].homogeneous().normalized();
+      rotated_pts[i] = pose.rotation().transpose() * pt3d;
+    }
+
+    /* Vertical default solution: no pole */
+    bbox_top = panoramaSize.second;
+    bbox_bottom = 0;
+
+    for (int i = 0; i < 8; i++) {
+      int i2 = (i + 1) % 8;
+
+      Vec3 extremaY = getExtremaY(rotated_pts[i], rotated_pts[i2]);
+
+      Vec2 res;
+      res = SphericalMapping::toEquirectangular(extremaY, panoramaSize.first, panoramaSize.second);
+      bbox_top = std::min(int(floor(res(1))), bbox_top);
+      bbox_bottom = std::max(int(ceil(res(1))), bbox_bottom);
+
+      res = SphericalMapping::toEquirectangular(rotated_pts[i], panoramaSize.first, panoramaSize.second);
+      bbox_top = std::min(int(floor(res(1))), bbox_top);
+      bbox_bottom = std::max(int(ceil(res(1))), bbox_bottom);
+    }
+
+    /*
+    Check if our region circumscribes a pole of the sphere:
+    check that the region projected on the Y=0 plane contains the point (0, 0).
+    This is a special projection case.
+    */
+    bool pole = isPoleInTriangle(rotated_pts[0], rotated_pts[1], rotated_pts[7]);
+    pole |= isPoleInTriangle(rotated_pts[1], rotated_pts[2], rotated_pts[3]);
+    pole |= isPoleInTriangle(rotated_pts[3], rotated_pts[4], rotated_pts[5]);
+    pole |= isPoleInTriangle(rotated_pts[7], rotated_pts[5], rotated_pts[6]);
+    pole |= isPoleInTriangle(rotated_pts[1], rotated_pts[3], rotated_pts[5]);
+    pole |= isPoleInTriangle(rotated_pts[1], rotated_pts[5], rotated_pts[7]);
+
+    if (pole) {
+      Vec3 normal = (rotated_pts[1] - rotated_pts[0]).cross(rotated_pts[3] - rotated_pts[0]);
+      if (normal(1) > 0) {
+        // lower pole
+        bbox_bottom = panoramaSize.second - 1;
+      }
+      else {
+        // upper pole
+        bbox_top = 0;
+      }
+    }
+
+    bbox_height = bbox_bottom - bbox_top + 1;
+
+    /* Check if we cross the horizontal wrap-around of the panorama */
+    bool crossH = false;
+    for (int i = 0; i < 8; i++) {
+      int i2 = (i + 1) % 8;
+
+      bool cross = crossHorizontalLoop(rotated_pts[i], rotated_pts[i2]);
+      crossH |= cross;
+    }
+
+    if (pole) {
+      /* Easy: if we circumscribe a pole, the width is full */
+      bbox_left = 0;
+      bbox_right = panoramaSize.first - 1;
+      bbox_width = bbox_right - bbox_left + 1;
+    }
+    else if (crossH) {
+
+      /* Start from the first segment that crosses the wrap-around */
+      int first_cross = 0;
+      for (int i = 0; i < 8; i++) {
+        int i2 = (i + 1) % 8;
+        bool cross = crossHorizontalLoop(rotated_pts[i], rotated_pts[i2]);
+        if (cross) {
+          first_cross = i;
+          break;
+        }
+      }
+
+      bbox_left = panoramaSize.first - 1;
+      bbox_right = 0;
+      bool is_right = true;
+      for (int index = 0; index < 8; index++) {
+
+        int i = (index + first_cross) % 8;
+        int i2 = (i + 1) % 8;
+
+        Vec2 res_1 = SphericalMapping::toEquirectangular(rotated_pts[i], panoramaSize.first, panoramaSize.second);
+        Vec2 res_2 = SphericalMapping::toEquirectangular(rotated_pts[i2], panoramaSize.first, panoramaSize.second);
+
+        /* [----right //// left----] */
+        bool cross = crossHorizontalLoop(rotated_pts[i], rotated_pts[i2]);
+        if (cross) {
+          if (res_1(0) > res_2(0)) { /* [----res2 //// res1----] */
+            bbox_left = std::min(int(res_1(0)), bbox_left);
+            bbox_right = std::max(int(res_2(0)), bbox_right);
+            is_right = true;
+          }
+          else { /* [----res1 //// res2----] */
+            bbox_left = std::min(int(res_2(0)), bbox_left);
+            bbox_right = std::max(int(res_1(0)), bbox_right);
+            is_right = false;
+          }
+        }
+        else {
+          if (is_right) {
+            bbox_right = std::max(int(res_1(0)), bbox_right);
+            bbox_right = std::max(int(res_2(0)), bbox_right);
+          }
+          else {
+            bbox_left = std::min(int(res_1(0)), bbox_left);
+            bbox_left = std::min(int(res_2(0)), bbox_left);
+          }
+        }
+      }
+
+      bbox_width = bbox_right + (panoramaSize.first - bbox_left);
+    }
+    else {
+      /* Horizontal default solution: no border crossing, no pole */
+      bbox_left = panoramaSize.first;
+      bbox_right = 0;
+      for (int i = 0; i < 8; i++) {
+        Vec2 res = SphericalMapping::toEquirectangular(rotated_pts[i], panoramaSize.first, panoramaSize.second);
+        bbox_left = std::min(int(floor(res(0))), bbox_left);
+        bbox_right = std::max(int(ceil(res(0))), bbox_right);
+      }
+      bbox_width = bbox_right - bbox_left + 1;
+    }
+
+    /* Assign solution to result */
+    coarse_bbox.left = bbox_left;
+    coarse_bbox.top = bbox_top;
+    coarse_bbox.width = bbox_width;
+    coarse_bbox.height = bbox_height;
+
+    return true;
+  }
+
+  /* Return the point of segment [pt1, pt2] whose direction, normalized onto the unit sphere, has an extremal Y coordinate */
+  Vec3 getExtremaY(const Vec3 & pt1, const Vec3 & pt2) {
+    Vec3 delta = pt2 - pt1;
+    double dx = delta(0);
+    double dy = delta(1);
+    double dz = delta(2);
+    double sx = pt1(0);
+    double sy = pt1(1);
+    double sz = pt1(2);
+
+    double ot_y = -(dx*sx*sy - (dy*sx)*(dy*sx) - (dy*sz)*(dy*sz) + dz*sy*sz)/(dx*dx*sy - dx*dy*sx - dy*dz*sz + dz*dz*sy);
+
+    Vec3 pt_extrema = pt1 + ot_y * delta;
+
+    return pt_extrema.normalized();
+  }
+
+  /* Check whether segment [pt1, pt2] crosses the half-plane x = 0, z < 0 (the horizontal wrap-around of the equirectangular image) */
+  bool crossHorizontalLoop(const Vec3 & pt1, const Vec3 & pt2) {
+    Vec3 direction = pt2 - pt1;
+
+    /* Vertical line */
+    if (std::abs(direction(0)) < 1e-12) {
+      return false;
+    }
+
+    double t = - pt1(0) / direction(0);
+    Vec3 cross = pt1 + direction * t;
+
+    if (t >= 0.0 && t <= 1.0) {
+      if (cross(2) < 0.0) {
+        return true;
+      }
+    }
+
+    return false;
+  }
+
+  /* Barycentric test: does the triangle (pt1, pt2, pt3), projected on the Y=0 plane, contain the origin (i.e. a pole)? */
+  bool isPoleInTriangle(const Vec3 & pt1, const Vec3 & pt2, const Vec3 & pt3) {
+
+    double a = (pt2.x()*pt3.z() - pt3.x()*pt2.z())/(pt1.x()*pt2.z() - pt1.x()*pt3.z() - pt2.x()*pt1.z() + pt2.x()*pt3.z() + pt3.x()*pt1.z() - pt3.x()*pt2.z());
+    double b = (-pt1.x()*pt3.z() + pt3.x()*pt1.z())/(pt1.x()*pt2.z() - pt1.x()*pt3.z() - pt2.x()*pt1.z() + pt2.x()*pt3.z() + pt3.x()*pt1.z() - pt3.x()*pt2.z());
+    double c = 1.0 - a - b;
+
+    if (a < 0.0 || a > 1.0) return false;
+    if (b < 0.0 || b > 1.0) return false;
+    if (c < 0.0 || c > 1.0) return false;
+
+    return true;
+  }
+
+private:
+  size_t _offset_x = 0;
+  size_t _offset_y = 0;
+
+  aliceVision::image::Image<Eigen::Vector2d> _coordinates;
+  aliceVision::image::Image<unsigned char> _mask;
+};
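CoordinatesMap relies on SphericalMapping::fromEquirectangular and SphericalMapping::toEquirectangular, which are defined earlier in this file and not shown in this hunk. For reference, here is a minimal sketch of an equirectangular mapping consistent with how the code above uses it (Y as the vertical axis, wrap-around seam on the half-plane x = 0, z < 0); the exact axis convention of the real implementation is an assumption.

    // Hypothetical sketch, not the file's actual implementation.
    namespace SphericalMappingSketch {

    // Panorama pixel -> unit ray (latitude from y, longitude from x).
    Vec3 fromEquirectangular(const Vec2 & pix, int width, int height)
    {
      const double longitude = 2.0 * M_PI * (pix(0) / double(width) - 0.5); // [-PI, PI]
      const double latitude = M_PI * (pix(1) / double(height) - 0.5);       // [-PI/2, PI/2]
      return Vec3(cos(latitude) * sin(longitude), sin(latitude), cos(latitude) * cos(longitude));
    }

    // Unit ray -> panorama pixel (inverse of the function above; assumes 'ray' is normalized).
    Vec2 toEquirectangular(const Vec3 & ray, int width, int height)
    {
      const double longitude = atan2(ray(0), ray(2));
      const double latitude = asin(ray(1));
      return Vec2(width * (0.5 + longitude / (2.0 * M_PI)), height * (0.5 + latitude / M_PI));
    }

    } // namespace SphericalMappingSketch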
+
+class AlphaBuilder {
+public:
+  virtual bool build(const CoordinatesMap & map, const aliceVision::camera::IntrinsicBase & intrinsics) {
+
+    float w = static_cast<float>(intrinsics.w());
+    float h = static_cast<float>(intrinsics.h());
+    float cx = w / 2.0f;
+    float cy = h / 2.0f;
+
+    const aliceVision::image::Image<Eigen::Vector2d> & coordinates = map.getCoordinates();
+    const aliceVision::image::Image<unsigned char> & mask = map.getMask();
+
+    _weights = aliceVision::image::Image<float>(coordinates.Width(), coordinates.Height());
+
+    for (int i = 0; i < _weights.Height(); i++) {
+      for (int j = 0; j < _weights.Width(); j++) {
+
+        _weights(i, j) = 0.0f;
+
+        bool valid = mask(i, j);
+        if (!valid) {
+          continue;
+        }
+
+        const Vec2 & coords = coordinates(i, j);
+
+        float x = coords(0);
+        float y = coords(1);
+
+        /* Separable triangle weight: 1 at the image center, falling to 0 at the borders */
+        float wx = 1.0f - std::abs((x - cx) / cx);
+        float wy = 1.0f - std::abs((y - cy) / cy);
+
+        _weights(i, j) = wx * wy;
+      }
+    }
+
+    return true;
+  }
+
+  const aliceVision::image::Image<float> & getWeights() const {
+    return _weights;
+  }
+
+private:
+  aliceVision::image::Image<float> _weights;
+};
+
+class Warper {
+public:
+  virtual bool warp(const CoordinatesMap & map, const aliceVision::image::Image<image::RGBfColor> & source) {
+
+    /* Copy additional info from the map */
+    _offset_x = map.getOffsetX();
+    _offset_y = map.getOffsetY();
+    _mask = map.getMask();
+
+    const image::Sampler2d<image::SamplerLinear> sampler;
+    const aliceVision::image::Image<Eigen::Vector2d> & coordinates = map.getCoordinates();
+
+    /*
+    Create the output buffer.
+    No longer any need to keep a power-of-two size.
+    */
+    _color = aliceVision::image::Image<image::RGBfColor>(coordinates.Width(), coordinates.Height());
+
+    /* Simple warp: one bilinear lookup in the source per output pixel */
+    for (size_t i = 0; i < _color.Height(); i++) {
+      for (size_t j = 0; j < _color.Width(); j++) {
+
+        bool valid = _mask(i, j);
+        if (!valid) {
+          continue;
+        }
+
+        const Eigen::Vector2d & coord = coordinates(i, j);
+        const image::RGBfColor pixel = sampler(source, coord(1), coord(0));
+
+        _color(i, j) = pixel;
+      }
+    }
+
+    return true;
+  }
+
+  const aliceVision::image::Image<image::RGBfColor> & getColor() const {
+    return _color;
+  }
+
+  const aliceVision::image::Image<unsigned char> & getMask() const {
+    return _mask;
+  }
+
+  size_t getOffsetX() const {
+    return _offset_x;
+  }
+
+  size_t getOffsetY() const {
+    return _offset_y;
+  }
+
+protected:
+  size_t _offset_x = 0;
+  size_t _offset_y = 0;
+
+  aliceVision::image::Image<image::RGBfColor> _color;
+  aliceVision::image::Image<unsigned char> _mask;
+};
+
+class GaussianWarper : public Warper {
+public:
+  virtual bool warp(const CoordinatesMap & map, const aliceVision::image::Image<image::RGBfColor> & source) {
+
+    /* Copy additional info from the map */
+    _offset_x = map.getOffsetX();
+    _offset_y = map.getOffsetY();
+    _mask = map.getMask();
+
+    const image::Sampler2d<image::SamplerLinear> sampler;
+    const aliceVision::image::Image<Eigen::Vector2d> & coordinates = map.getCoordinates();
+
+    /* Create a gaussian pyramid for the input */
+    GaussianPyramidNoMask pyramid(source.Width(), source.Height());
+    pyramid.process(source);
+    const std::vector<image::Image<image::RGBfColor>> & mlsource = pyramid.getPyramidColor();
+    size_t max_level = pyramid.getScalesCount() - 1;
+
+    /* Create the output buffer */
+    _color = aliceVision::image::Image<image::RGBfColor>(coordinates.Width(), coordinates.Height(), true, image::RGBfColor(1.0, 0.0, 0.0));
+
+    /* Multi-level warp: pick the pyramid level matching the local minification */
+    for (size_t i = 0; i < _color.Height(); i++) {
+      for (size_t j = 0; j < _color.Width(); j++) {
+
+        bool valid = _mask(i, j);
+        if (!valid) {
+          continue;
+        }
+
+        if (i == _color.Height() - 1 || j == _color.Width() - 1 || !_mask(i + 1, j) || !_mask(i, j + 1)) {
+          const Eigen::Vector2d & coord = coordinates(i, j);
+          const image::RGBfColor pixel = sampler(source, coord(1), coord(0));
+          _color(i, j) = pixel;
+          continue;
+        }
+
+        const Eigen::Vector2d & coord_mm = coordinates(i, j);
+        const Eigen::Vector2d & coord_mp = coordinates(i, j + 1);
+        const Eigen::Vector2d & coord_pm = coordinates(i + 1, j);
+
+        /* Local Jacobian determinant: the linear scale is sqrt(det), the pyramid level its log2 */
+        double dxx = coord_pm(0) - coord_mm(0);
+        double dxy = coord_mp(0) - coord_mm(0);
+        double dyx = coord_pm(1) - coord_mm(1);
+        double dyy = coord_mp(1) - coord_mm(1);
+        double det = std::abs(dxx*dyy - dxy*dyx);
+        double scale = sqrt(det);
+
+        double flevel = std::max(0.0, log2(scale));
+        size_t blevel = std::min(max_level, size_t(floor(flevel)));
+
+        double dscale, x, y;
+        dscale = 1.0 / pow(2.0, blevel);
+        x = coord_mm(0) * dscale;
+        y = coord_mm(1) * dscale;
+        /* Fall back to the first level if outside */
+        if (x >= mlsource[blevel].Width() - 1 || y >= mlsource[blevel].Height() - 1) {
+          _color(i, j) = sampler(mlsource[0], coord_mm(1), coord_mm(0));
+          continue;
+        }
+
+        _color(i, j) = sampler(mlsource[blevel], y, x);
+      }
+    }
+
+    return true;
+  }
+};
+
+bool computeOptimalPanoramaSize(std::pair<int, int> & optimalSize, const sfmData::SfMData & sfmData) {
+
+  optimalSize.first = 512;
+  optimalSize.second = 256;
+
+  /* Loop over the views to estimate the best scale */
+  std::vector<double> scales;
+  for (auto & viewIt: sfmData.getViews()) {
+
+    /* Retrieve the view */
+    const sfmData::View& view = *viewIt.second.get();
+    if (!sfmData.isPoseAndIntrinsicDefined(&view)) {
+      continue;
+    }
+
+    /* Get intrinsics and extrinsics */
+    const geometry::Pose3 camPose = sfmData.getPose(view).getTransform();
+    const camera::IntrinsicBase & intrinsic = *sfmData.getIntrinsicPtr(view.getIntrinsicId());
+
+    /* Compute the map */
+    CoordinatesMap map;
+    if (!map.build(optimalSize, camPose, intrinsic)) {
+      continue;
+    }
+
+    double scale;
+    if (!map.computeScale(scale)) {
+      continue;
+    }
+
+    scales.push_back(scale);
+  }
+
+  if (scales.size() > 1) {
+    double median_scale;
+    std::nth_element(scales.begin(), scales.begin() + scales.size() / 2, scales.end());
+    median_scale = scales[scales.size() / 2];
+
+    double multiplier = pow(2.0, int(floor(log2(median_scale))));
+
+    optimalSize.first = optimalSize.first * multiplier;
+    optimalSize.second = optimalSize.second * multiplier;
+  }
+
+  return true;
+}
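As a worked example of the sizing rule above (values illustrative): probing with the default 512x256 panorama, if the median linear scale between panorama and source pixels comes out at 7.3, then multiplier = 2^floor(log2 7.3) = 2^2 = 4, and the chosen panorama is 2048x1024. Snapping the multiplier to a power of two keeps every gaussian pyramid level aligned with the panorama grid.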
following parameters:"); + ALICEVISION_COUT(vm); + + + /** + * Set verbose level given command line + */ + system::Logger::get()->setLogLevel(verboseLevel); + + /** + * Load information about inputs + * Camera images + * Camera intrinsics + * Camera extrinsics + */ + sfmData::SfMData sfmData; + if(!sfmDataIO::Load(sfmData, sfmDataFilename, sfmDataIO::ESfMData(sfmDataIO::VIEWS| sfmDataIO::INTRINSICS| sfmDataIO::EXTRINSICS))) + { + ALICEVISION_LOG_ERROR("The input SfMData file '" << sfmDataFilename << "' cannot be read."); + return EXIT_FAILURE; + } + + + /*Order views by their image names for easier debugging*/ + std::vector> viewsOrderedByName; + for (auto & viewIt: sfmData.getViews()) { + viewsOrderedByName.push_back(viewIt.second); + } + std::sort(viewsOrderedByName.begin(), viewsOrderedByName.end(), [](const std::shared_ptr & a, const std::shared_ptr & b) -> bool { + if (a == nullptr || b == nullptr) return true; + return (a->getImagePath() < b->getImagePath()); + }); + + + /*If panorama width is undefined, estimate it*/ + if (panoramaSize.first <= 0) { + std::pair optimalPanoramaSize; + if (computeOptimalPanoramaSize(optimalPanoramaSize, sfmData)) { + panoramaSize = optimalPanoramaSize; + } + } + else { + double max_scale = 1.0 / pow(2.0, 10); + panoramaSize.first = int(ceil(double(panoramaSize.first) * max_scale) / max_scale); + panoramaSize.second = panoramaSize.first / 2; + } + + + + ALICEVISION_LOG_INFO("Choosen panorama size : " << panoramaSize.first << "x" << panoramaSize.second); + + bpt::ptree viewsTree; + + /** + * Preprocessing per view + */ + size_t pos = 0; + for (const std::shared_ptr & viewIt: viewsOrderedByName) { + + /** + * Retrieve view + */ + const sfmData::View& view = *viewIt; + if (!sfmData.isPoseAndIntrinsicDefined(&view)) { + continue; + } + + ALICEVISION_LOG_INFO("Processing view " << view.getViewId()); + + /** + * Get intrinsics and extrinsics + */ + const geometry::Pose3 camPose = sfmData.getPose(view).getTransform(); + const camera::IntrinsicBase & intrinsic = *sfmData.getIntrinsicPtr(view.getIntrinsicId()); + + /** + * Prepare coordinates map + */ + CoordinatesMap map; + map.build(panoramaSize, camPose, intrinsic); + + /** + * Load image and convert it to linear colorspace + */ + std::string imagePath = view.getImagePath(); + ALICEVISION_LOG_INFO("Load image with path " << imagePath); + image::Image source; + image::readImage(imagePath, source, image::EImageColorSpace::LINEAR); + + /** + * Warp image + */ + GaussianWarper warper; + warper.warp(map, source); + + /** + * Alpha mask + */ + AlphaBuilder alphabuilder; + alphabuilder.build(map, intrinsic); + + + /** + * Combine mask and image + */ + const aliceVision::image::Image & cam = warper.getColor(); + const aliceVision::image::Image & mask = warper.getMask(); + const aliceVision::image::Image & weights = alphabuilder.getWeights(); + + /** + * Store result image + */ + bpt::ptree viewTree; + std::string path; + + { + std::stringstream ss; + ss << outputDirectory << "/view_" << pos << ".exr"; + path = ss.str(); + viewTree.put("filename_view", path); + ALICEVISION_LOG_INFO("Store view " << pos << " with path " << path); + image::writeImage(path, cam, image::EImageColorSpace::AUTO); + } + + { + std::stringstream ss; + ss << outputDirectory << "/mask_" << pos << ".png"; + path = ss.str(); + viewTree.put("filename_mask", path); + ALICEVISION_LOG_INFO("Store mask " << pos << " with path " << path); + image::writeImage(path, mask, image::EImageColorSpace::NO_CONVERSION); + } + + { + std::stringstream ss; + ss << 
outputDirectory << "/weightmap_" << pos << ".exr"; + path = ss.str(); + viewTree.put("filename_weights", path); + ALICEVISION_LOG_INFO("Store weightmap " << pos << " with path " << path); + image::writeImage(path, weights, image::EImageColorSpace::AUTO); + } + + /** + * Store view info + */ + viewTree.put("offsetx", warper.getOffsetX()); + viewTree.put("offsety", warper.getOffsetY()); + viewsTree.push_back(std::make_pair("", viewTree)); + + pos++; + } + + + /** + * Config output + */ + bpt::ptree configTree; + configTree.put("panoramaWidth", panoramaSize.first); + configTree.put("panoramaHeight", panoramaSize.second); + configTree.add_child("views", viewsTree); + + std::stringstream ss; + ss << outputDirectory << "/config_views.json"; + ALICEVISION_LOG_INFO("Save config with path " << ss.str()); + bpt::write_json(ss.str(), configTree, std::locale(), true); + + return EXIT_SUCCESS; +} \ No newline at end of file diff --git a/src/software/pipeline/main_prepareDenseScene.cpp b/src/software/pipeline/main_prepareDenseScene.cpp index 6b194f81dc..d0c38459bf 100644 --- a/src/software/pipeline/main_prepareDenseScene.cpp +++ b/src/software/pipeline/main_prepareDenseScene.cpp @@ -78,8 +78,8 @@ bool prepareDenseScene(const SfMData& sfmData, boost::progress_display progressBar(viewIds.size(), std::cout, "Exporting Scene Undistorted Images\n"); // for exposure correction - const float medianEv = sfmData.getMedianEv(); - ALICEVISION_LOG_INFO("median Ev : " << medianEv); + const float medianCameraExposure = sfmData.getMedianCameraExposureSetting(); + ALICEVISION_LOG_INFO("Median Camera Exposure: " << medianCameraExposure << ", Median EV: " << std::log2(1.0f/medianCameraExposure)); #pragma omp parallel for num_threads(3) for(int i = 0; i < viewIds.size(); ++i) @@ -195,16 +195,16 @@ bool prepareDenseScene(const SfMData& sfmData, readImage(srcImage, image, image::EImageColorSpace::LINEAR); // add exposure values to images metadata - float exposure = view->getEv(); - float exposureCompensation = view->getEvCompensation(medianEv); - metadata.push_back(oiio::ParamValue("AliceVision:EV", exposure)); + float cameraExposure = view->getCameraExposureSetting(); + float ev = std::log2(1.0 / cameraExposure); + float exposureCompensation = medianCameraExposure / cameraExposure; + metadata.push_back(oiio::ParamValue("AliceVision:EV", ev)); metadata.push_back(oiio::ParamValue("AliceVision:EVComp", exposureCompensation)); //exposure correction if(evCorrection) { - ALICEVISION_LOG_INFO("image " + std::to_string(viewId) + " Ev : " + std::to_string(exposure)); - ALICEVISION_LOG_INFO("image " + std::to_string(viewId) + " Ev compensation : " + std::to_string(exposureCompensation)); + ALICEVISION_LOG_INFO("View: " << viewId << ", Ev: " << ev << ", Ev compensation: " << exposureCompensation); for(int pix = 0; pix < image.Width() * image.Height(); ++pix) image(pix) = image(pix) * exposureCompensation; diff --git a/src/software/utils/CMakeLists.txt b/src/software/utils/CMakeLists.txt index 4c5d080c5f..d409635d51 100644 --- a/src/software/utils/CMakeLists.txt +++ b/src/software/utils/CMakeLists.txt @@ -173,4 +173,16 @@ alicevision_add_software(aliceVision_utils_split360Images ${Boost_LIBRARIES} ) + +# Project 180° fisheye images into equirectangular +alicevision_add_software(aliceVision_utils_fisheyeProjection + SOURCE main_fisheyeProjection.cpp + FOLDER ${FOLDER_SOFTWARE_UTILS} + LINKS aliceVision_system + aliceVision_numeric + aliceVision_image + ${OPENIMAGEIO_LIBRARIES} + ${Boost_LIBRARIES} +) + endif() # ALICEVISION_BUILD_SFM 
diff --git a/src/software/utils/main_fisheyeProjection.cpp b/src/software/utils/main_fisheyeProjection.cpp
new file mode 100644
index 0000000000..a3c749d048
--- /dev/null
+++ b/src/software/utils/main_fisheyeProjection.cpp
@@ -0,0 +1,419 @@
+// This file is part of the AliceVision project.
+// Copyright (c) 2017 AliceVision contributors.
+// Copyright (c) 2016 openMVG contributors.
+// This Source Code Form is subject to the terms of the Mozilla Public License,
+// v. 2.0. If a copy of the MPL was not distributed with this file,
+// You can obtain one at https://mozilla.org/MPL/2.0/.
+
+#include <aliceVision/system/Logger.hpp>
+#include <aliceVision/system/cmdline.hpp>
+#include <aliceVision/numeric/numeric.hpp>
+#include <aliceVision/image/all.hpp>
+#include <aliceVision/image/io.hpp>
+#include <aliceVision/image/Sampler.hpp>
+
+#include <OpenImageIO/imagebuf.h>
+#include <OpenImageIO/imagebufalgo.h>
+
+#include <boost/program_options.hpp>
+#include <boost/filesystem.hpp>
+#include <boost/regex.hpp>
+#include <boost/math/constants/constants.hpp>
+#include <boost/range/iterator_range.hpp>
+
+#include <string>
+#include <vector>
+#include <array>
+#include <algorithm>
+#include <sstream>
+#include <cmath>
+
+
+// These constants define the current software version.
+// They must be updated when the command line is changed.
+#define ALICEVISION_SOFTWARE_VERSION_MAJOR 1
+#define ALICEVISION_SOFTWARE_VERSION_MINOR 0
+
+using namespace aliceVision;
+
+namespace fs = boost::filesystem;
+namespace po = boost::program_options;
+namespace oiio = OIIO;
+namespace bmf = boost::math::float_constants;
+
+
+/**
+ * @brief Functions to map 3D coordinates onto a 2D image according to a spherical projection
+ */
+namespace SphericalMapping
+{
+  Vec3 get3DPoint(const Vec2& pos2d, int width, int height)
+  {
+    const double x = pos2d(0) - width;
+    const double y = height/2.0 - pos2d(1);
+
+    const double longitude = M_PI * 2.0 * x / width; // between -2*PI and 0 (equivalent modulo 2*PI)
+    const double latitude = M_PI * y / height;       // between -PI/2 and PI/2
+
+    const double Px = cos(latitude) * cos(longitude);
+    const double Py = cos(latitude) * sin(longitude);
+    const double Pz = sin(latitude);
+
+    return Vec3(Px, Py, Pz);
+  }
+
+  Vec2 get2DCoordinates(const Vec3& ray, int inSize)
+  {
+    const double Px = ray(0);
+    const double Py = ray(1);
+    const double Pz = ray(2);
+
+//  float aperture = 2.0 * std::atan(36.0 / (8.0 * 2.0));
+//  float aperture = 2.0 * asin(1.0 / (2.0 * fNumber));
+
+    /* Equidistant fisheye model: the radius is proportional to the angle from the optical axis (here the Y axis) */
+    const float r = 2.f * std::atan2(std::sqrt(Square(Px) + Square(Pz)), Py) / bmf::pi;
+    const float theta = atan2(Pz, Px);
+    const Vec2 v(r*cos(theta), r*sin(theta));
+    return v * inSize/2;
+  }
+}
+
+
+/* Decreasing sigmoid used to feather the alpha channel near the fisheye border */
+float sigmoid(float x, float sigwidth, float sigMid)
+{
+  return 1.0f / (1.0f + expf(10.0f * ((x - sigMid) / sigwidth)));
+}
+
+/**
+ * @brief Convert the "Exif:DateTimeOriginal" metadata into a number of seconds and append it to a vector
+ * @param[in] dateTime - input string containing the date and time of the shot
+ * @param[out] times - vector of shooting times in seconds (time of day only: shots are assumed to be taken on the same day)
+ */
+void convertTime(const std::string& dateTime, std::vector<float>& times)
+{
+  const boost::regex dateTimeExp(
+    // Syntax of the Exif:DateTimeOriginal tag
+    "(?<date>[\\d\\:]*) " // YYYY:MM:DD date, separated from the time by a space
+    "(?<h>\\d*):"         // HH: hours
+    "(?<m>\\d*):"         // MM: minutes
+    "(?<s>\\d*)"          // SS: seconds
+  );
+
+  // convert the time of day to seconds so that images can be sorted by shooting time for stitching
+  boost::smatch what;
+  if(boost::regex_search(dateTime, what, dateTimeExp))
+  {
+    times.push_back(3600.0f * std::stof(what["h"]) + 60.0f * std::stof(what["m"]) + std::stof(what["s"]));
+  }
+}
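As a quick sanity check of the conversion above (example values): "2019:05:21 19:37:03" yields 3600*19 + 60*37 + 3 = 70623 seconds, which correctly sorts after "2019:05:21 09:59:59" (35999 seconds). With per-unit factors of 3600/60/1 the ordering of shots within a day is monotonic, which is all the later sorting step needs.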
+
+/**
+ * @brief Set a fisheye image to its correct orientation and add an alpha channel
+ * @param[in] imageIn - input RGBf fisheye image
+ * @param[in] blurWidth_param - relative width of the alpha feathering at the fisheye border (between 0 and 1)
+ * @param[in,out] buffer - image buffer carrying the input metadata (orientation); reoriented in place
+ * @param[out] imageAlpha - output RGBAf fisheye image, correctly oriented
+ */
+void setFisheyeImage(image::Image<image::RGBfColor>& imageIn, float blurWidth_param, oiio::ImageBuf& buffer, image::Image<image::RGBAfColor>& imageAlpha)
+{
+  bool correctOrientation = oiio::ImageBufAlgo::reorient(buffer, buffer);
+
+  if(!correctOrientation)
+  {
+    ALICEVISION_LOG_ERROR("Can't set correct orientation of image");
+  }
+
+  std::size_t width = buffer.spec().width;
+  std::size_t height = buffer.spec().height;
+
+  imageIn.resize(width, height, false);
+  buffer.get_pixels(buffer.roi(), buffer.spec().format, imageIn.data());
+
+  imageAlpha.resize(width, height, false);
+
+  const float maxRadius = std::min(width, height) * 0.5f;
+  const float blurWidth = maxRadius * blurWidth_param;
+  const float blurMid = maxRadius - blurWidth/2.f;
+  const Vec2i center(width/2.f, height/2.f);
+
+  for(std::size_t x = 0; x < width; ++x)
+  {
+    for(std::size_t y = 0; y < height; ++y)
+    {
+      const image::RGBfColor& inPixel = imageIn(y, x);
+      image::RGBAfColor& alphaPixel = imageAlpha(y, x);
+      const float dist = std::sqrt((x-center(0)) * (x-center(0)) + (y-center(1)) * (y-center(1)));
+      if(dist > maxRadius)
+      {
+        /* Outside the fisheye circle: fully transparent (zero the color too, since the buffer is not initialized) */
+        alphaPixel.r() = 0.f;
+        alphaPixel.g() = 0.f;
+        alphaPixel.b() = 0.f;
+        alphaPixel.a() = 0.f;
+      }
+      else
+      {
+        alphaPixel.r() = inPixel.r();
+        alphaPixel.g() = inPixel.g();
+        alphaPixel.b() = inPixel.b();
+        alphaPixel.a() = sigmoid(dist, blurWidth, blurMid);
+      }
+    }
+  }
+}
+
+
+/**
+ * @brief Project a fisheye image into an equirectangular map and merge it into the output panorama
+ * @param[in] imageIn - input RGBAf fisheye image
+ * @param[in] nbImages - total number of images to merge
+ * @param[in] iter - index of the input image for merging into the panorama
+ * @param[in] rotations - adjustment rotations for each image, set by the user
+ * @param[out] imageOut - output panorama, incremented at each call
+ */
+void fisheyeToEquirectangular(image::Image<image::RGBAfColor>& imageIn, int nbImages, int iter, const std::array<std::vector<double>, 3>& rotations, image::Image<image::RGBAfColor>& imageOut)
+{
+  std::size_t inWidth = imageIn.Width();
+  std::size_t inHeight = imageIn.Height();
+  std::size_t inSize = std::min(inWidth, inHeight);
+
+  /* Default layout: cameras evenly spread around the vertical axis; a heuristic tilt is derived from the z rotation */
+  const double zRotation = double(30.0 - iter * 360.0 / nbImages) + rotations[2].at(iter);
+  const double xRotation = double(-abs(90.0 - abs(zRotation))/30.0) + rotations[0].at(iter);
+  const double yRotation = rotations[1].at(iter);
+
+  const image::Sampler2d<image::SamplerLinear> sampler;
+  for(int j = 0; j < inSize; ++j)
+  {
+    for(int i = 0; i < 2 * inSize; ++i)
+    {
+      Vec3 ray = SphericalMapping::get3DPoint(Vec2(i,j), 2*inSize, inSize);
+
+      ray = rotationXYZ(degreeToRadian(xRotation), degreeToRadian(yRotation), degreeToRadian(zRotation)) * ray;
+      const Vec2 x = SphericalMapping::get2DCoordinates(ray, inSize);
+      const Vec2 xFish(inWidth/2 - x(0), inHeight/2 - x(1));
+
+      const image::RGBAfColor& pixel = sampler(imageIn, xFish(1), xFish(0));
+      float alpha = pixel.a();
+
+      /* Alpha blending: accumulate this view over what is already in the panorama */
+      imageOut(j, i).r() = pixel.r()*alpha + imageOut(j, i).r()*(1.f-alpha);
+      imageOut(j, i).g() = pixel.g()*alpha + imageOut(j, i).g()*(1.f-alpha);
+      imageOut(j, i).b() = pixel.b()*alpha + imageOut(j, i).b()*(1.f-alpha);
+      imageOut(j, i).a() = pixel.a()*alpha + imageOut(j, i).a()*(1.f-alpha);
+    }
+  }
+}
+
+
+/**
+ * @brief Load input images and stitch them into a 360° panorama
+ * @param[in] imagePaths - input image paths
+ * @param[in] metadatas - input metadata for each image
+ * @param[in] blurWidth - relative width of the alpha feathering
+ * @param[in] rotations - adjustment rotations for each image, set by the user
+ * @param[in,out] outputPath - output folder or complete file path; if a folder is given, "panorama.exr" is appended
+ */
+void stitchPanorama(const std::vector<std::string>& imagePaths, const std::vector<oiio::ParamValueList>& metadatas, float blurWidth, const std::array<std::vector<double>, 3>& rotations, std::string& outputPath)
+{
+  int nbImages = imagePaths.size();
+  image::Image<image::RGBAfColor> imageOut;
+  std::vector<oiio::ImageBuf> buffers(nbImages);
+  oiio::ImageBuf& bufferOut = buffers[0];
+  std::size_t inSize;
+
+  for(int i = 0; i < nbImages; ++i)
+  {
+    oiio::ImageBuf& buffer = buffers[i];
+    image::Image<image::RGBfColor> imageIn;
+    image::Image<image::RGBAfColor> imageAlpha;
+
+    ALICEVISION_LOG_INFO("Projecting " << imagePaths[i] << " into equirectangular space");
+
+    image::readImage(imagePaths[i], imageIn, image::EImageColorSpace::LINEAR);
+    image::getBufferFromImage(imageIn, buffer);
+    buffer.specmod().extra_attribs = metadatas[i];
+
+    setFisheyeImage(imageIn, blurWidth, buffer, imageAlpha);
+
+    if(i == 0)
+    {
+      inSize = std::min(bufferOut.spec().width, bufferOut.spec().height);
+      imageOut.resize(2*inSize, inSize, true, image::RGBAfColor(0.f, 0.f, 0.f, 0.f));
+    }
+
+    fisheyeToEquirectangular(imageAlpha, nbImages, i, rotations, imageOut);
+  }
+
+  // save the equirectangular image with the first fisheye's metadata
+  if(fs::is_directory(fs::path(outputPath)))
+  {
+    outputPath = (fs::path(outputPath) / ("panorama.exr")).string();
+  }
+  image::writeImage(outputPath, imageOut, image::EImageColorSpace::AUTO, bufferOut.specmod().extra_attribs);
+
+  ALICEVISION_LOG_INFO("Panorama successfully written as " << outputPath);
+}
+
+
+int main(int argc, char** argv)
+{
+  // command-line parameters
+  std::string verboseLevel = system::EVerboseLevel_enumToString(system::Logger::getDefaultVerboseLevel());
+  std::vector<std::string> inputPath;   // media file path list
+  std::string outputFolder;             // output folder for the panorama
+  float blurWidth = 0.2f;
+  std::vector<double> xRotation;
+  std::vector<double> yRotation;
+  std::vector<double> zRotation;
+
+  po::options_description allParams("This program stitches multiple fisheye images into an equirectangular 360° panorama\n"
+                                    "AliceVision fisheyeProjection");
+
+  po::options_description requiredParams("Required parameters");
+  requiredParams.add_options()
+    ("input,i", po::value<std::vector<std::string>>(&inputPath)->required()->multitoken(),
+      "List of fisheye images or a folder containing them.")
+    ("output,o", po::value<std::string>(&outputFolder)->required(),
+      "Output panorama folder or complete path");
+
+  po::options_description optionalParams("Optional parameters");
+  optionalParams.add_options()
+    ("blurWidth,b", po::value<float>(&blurWidth)->default_value(blurWidth),
+      "Blur width of the alpha channel for all fisheyes (between 0 and 1); determines the sharpness of the transitions.")
+    ("xRotation,x", po::value<std::vector<double>>(&xRotation)->multitoken(),
+      "Angles to rotate each image on the x axis: horizontal axis on the panorama.")
+    ("yRotation,y", po::value<std::vector<double>>(&yRotation)->multitoken(),
+      "Angles to rotate each image on the y axis: vertical axis on the panorama.")
+    ("zRotation,z", po::value<std::vector<double>>(&zRotation)->multitoken(),
+      "Angles to rotate each image on the z axis: depth axis on the panorama.");
+
+  po::options_description logParams("Log parameters");
+  logParams.add_options()
+    ("verboseLevel,v", po::value<std::string>(&verboseLevel)->default_value(verboseLevel),
+      "verbosity level (fatal, error, warning, info, debug, trace).");
+
+  allParams.add(requiredParams).add(optionalParams).add(logParams);
+
+  po::variables_map vm;
+  try
+  {
+    po::store(po::parse_command_line(argc, argv, allParams), vm);
+
+    if(vm.count("help") || (argc == 1))
+    {
+      ALICEVISION_COUT(allParams);
+      return EXIT_SUCCESS;
+    }
+    po::notify(vm);
+  }
+  catch(boost::program_options::required_option& e)
+  {
+    ALICEVISION_CERR("ERROR: " << e.what());
+    ALICEVISION_COUT("Usage:\n\n" << allParams);
+    return EXIT_FAILURE;
+  }
+  catch(boost::program_options::error& e)
+  {
+    ALICEVISION_CERR("ERROR: " << e.what());
+    ALICEVISION_COUT("Usage:\n\n" << allParams);
+    return EXIT_FAILURE;
+  }
+
+  ALICEVISION_COUT("Program called with the following parameters:");
+  ALICEVISION_COUT(vm);
+
+  // set verbose level
+  system::Logger::get()->setLogLevel(verboseLevel);
+
+  // check the output folder and update it to an absolute path
+  outputFolder = fs::absolute(outputFolder).string();
+
+  std::vector<std::string> imagePaths;
+  std::vector<float> times;
+  std::vector<oiio::ParamValueList> metadatas;
+
+  for(const std::string& entry: inputPath)
+  {
+    const fs::path path = fs::absolute(entry);
+    if(fs::exists(path) && fs::is_directory(path))
+    {
+      for(fs::directory_entry& file : boost::make_iterator_range(fs::directory_iterator(path), {}))
+      {
+        if(image::isSupported(file.path().extension().string()))
+        {
+          imagePaths.push_back(file.path().string());
+
+          const oiio::ParamValueList metadata = image::readImageMetadata(file.path().string());
+          metadatas.push_back(metadata);
+          std::string dateTime;
+          dateTime = metadata.get_string("Exif:DateTimeOriginal");
+
+          if(dateTime.empty())
+          {
+            ALICEVISION_LOG_ERROR("Can't sort images: no time metadata in " << file.path().string());
+            return EXIT_FAILURE;
+          }
+          convertTime(dateTime, times);
+        }
+      }
+    }
+    else if(fs::exists(path) && image::isSupported(path.extension().string()))
+    {
+      imagePaths.push_back(path.string());
+
+      oiio::ParamValueList metadata = image::readImageMetadata(entry);
+      metadatas.push_back(metadata);
+      std::string dateTime;
+      dateTime = metadata.get_string("Exif:DateTimeOriginal");
+      if(dateTime.empty())
+      {
+        ALICEVISION_LOG_ERROR("Can't sort images: no time metadata in " << entry);
+        return EXIT_FAILURE;
+      }
+      convertTime(dateTime, times);
+    }
+    else
+    {
+      ALICEVISION_LOG_ERROR("Can't find file or folder " << entry);
+      return EXIT_FAILURE;
+    }
+  }
+
+  if(imagePaths.empty())
+  {
+    ALICEVISION_LOG_ERROR("No valid image file found in the input folder or paths");
+    return EXIT_FAILURE;
+  }
+
+  const auto nbImages = imagePaths.size();
+  std::vector<float> times_sorted = times;
+  std::vector<std::string> imagePaths_sorted;
+  std::vector<oiio::ParamValueList> metadatas_sorted;
+  xRotation.resize(nbImages, 0.0);
+  yRotation.resize(nbImages, 0.0);
+  zRotation.resize(nbImages, 0.0);
+  const std::array<std::vector<double>, 3> rotations = {xRotation, yRotation, zRotation};
+
+  // sort images according to their "DateTimeOriginal" metadata
+  // (assumes shooting times are distinct: duplicate times would resolve to the same index)
+  std::sort(times_sorted.begin(), times_sorted.end());
+  for(std::size_t i = 0; i < nbImages; ++i)
+  {
+    std::vector<float>::iterator it = std::find(times.begin(), times.end(), times_sorted[i]);
+    if(it != times.end())
+    {
+      const std::size_t index = std::distance(times.begin(), it);
+      imagePaths_sorted.push_back(imagePaths.at(index));
+      metadatas_sorted.push_back(metadatas.at(index));
+    }
+    else
+    {
+      ALICEVISION_LOG_ERROR("sorting failed");
+      return EXIT_FAILURE;
+    }
+  }
+
+  ALICEVISION_LOG_INFO(nbImages << " file paths found.");
+
+  stitchPanorama(imagePaths_sorted, metadatas_sorted, blurWidth, rotations, outputFolder);
+
+  return EXIT_SUCCESS;
+}
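For reference, a typical invocation of the new tool (the binary name comes from the CMake target added above; paths and angles are illustrative). Each rotation list takes one angle per image, in shooting order:

    aliceVision_utils_fisheyeProjection \
      --input /data/fisheye_shots --output /data/pano \
      --blurWidth 0.2 \
      --xRotation 0 0 0 --yRotation 0 0 0 --zRotation 0 0 0 \
      --verboseLevel info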