From bd8857cecd482045032e81f6431cde0f971e0533 Mon Sep 17 00:00:00 2001 From: Yuto Horikawa Date: Tue, 12 Jan 2021 00:48:09 +0900 Subject: [PATCH] Add more docstrings, and generate document by Documenter.jl (#87) * add docstrings and doctest for gausslegendre * fix jldoctest * fix jldoctest for gausslegendre * add docstring and jldoctest for gausshermite * add docstring and jldoctest for gausslaguerre * fix typos and form of README.md * add docstring and jldoctest for gausschebyshev * add docstring and jldoctest for gaussjacobi * add docstring and jldoctest for gaussradau * add docstring and jldoctest for gausslobatto * first commit for Documenter.jl * update gitignore for Documenter.jl * fix wording in docstring * update .travis.yml for Documenter * update authors in make.jl * update wording in index.md * update benchmark results * add pages for besselroots and benchmark * add badges for Documenter.jl * update docstring for besselroots * update travis badge (fix #82) * fix typos * update wording for clarity * update symbols to use unicode --- .gitignore | 4 + .travis.yml | 16 +- README.md | 124 +++++++------- docs/Project.toml | 3 + docs/make.jl | 23 +++ docs/src/benchmark.md | 35 ++++ docs/src/besselroots.md | 9 + docs/src/gaussquadrature.md | 55 ++++++ docs/src/index.md | 34 ++++ docs/src/reference.md | 17 ++ src/besselroots.jl | 73 +++++--- src/gausschebyshev.jl | 90 +++++++++- src/gausshermite.jl | 73 +++++--- src/gaussjacobi.jl | 221 +++++++++++++----------- src/gausslaguerre.jl | 333 ++++++++++++++++++++---------------- src/gausslegendre.jl | 35 +++- src/gausslobatto.jl | 30 ++++ src/gaussradau.jl | 41 ++++- test/test_gausschebyshev.jl | 12 +- test/test_gausshermite.jl | 19 +- test/test_gausslaguerre.jl | 46 ++--- 21 files changed, 872 insertions(+), 421 deletions(-) create mode 100644 docs/Project.toml create mode 100644 docs/make.jl create mode 100644 docs/src/benchmark.md create mode 100644 docs/src/besselroots.md create mode 100644 docs/src/gaussquadrature.md create mode 100644 docs/src/index.md create mode 100644 docs/src/reference.md diff --git a/.gitignore b/.gitignore index e338037..adb6ce9 100644 --- a/.gitignore +++ b/.gitignore @@ -1,2 +1,6 @@ .DS_Store /Manifest.toml +/dev/ +/docs/build/ +/docs/site/ +/docs/Manifest.toml diff --git a/.travis.yml b/.travis.yml index 5375b39..2030a30 100644 --- a/.travis.yml +++ b/.travis.yml @@ -7,9 +7,19 @@ julia: - 1.0 - 1.5 - nightly -matrix: - allow_failures: - - julia: nightly notifications: email: false +jobs: + allow_failures: + - julia: nightly + fast_finish: true + include: + - stage: Documentation + julia: nightly + script: julia --project=docs -e ' + using Pkg; + Pkg.develop(PackageSpec(path=pwd())); + Pkg.instantiate(); + include("docs/make.jl");' + after_success: skip codecov: true \ No newline at end of file diff --git a/README.md b/README.md index 8927840..8396ce8 100644 --- a/README.md +++ b/README.md @@ -1,47 +1,54 @@ FastGaussQuadrature.jl ========= -[![Build Status](https://travis-ci.org/JuliaApproximation/FastGaussQuadrature.jl.svg?branch=master)](https://travis-ci.org/JuliaApproximation/FastGaussQuadrature.jl) [![codecov](https://codecov.io/gh/JuliaApproximation/FastGaussQuadrature.jl/branch/master/graph/badge.svg)](https://codecov.io/gh/JuliaApproximation/FastGaussQuadrature.jl) +[![Stable](https://img.shields.io/badge/docs-stable-blue.svg)](https://JuliaApproximation.github.io/FastGaussQuadrature.jl/stable) 
+[![Dev](https://img.shields.io/badge/docs-dev-blue.svg)](https://JuliaApproximation.github.io/FastGaussQuadrature.jl/dev) +[![Build Status](https://travis-ci.com/JuliaApproximation/FastGaussQuadrature.jl.svg?branch=master)](https://travis-ci.com/JuliaApproximation/FastGaussQuadrature.jl) +[![codecov](https://codecov.io/gh/JuliaApproximation/FastGaussQuadrature.jl/branch/master/graph/badge.svg)](https://codecov.io/gh/JuliaApproximation/FastGaussQuadrature.jl) -A Julia package to compute `n`-point Gauss quadrature nodes and weights to 16-digit accuracy and in `O(n)` time. So far the package includes `gausschebyshev()`, `gausslegendre()`, `gaussjacobi()`, `gaussradau()`, `gausslobatto()`, `gausslaguerre()`, and `gausshermite()`. This package is heavily influenced by Chebfun. +A Julia package to compute `n`-point Gauss quadrature nodes and weights to 16-digit accuracy and in `O(n)` time. +So far the package includes `gausschebyshev()`, `gausslegendre()`, `gaussjacobi()`, `gaussradau()`, `gausslobatto()`, `gausslaguerre()`, and `gausshermite()`. +This package is heavily influenced by [Chebfun](http://www.chebfun.org). -An introduction to Gauss quadrature can be found here. For a quirky account on the history of computing Gauss-Legendre quadrature, see [6]. +An introduction to Gauss quadrature can be found [here](http://en.wikipedia.org/wiki/Gaussian_quadrature). +For a quirky account on the history of computing Gauss-Legendre quadrature, see [[6]](http://pi.math.cornell.edu/~ajt/papers/QuadratureEssay.pdf). ## Our Aims * The fastest Julia code for Gauss quadrature nodes and weights (without tabulation). - * Change the perception that Gauss quadrature rules are expensive to compute. ## Examples -Here we compute `100000` nodes and weights of the Gauss rules. Try a million or ten million. +Here we compute `100000` nodes and weights of the Gauss rules. +Try a million or ten million. -``` -@time gausschebyshev( 100000 ); -0.002681 seconds (9 allocations: 1.526 MB, 228.45% gc time) +```julia +julia> @time gausschebyshev( 100000 ); + 0.001788 seconds (4 allocations: 1.526 MiB) -@time gausslegendre( 100000 ); -0.007110 seconds (17 allocations: 2.671 MB) +julia> @time gausslegendre( 100000 ); + 0.002976 seconds (10 allocations: 2.289 MiB) -@time gaussjacobi( 100000, .9, -.1 ); -1.782347 seconds (20.84 k allocations: 1.611 GB, 22.89% gc time) +julia> @time gaussjacobi( 100000, .9, -.1 ); + 0.894373 seconds (3.59 k allocations: 1.255 GiB, 36.38% gc time) -@time gaussradau( 100000 ); -1.849520 seconds (741.84 k allocations: 1.625 GB, 22.59% gc time) +julia> @time gaussradau( 100000 ); + 0.684122 seconds (3.59 k allocations: 1.256 GiB, 21.71% gc time) -@time gausslobatto( 100000 ); -1.905083 seconds (819.73 k allocations: 1.626 GB, 23.45% gc time) +julia> @time gausslobatto( 100000 ); + 0.748166 seconds (3.57 k allocations: 1.256 GiB, 27.78% gc time) -@time gausslaguerre( 100000 ) -.891567 seconds (115.19 M allocations: 3.540 GB, 3.05% gc time) +julia> @time gausslaguerre( 100000 ); + 0.156867 seconds (7 allocations: 2.292 MiB) -@time gausshermite( 100000 ); -0.249756 seconds (201.22 k allocations: 131.643 MB, 4.92% gc time) +julia> @time gausshermite( 100000 ); + 0.175055 seconds (386 allocations: 67.916 MiB, 9.18% gc time) ``` -The paper [1] computed a billion Gauss-Legendre nodes. So here we will do a billion + 1. 
-``` -@time gausslegendre( 1000000001 ); -131.392154 seconds (17 allocations: 26.077 GB, 1.17% gc time) +The paper [[1]](http://epubs.siam.org/doi/abs/10.1137/140954969) computed a billion Gauss-Legendre nodes. +So here we will do a billion + 1. +```julia +julia> @time gausslegendre( 1000000001 ); + 24.441304 seconds (10 allocations: 22.352 GiB, 2.08% gc time) ``` (The nodes near the endpoints coalesce in 16-digits of precision.) @@ -56,80 +63,71 @@ There are four kinds of Gauss-Chebyshev quadrature rules, corresponding to four 4. 4th kind, weight function `w(x) = sqrt((1-x)/(1+x))` -They are all have explicit simple formulas for the nodes and weights [4]. +They are all have explicit simple formulas for the nodes and weights [[4]](https://books.google.co.jp/books?id=8FHf0P3to0UC). + ## The algorithm for Gauss-Legendre Gauss quadrature for the weight function `w(x) = 1`. -* For `n<=5`: Use an analytic expression. - -* For `n<=60`: Use Newton's method to solve `Pn(x)=0`. Evaluate `Pn` and `Pn'` by 3-term recurrence. Weights are related to `Pn'`. - -* For `n>60`: Use asymptotic expansions for the Legendre nodes and weights [1]. +* For `n ≤ 5`: Use an analytic expression. +* For `n ≤ 60`: Use Newton's method to solve `Pₙ(x)=0`. Evaluate Legendre polynomials `Pₙ` and their derivatives `Pₙ'` by 3-term recurrence. Weights are related to `Pₙ'`. +* For `n > 60`: Use asymptotic expansions for the Legendre nodes and weights [[1]](http://epubs.siam.org/doi/abs/10.1137/140954969). ## The algorithm for Gauss-Jacobi -Gauss quadrature for the weight functions `w(x) = (1-x)^a(1+x)^b`, `a,b>-1`. - -* For `n<=100`: Use Newton's method to solve `Pn(x)=0`. Evaluate `Pn` and `Pn'` by three-term recurrence. +Gauss quadrature for the weight functions `w(x) = (1-x)^a(1+x)^b`, `a,b > -1`. -* For `n>100`: Use Newton's method to solve `Pn(x)=0`. Evaluate `Pn` and `Pn'` by an asymptotic expansion (in the interior of `[-1,1]`) and the three-term recurrence `O(n^-2)` close to the endpoints. (This is a small modification to the algorithm described in [3].) - -* For `max(a,b)>5`: Use the Golub-Welsch algorithm requiring `O(n^2)` operations. +* For `n ≤ 100`: Use Newton's method to solve `Pₙ(x)=0`. Evaluate `Pₙ` and `Pₙ'` by three-term recurrence. +* For `n > 100`: Use Newton's method to solve `Pₙ(x)=0`. Evaluate `Pₙ` and `Pₙ'` by an asymptotic expansion (in the interior of `[-1,1]`) and the three-term recurrence `O(n^-2)` close to the endpoints. (This is a small modification to the algorithm described in [[3]](http://epubs.siam.org/doi/abs/10.1137/120889873).) +* For `max(a,b) > 5`: Use the Golub-Welsch algorithm requiring `O(n^2)` operations. ## The algorithm for Gauss-Radau Gauss quadrature for the weight function `w(x)=1`, except the endpoint `-1` is included as a quadrature node. -The Gauss-Radau nodes and weights can be computed via the `(0,1)` Gauss-Jacobi nodes and weights [3]. +The Gauss-Radau nodes and weights can be computed via the `(0,1)` Gauss-Jacobi nodes and weights[[3]](http://epubs.siam.org/doi/abs/10.1137/120889873). ## The algorithm for Gauss-Lobatto Gauss quadrature for the weight function `w(x)=1`, except the endpoints `-1` and `1` are included as nodes. -The Gauss-Lobatto nodes and weights can be computed via the `(1,1)` Gauss-Jacobi nodes and weights [3]. +The Gauss-Lobatto nodes and weights can be computed via the `(1,1)` Gauss-Jacobi nodes and weights[[3]](http://epubs.siam.org/doi/abs/10.1137/120889873). 
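For example, the fixed endpoints and the expected degrees of exactness of these two rules can be checked directly. A minimal sketch, assuming `FastGaussQuadrature` and `LinearAlgebra` are loaded:

```julia
using FastGaussQuadrature, LinearAlgebra

x, w = gaussradau(3)
x[1] ≈ -1                 # the left endpoint is always a node
dot(w, x.^4) ≈ 2/5        # an n-point Radau rule is exact for degrees ≤ 2n-2

x, w = gausslobatto(4)
x[1] ≈ -1 && x[end] ≈ 1   # both endpoints are nodes
dot(w, x.^4) ≈ 2/5        # an n-point Lobatto rule is exact for degrees ≤ 2n-3
```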
## The algorithm for Gauss-Laguerre Gauss quadrature for the weight function `w(x) = exp(-x)` on `[0,Inf)` -* For `n<128`: Use the Golub-Welsch algorithm. - -* For `method=GLR`: Use the Glaser-Lui-Rohklin algorithm. Evaluate `Ln` and `Ln'` by using Taylor series expansions near roots generated by solving the second-order differential equation that `Ln` satisfies, see [2]. - -* For `n>=128`: Use a Newton procedure on Riemann-Hilbert asymptotics of Laguerre polynomials, see [5], based on [8]. There are some heuristics to decide which expression to use, it allows a general weight `w(x) = x^alpha exp(-q_m x^m)` and this is O(sqrt(n)) when allowed to stop when the weights are below the smallest positive floating point number. +* For `n < 128`: Use the Golub-Welsch algorithm. +* For `method=GLR`: Use the Glaser-Lui-Rohklin algorithm. Evaluate Laguerre polynomials `Lₙ` and their derivatives `Lₙ'` by using Taylor series expansions near roots generated by solving the second-order differential equation that `Lₙ` satisfies, see [[2]](http://epubs.siam.org/doi/pdf/10.1137/06067016X). +* For `n ≥ 128`: Use a Newton procedure on Riemann-Hilbert asymptotics of Laguerre polynomials, see [5], based on [8]. There are some heuristics to decide which expression to use, it allows a general weight `w(x) = x^alpha exp(-q_m x^m)` and this is O(sqrt(n)) when allowed to stop when the weights are below the smallest positive floating point number. ## The algorithm for Gauss-Hermite Gauss quadrature for the weight function `w(x) = exp(-x^2)` on the real line. -* For `n<200`: Use Newton's method to solve `Hn(x)=0`. Evaluate `Hn` and `Hn'` by three-term recurrence. +* For `n < 200`: Use Newton's method to solve `Hₙ(x)=0`. Evaluate Hermite polynomials `Hₙ` and their derivatives `Hₙ'` by three-term recurrence. +* For `n ≥ 200`: Use Newton's method to solve `Hₙ(x)=0`. Evaluate `Hₙ` and `Hₙ'` by a uniform asymptotic expansion, see [[7]](http://arxiv.org/abs/1410.5286). -* For `n>=200`: Use Newton's method to solve `Hn(x)=0`. Evaluate `Hn` and `Hn'` by a uniform asymptotic expansion, see [7]. -* -The paper [7] also derives an `O(n)` algorithm for generalized Gauss-Hermite nodes and weights associated to weight functions of the form `exp(-V(x))`, where `V(x)` is a real polynomial. +The paper [[7]](http://arxiv.org/abs/1410.5286) also derives an `O(n)` algorithm for generalized Gauss-Hermite nodes and weights associated to weight functions of the form `exp(-V(x))`, where `V(x)` is a real polynomial. ## Example usage - - -``` -@time nodes, weights = gausslegendre( 100000 ); -0.007890 seconds (19 allocations: 2.671 MB) +```julia +julia> @time nodes, weights = gausslegendre( 100000 ); + 0.002192 seconds (10 allocations: 2.289 MiB) # integrates f(x) = x^2 from -1 to 1 -@time dot( weights, nodes.^2 ) -0.004264 seconds (7 allocations: 781.484 KB) -0.666666666666666 +julia> @time dot( weights, nodes.^2 ) + 0.000184 seconds (7 allocations: 781.422 KiB) +0.6666666666666665 ``` ## References: -[1] I. Bogaert, "Iteration-free computation of Gauss-Legendre quadrature nodes and weights", SIAM J. Sci. Comput., 36(3), A1008-A1026, 2014. - -[2] A. Glaser, X. Liu, and V. Rokhlin. "A fast algorithm for the calculation of the roots of special functions." SIAM J. Sci. Comput., 29 (2007), 1420-1438. +[1] I. Bogaert, ["Iteration-free computation of Gauss-Legendre quadrature nodes and weights"](http://epubs.siam.org/doi/abs/10.1137/140954969), SIAM J. Sci. Comput., 36(3), A1008-A1026, 2014. + +[2] A. Glaser, X. Liu, and V. Rokhlin. 
["A fast algorithm for the calculation of the roots of special functions."](http://epubs.siam.org/doi/pdf/10.1137/06067016X) SIAM J. Sci. Comput., 29 (2007), 1420-1438. -[3] N. Hale and A. Townsend, "Fast and accurate computation of Gauss-Legendre and Gauss-Jacobi quadrature - nodes and weights", SIAM J. Sci. Comput., 2012. +[3] N. Hale and A. Townsend, ["Fast and accurate computation of Gauss-Legendre and Gauss-Jacobi quadrature nodes and weights"](http://epubs.siam.org/doi/abs/10.1137/120889873), SIAM J. Sci. Comput., 2012. -[4] J. C. Mason and D. C. Handscomb, "Chebyshev Polynomials", CRC Press, 2002. +[4] J. C. Mason and D. C. Handscomb, ["Chebyshev Polynomials"](https://books.google.co.jp/books?id=8FHf0P3to0UC), CRC Press, 2002. [5] P. Opsomer, (in preparation). -[6] A. Townsend, The race for high order Gauss-Legendre quadrature, in SIAM News, March 2015. +[6] A. Townsend, [The race for high order Gauss-Legendre quadrature](http://pi.math.cornell.edu/~ajt/papers/QuadratureEssay.pdf), in SIAM News, March 2015. -[7] A. Townsend, T. Trogdon, and S. Olver, "Fast computation of Gauss quadrature nodes and weights on the whole real line", to appear in IMA Numer. Anal., 2014. +[7] A. Townsend, T. Trogdon, and S. Olver, ["Fast computation of Gauss quadrature nodes and weights on the whole real line"](http://arxiv.org/abs/1410.5286), to appear in IMA Numer. Anal., 2014. [8] M. Vanlessen, "Strong asymptotics of Laguerre-Type orthogonal polynomials and applications in Random Matrix Theory", Constr. Approx., 25:125-175, 2007. diff --git a/docs/Project.toml b/docs/Project.toml new file mode 100644 index 0000000..84283cc --- /dev/null +++ b/docs/Project.toml @@ -0,0 +1,3 @@ +[deps] +Documenter = "e30172f5-a6a5-5a46-863b-614d45cd2de4" +FastGaussQuadrature = "442a2c76-b920-505d-bb47-c5924d526838" diff --git a/docs/make.jl b/docs/make.jl new file mode 100644 index 0000000..e1e558a --- /dev/null +++ b/docs/make.jl @@ -0,0 +1,23 @@ +using Documenter +using FastGaussQuadrature + +makedocs(; + modules = [FastGaussQuadrature], + format = Documenter.HTML( + canonical = "https://JuliaApproximation.github.io/FastGaussQuadrature.jl/stable/", + assets = ["assets/favicon.ico"], + ), + pages = [ + "Home" => "index.md", + "Gaussian Quadrature" => "gaussquadrature.md", + "Benchmark" => "benchmark.md", + "Roots of Bessel function" => "besselroots.md", + "References" => "reference.md", + ], + repo = "https://github.com/JuliaApproximation/FastGaussQuadrature.jl/blob/{commit}{path}#L{line}", + sitename = "FastGaussQuadrature.jl", + authors = "Alex Townsend, Sheehan Olver, Peter Opsomer, and contributors.", + assets = String[], +) + +deploydocs(; repo = "github.com/JuliaApproximation/FastGaussQuadrature.jl") diff --git a/docs/src/benchmark.md b/docs/src/benchmark.md new file mode 100644 index 0000000..279bed5 --- /dev/null +++ b/docs/src/benchmark.md @@ -0,0 +1,35 @@ +# Benchmark + +Here we compute `100000` nodes and weights of the Gauss rules. +Try a million or ten million. 
+ +```julia +julia> @time gausschebyshev( 100000 ); + 0.001788 seconds (4 allocations: 1.526 MiB) + +julia> @time gausslegendre( 100000 ); + 0.002976 seconds (10 allocations: 2.289 MiB) + +julia> @time gaussjacobi( 100000, .9, -.1 ); + 0.894373 seconds (3.59 k allocations: 1.255 GiB, 36.38% gc time) + +julia> @time gaussradau( 100000 ); + 0.684122 seconds (3.59 k allocations: 1.256 GiB, 21.71% gc time) + +julia> @time gausslobatto( 100000 ); + 0.748166 seconds (3.57 k allocations: 1.256 GiB, 27.78% gc time) + +julia> @time gausslaguerre( 100000 ); + 0.156867 seconds (7 allocations: 2.292 MiB) + +julia> @time gausshermite( 100000 ); + 0.175055 seconds (386 allocations: 67.916 MiB, 9.18% gc time) +``` + +The paper [[1]](http://epubs.siam.org/doi/abs/10.1137/140954969) computed a billion Gauss-Legendre nodes. +So here we will do a billion + 1. +```julia +julia> @time gausslegendre( 1000000001 ); + 24.441304 seconds (10 allocations: 22.352 GiB, 2.08% gc time) +``` +(The nodes near the endpoints coalesce in 16-digits of precision.) diff --git a/docs/src/besselroots.md b/docs/src/besselroots.md new file mode 100644 index 0000000..f327fc5 --- /dev/null +++ b/docs/src/besselroots.md @@ -0,0 +1,9 @@ +# Roots of Bessel function + +Since [SpecialFunctions.jl](https://github.com/JuliaMath/SpecialFunctions.jl) doesn't have a method to calculate roots of [Bessel function](https://en.wikipedia.org/wiki/Bessel_function), we implemented `besselroots`. + +```@docs +besselroots(ν::Real, n::Integer) +``` + +This method `besselroots` is used to calculate `gaussjacobi` and `gausslaguerre`. diff --git a/docs/src/gaussquadrature.md b/docs/src/gaussquadrature.md new file mode 100644 index 0000000..1a491f4 --- /dev/null +++ b/docs/src/gaussquadrature.md @@ -0,0 +1,55 @@ +# Gaussian Quadrature + + +## Gauss-Legendre quadrature + +```@docs +gausslegendre(n::Integer) +``` + + +## Gauss-Hermite quadrature + +```@docs +gausshermite(n::Integer) +``` + + +## Gauss-Laguerre quadrature + +```@docs +gausslaguerre(n::Integer) +``` + +```@docs +gausslaguerre(n::Integer, α::Real) +``` + + +## Gauss-Chebyshev quadrature + +```@docs +gausschebyshev(n::Integer, kind::Integer) +``` + + +## Gauss-Jacobi quadrature + +```@docs +gaussjacobi(n::Integer, α::Real, β::Real) +``` + + +## Gauss-Radau quadrature + +```@docs +gaussradau(n::Integer) +``` + + +## Gauss-Lobatto quadrature + +```@docs +gausslobatto(n::Integer) +``` + diff --git a/docs/src/index.md b/docs/src/index.md new file mode 100644 index 0000000..8594530 --- /dev/null +++ b/docs/src/index.md @@ -0,0 +1,34 @@ +# FastGaussQuadrature.jl + +## Abstract + +FastGaussQuadrature.jl is a Julia package to compute `n`-point Gauss quadrature nodes and weights to 16-digit accuracy and in `O(n)` time. +So far the package includes `gausschebyshev()`, `gausslegendre()`, `gaussjacobi()`, `gaussradau()`, `gausslobatto()`, `gausslaguerre()`, and `gausshermite()`. +This package is heavily influenced by [Chebfun](http://www.chebfun.org). + +An introduction to Gauss quadrature can be found [here](http://en.wikipedia.org/wiki/Gaussian_quadrature). +For a quirky account on the history of computing Gauss-Legendre quadrature, see [[6]](http://pi.math.cornell.edu/~ajt/papers/QuadratureEssay.pdf). + +## Our Aims + +* The fastest Julia code for Gauss quadrature nodes and weights (without tabulation). +* Change the perception that Gauss quadrature rules are expensive to compute. 
+ +## First example +To check an integral +```math +\int_{-1}^{1} x^4 dx = \frac{2}{5} +``` +by numerically, try following code. +```julia +julia> using FastGaussQuadrature, LinearAlgebra + +julia> x, w = gausslegendre(3); + +julia> f(x) = x^4; + +julia> I = dot(w, f.(x)); + +julia> I ≈ 2/5 +true +``` diff --git a/docs/src/reference.md b/docs/src/reference.md new file mode 100644 index 0000000..312a061 --- /dev/null +++ b/docs/src/reference.md @@ -0,0 +1,17 @@ +# References + +[1] I. Bogaert, ["Iteration-free computation of Gauss-Legendre quadrature nodes and weights"](http://epubs.siam.org/doi/abs/10.1137/140954969), SIAM J. Sci. Comput., 36(3), A1008-A1026, 2014. + +[2] A. Glaser, X. Liu, and V. Rokhlin. ["A fast algorithm for the calculation of the roots of special functions."](http://epubs.siam.org/doi/pdf/10.1137/06067016X) SIAM J. Sci. Comput., 29 (2007), 1420-1438. + +[3] N. Hale and A. Townsend, ["Fast and accurate computation of Gauss-Legendre and Gauss-Jacobi quadrature nodes and weights"](http://epubs.siam.org/doi/abs/10.1137/120889873), SIAM J. Sci. Comput., 2012. + +[4] J. C. Mason and D. C. Handscomb, ["Chebyshev Polynomials"](https://books.google.co.jp/books?id=8FHf0P3to0UC), CRC Press, 2002. + +[5] P. Opsomer, (in preparation). + +[6] A. Townsend, [The race for high order Gauss-Legendre quadrature](http://pi.math.cornell.edu/~ajt/papers/QuadratureEssay.pdf), in SIAM News, March 2015. + +[7] A. Townsend, T. Trogdon, and S. Olver, ["Fast computation of Gauss quadrature nodes and weights on the whole real line"](http://arxiv.org/abs/1410.5286), to appear in IMA Numer. Anal., 2014. + +[8] M. Vanlessen, "Strong asymptotics of Laguerre-Type orthogonal polynomials and applications in Random Matrix Theory", Constr. Approx., 25:125-175, 2007. diff --git a/src/besselroots.jl b/src/besselroots.jl index 6fe8508..a9d84eb 100644 --- a/src/besselroots.jl +++ b/src/besselroots.jl @@ -1,61 +1,80 @@ -function besselroots(nu::Real, n::Integer) +@doc raw""" + gausslegendre(ν::Real, n::Integer) -> Vector{Float64} + +Return the first ``n`` roots of [Bessel function](https://en.wikipedia.org/wiki/Bessel_function). 
+ +```math +J_{\nu}(x) = \sum_{m=0}^{\infty}\frac{(-1)^j}{\Gamma(\nu+j+1)j!} \left(\frac{x}{2}\right)^{2j+\nu} +``` + +# Examples +```jldoctest; setup = :(using FastGaussQuadrature, SpecialFunctions) +julia> ν = 0.3; + +julia> roots = besselroots(ν, 10); + +julia> zeros = (x -> besselj(ν, x)).(roots); + +julia> all(zeros .< 1e-12) +true +``` +""" +function besselroots(ν::Real, n::Integer) # FIXME (related issue #22 and #80) - return besselroots(Float64(nu), n) + return besselroots(Float64(ν), n) end -function besselroots(nu::Float64, n::Integer) -#BESSELROOTS The first N roots of the function J_v(x) - +function besselroots(ν::Float64, n::Integer) # DEVELOPERS NOTES: -# V = 0 --> Full Float64 precision for N <= 20 (Wolfram Alpha), and very -# accurate approximations for N > 20 (McMahon's expansion) -# -1 <= V <= 5 : V ~= 0 --> 12 decimal figures for the 6 first zeros +# ν = 0 --> Full Float64 precision for n ≤ 20 (Wolfram Alpha), and very +# accurate approximations for n > 20 (McMahon's expansion) +# -1 ≤ ν ≤ 5 : ν ~= 0 --> 12 decimal figures for the 6 first zeros # (Piessens's Chebyshev series approximations), and very accurate # approximations for the others (McMahon's expansion) -# V > 5 --> moderately accurate for the 6 first zeros and good +# ν > 5 --> moderately accurate for the 6 first zeros and good # approximations for the others (McMahon's expansion) # This code was originally written by L. L. Peixoto in MATLAB. # Later modified by A. Townsend to work in Julia if n < 0 - throw(DomainError(n, "Input N must be a non-negative integer")) + throw(DomainError(n, "Input n must be a non-negative integer")) end x = zeros(n) - if n > 0 && nu == 0 + if n > 0 && ν == 0 for k in 1:min(n,20) x[k] = J0_roots[k] end for k in min(n,20)+1:n - x[k] = McMahon(nu, k) + x[k] = McMahon(ν, k) end - elseif n > 0 && nu >= -1 && nu <= 5 - correctFirstFew = Piessens( nu ) + elseif n > 0 && ν ≥ -1 && ν ≤ 5 + correctFirstFew = Piessens( ν ) for k in 1:min(n,6) x[k] = correctFirstFew[k] end for k in min(n,6)+1:n - x[k] = McMahon(nu, k) + x[k] = McMahon(ν, k) end - elseif nu > 5 + elseif ν > 5 for k in 1:n - x[k] = McMahon(nu, k) + x[k] = McMahon(ν, k) end end return x end -function McMahon(nu::Real, k::Integer) +function McMahon(ν::Real, k::Integer) # FIXME (related issue #22 and #80) - return McMahon(Float64(nu), k) + return McMahon(Float64(ν), k) end -function McMahon(nu::Float64, k::Integer) +function McMahon(ν::Float64, k::Integer) # McMahon's expansion. This expansion gives very accurate approximation - # for the sth zero (s >= 7) in the whole region V >=- 1, and moderate + # for the sth zero (s ≥ 7) in the whole region ν ≥ -1, and moderate # approximation in other cases. - mu = 4nu^2 + mu = 4ν^2 a1 = 1 / 8 a3 = (7mu-31) / 384 a5 = 4*(3779+mu*(-982+83mu)) / 61440 # Evaluate via Horner's method. @@ -65,7 +84,7 @@ function McMahon(nu::Float64, k::Integer) mu*(-287149133 + 5592657mu))))) / 10463949619200 a13 = 576 *(423748443625564327 + mu*(-100847472093088506 + mu*(8929489333108377 + mu*(-426353946885548+mu*(13172003634537+mu*(-291245357370 + 4148944183mu)))))) / 13059009124761600 - b = 0.25 * (2nu+4k-1)*pi + b = 0.25 * (2ν+4k-1)*π # Evaluate using Horner's scheme: x = b - (mu-1)*( ((((((a13/b^2 + a11)/b^2 + a9)/b^2 + a7)/b^2 + a5)/b^2 + a3)/b^2 + a1)/b) return x @@ -129,17 +148,17 @@ const Piessens_C = [ 0.000000000001 0.000000000002 0 0 0 0] -function Piessens(nu::Float64) +function Piessens(ν::Float64) # Piessens's Chebyshev series approximations (1984). 
Calculates the 6 first - # zeros to at least 12 decimal figures in region -1 <= V <= 5: + # zeros to at least 12 decimal figures in region -1 ≤ ν ≤ 5: C = Piessens_C T = Array{Float64}(undef,size(C,1)) - pt = (nu-2)/3 + pt = (ν-2)/3 T[1], T[2] = 1., pt for k = 2:size(C,1)-1 T[k+1] = 2*pt*T[k] - T[k-1] end y = C'*T - y[1] *= sqrt(nu+1) # Scale the first root. + y[1] *= sqrt(ν+1) # Scale the first root. return y end diff --git a/src/gausschebyshev.jl b/src/gausschebyshev.jl index cdcf4a1..76d5128 100644 --- a/src/gausschebyshev.jl +++ b/src/gausschebyshev.jl @@ -1,3 +1,91 @@ +@doc raw""" + gausschebyshev(n::Integer) -> Tuple{Vector{Float64},Vector{Float64}} + gausschebyshev(n::Integer, 1) -> Tuple{Vector{Float64},Vector{Float64}} + +Return nodes and weights of [Gauss-Chebyshev quadrature](https://en.wikipedia.org/wiki/Chebyshev%E2%80%93Gauss_quadrature) of the 1st kind. + +```math +\int_{-1}^{1} \frac{f(x)}{\sqrt{1-x^2}} dx \approx \sum_{i=1}^{n} w_i f(x_i) +``` + +# Examples +```jldoctest; setup = :(using FastGaussQuadrature, LinearAlgebra) +julia> x, w = gausschebyshev(3); + +julia> f(x) = x^4; + +julia> I = dot(w, f.(x)); + +julia> I ≈ 3π/8 +true +``` + +--- + + gausschebyshev(n::Integer, 2) -> Tuple{Vector{Float64},Vector{Float64}} + +Return nodes and weights of [Gauss-Chebyshev quadrature](https://en.wikipedia.org/wiki/Chebyshev%E2%80%93Gauss_quadrature) of the 2nd kind. + +```math +\int_{-1}^{1} f(x)\sqrt{1-x^2} dx \approx \sum_{i=1}^{n} w_i f(x_i) +``` + +# Examples +```jldoctest; setup = :(using FastGaussQuadrature, LinearAlgebra) +julia> x, w = gausschebyshev(3, 2); + +julia> f(x) = x^4; + +julia> I = dot(w, f.(x)); + +julia> I ≈ π/16 +true +``` + +--- + + gausschebyshev(n::Integer, 3) -> Tuple{Vector{Float64},Vector{Float64}} + +Return nodes and weights of [Gauss-Chebyshev quadrature](https://en.wikipedia.org/wiki/Chebyshev%E2%80%93Gauss_quadrature) of the 3rd kind. + +```math +\int_{-1}^{1} f(x)\sqrt{1-x^2} dx \approx \sum_{i=1}^{n} w_i f(x_i) +``` + +# Examples +```jldoctest; setup = :(using FastGaussQuadrature, LinearAlgebra) +julia> x, w = gausschebyshev(3, 3); + +julia> f(x) = x^4; + +julia> I = dot(w, f.(x)); + +julia> I ≈ 3π/8 +true +``` + +--- + + gausschebyshev(n::Integer, 4) -> Tuple{Vector{Float64},Vector{Float64}} + +Return nodes and weights of [Gauss-Chebyshev quadrature](https://en.wikipedia.org/wiki/Chebyshev%E2%80%93Gauss_quadrature) of the 4th kind. + +```math +\int_{-1}^{1} f(x)\sqrt{1-x^2} dx \approx \sum_{i=1}^{n} w_i f(x_i) +``` + +# Examples +```jldoctest; setup = :(using FastGaussQuadrature, LinearAlgebra) +julia> x, w = gausschebyshev(3, 4); + +julia> f(x) = x^4; + +julia> I = dot(w, f.(x)); + +julia> I ≈ 3π/8 +true +``` +""" function gausschebyshev(n::Integer, kind::Integer=1) # GAUSS-CHEBYSHEV NODES AND WEIGHTS. 
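    # The four kinds have classical closed-form nodes and weights
    # (k = 1,…,n, up to the ordering of the nodes):
    #   1st kind: xₖ = cos((2k-1)π/(2n)),      wₖ = π/n
    #   2nd kind: xₖ = cos(kπ/(n+1)),          wₖ = π/(n+1)·sin²(kπ/(n+1))
    #   3rd kind: xₖ = cos((k-1/2)π/(n+1/2)),  wₖ = 2π/(n+1/2)·cos²((k-1/2)π/(2n+1))
    #   4th kind: xₖ = cos(kπ/(n+1/2)),        wₖ = 2π/(n+1/2)·sin²(kπ/(2n+1))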
@@ -16,7 +104,7 @@ function gausschebyshev(n::Integer, kind::Integer=1) elseif kind == 3 # Gauss-ChebyshevV quadrature, i.e., w(x) = sqrt((1+x)/(1-x)) return ([cos((k - .5) * π / (n + .5)) for k = n:-1:1], - [2π / (n + .5) * cos((k - .5) * π / (2 * (n + .5)))^2 for k = n:-1:1]) + [2π / (n + .5) * cos((k - .5) * π / (2 * (n + .5)))^2 for k = n:-1:1]) elseif kind == 4 # Gauss-ChebyshevW quadrature, i.e., w(x) = sqrt((1-x)/(1+x)) return ([cos(k * π / (n + .5)) for k = n:-1:1], diff --git a/src/gausshermite.jl b/src/gausshermite.jl index 609e901..00d6fdc 100644 --- a/src/gausshermite.jl +++ b/src/gausshermite.jl @@ -1,3 +1,24 @@ +@doc raw""" + gausshermite(n::Integer) -> Tuple{Vector{Float64},Vector{Float64}} + +Return nodes and weights of [Gauss-Hermite quadrature](https://en.wikipedia.org/wiki/Gauss%E2%80%93Hermite_quadrature). + +```math +\int_{-\infty}^{+\infty} f(x) \exp(-x^2) dx \approx \sum_{i=1}^{n} w_i f(x_i) +``` + +# Examples +```jldoctest; setup = :(using FastGaussQuadrature, LinearAlgebra) +julia> x, w = gausshermite(3); + +julia> f(x) = x^4; + +julia> I = dot(w, f.(x)); + +julia> I ≈ 3(√π)/4 +true +``` +""" function gausshermite(n::Integer) x,w = unweightedgausshermite(n) w .*= exp.(-x.^2) @@ -10,11 +31,11 @@ function unweightedgausshermite(n::Integer) elseif n == 0 return Float64[],Float64[] elseif n == 1 - return [0.0],[sqrt(pi)] - elseif n <= 20 + return [0.0],[sqrt(π)] + elseif n ≤ 20 # GW algorithm x = hermpts_gw(n) - elseif n <= 200 + elseif n ≤ 200 # REC algorithm x = hermpts_rec(n) else @@ -38,22 +59,22 @@ end function hermpts_asy(n::Integer) # Compute Hermite nodes and weights using asymptotic formula - x0 = HermiteInitialGuesses(n) # get initial guesses + x0 = HermiteInitialGuesses(n) # get initial guesses t0 = x0./sqrt(2n+1) - theta0 = acos.(t0) # convert to theta-variable + theta0 = acos.(t0) # convert to theta-variable val = x0; for k = 1:20 val = hermpoly_asy_airy(n, theta0); dt = -val[1]./(sqrt(2).*sqrt(2n+1).*val[2].*sin.(theta0)) - theta0 .-= dt; # Newton update + theta0 .-= dt # Newton update if norm(dt,Inf) < sqrt(eps(Float64))/10 break end end t0 = cos.(theta0) - x = sqrt(2n+1)*t0 #back to x-variable + x = sqrt(2n+1)*t0 #back to x-variable w = x.*val[1] .+ sqrt(2).*val[2] - w .= 1 ./ w.^2; # quadrature weights + w .= 1 ./ w.^2 # quadrature weights return x, w end @@ -74,7 +95,7 @@ function hermpts_rec(n::Integer) end end x0 ./= sqrt(2) - w = 1 ./ last.(val).^2 # quadrature weights + w = 1 ./ last.(val).^2 # quadrature weights return x0, w end @@ -90,7 +111,7 @@ function hermpoly_rec(n::Integer, x0) H = x0 for k = 1:n-1 Hold, H = H, (x0*H/sqrt(k+1) - Hold/sqrt(1+1/k)) - while abs(H) ≥ 100 && wc < n # regularise + while abs(H) ≥ 100 && wc < n # regularise H *= w Hold *= w wc += 1 @@ -121,7 +142,7 @@ function hermpoly_rec(r::Base.OneTo, x0) push!(ret, H) for k = 1:n-1 Hold, H = H, (x0*H/sqrt(k+1) - Hold/sqrt(1+1/k)) - while abs(H) ≥ 100 && wc < p # regularise + while abs(H) ≥ 100 && wc < p # regularise ret .*= w H *= w Hold *= w @@ -149,7 +170,7 @@ function hermpoly_asy_airy(n::Integer, theta) eta = 0.5 .* theta .- 0.25 .* sin2T chi = -(3*eta/2).^(2/3) phi = (-chi./sinT.^2).^(1/4) - C = 2*sqrt(pi)*musq^(1/6)*phi + C = 2*sqrt(π)*musq^(1/6)*phi Airy0 = real.(airyai.(musq.^(2/3).*chi)) Airy1 = real.(airyaiprime.(musq.^(2/3).*chi)) @@ -188,7 +209,7 @@ function hermpoly_asy_airy(n::Integer, theta) eta = .5*theta - .25*sin2T chi = -(3*eta/2).^(2/3) phi = (-chi./sinT.^2).^(1/4) - C = sqrt(2*pi)*musq^(1/3)./phi + C = sqrt(2*π)*musq^(1/3)./phi # v polynomials in 
(12.10.10) v0 = 1; @@ -228,22 +249,22 @@ let T(t) = @. t^(2/3)*(1+5/48*t^(-2)-5/36*t^(-4)+(77125/82944)*t^(-6) -108056875 # rappresentazione asintotica, Ann. Mat. Pura Appl. 26 (1947), pp. 283-300. # Error if n < 20 because initial guesses are based on asymptotic expansions: - @assert n>=20 + @assert n ≥ 20 # Gatteschi formula involving airy roots [1]. # These initial guess are good near x = sqrt(n+1/2); if mod(n,2) == 1 m = (n-1)>>1 - bess = (1:m)*pi + bess = (1:m)*π a = .5 else m = n>>1 - bess = ((0:m-1) .+ 0.5)*pi + bess = ((0:m-1) .+ 0.5)*π a = -.5 end nu = 4*m + 2*a + 2 - airyrts = -T(3/8*pi*(4*(1:m) .- 1)) + airyrts = -T(3/8*π*(4*(1:m) .- 1)) # Roots of Airy function in Float64 # https://mathworld.wolfram.com/AiryFunctionZeros.html @@ -266,11 +287,11 @@ let T(t) = @. t^(2/3)*(1+5/48*t^(-2)-5/36*t^(-4)+(77125/82944)*t^(-6) -108056875 # Tricomi initial guesses. Equation (2.1) in [1]. Originally in [2]. # These initial guesses are good near x = 0 . Note: zeros of besselj(+/-.5,x) - # are integer and half-integer multiples of pi. + # are integer and half-integer multiples of π. # x_init_bess = bess/sqrt(nu).*sqrt((1+ (bess.^2+2*(a^2-1))/3/nu^2) ); - Tnk0 = fill(pi/2,m) + Tnk0 = fill(π/2,m) nu = (4*m+2*a+2) - rhs = ((4*m+3) .- 4*(1:m))/nu*pi + rhs = ((4*m+3) .- 4*(1:m))/nu*π for k = 1:7 val = Tnk0 .- sin.(Tnk0) .- rhs @@ -300,14 +321,14 @@ end function hermpts_gw(n::Integer) - # Golub--Welsch algorithm. Used here for n<=20. + # Golub--Welsch algorithm. Used here for n ≤ 20. - beta = sqrt.((1:n-1)/2) # 3-term recurrence coeffs + beta = sqrt.((1:n-1)/2) # 3-term recurrence coeffs T = SymTridiagonal(zeros(n), beta) # Jacobi matrix - (D, V) = eigen(T) # Eigenvalue decomposition - indx = sortperm(D) # Hermite points - x = D[indx] - w = sqrt(pi)*V[1,indx].^2 # weights + (D, V) = eigen(T) # Eigenvalue decomposition + indx = sortperm(D) # Hermite points + x = D[indx] # nodes + w = sqrt(π)*V[1,indx].^2 # weights # Enforce symmetry: ii = floor(Int, n/2)+1:n diff --git a/src/gaussjacobi.jl b/src/gaussjacobi.jl index d1413f8..d8a2012 100644 --- a/src/gaussjacobi.jl +++ b/src/gaussjacobi.jl @@ -1,41 +1,62 @@ -function gaussjacobi(n::Integer, a::Real, b::Real) +@doc raw""" + gaussjacobi(n::Integer) -> Tuple{Vector{Float64},Vector{Float64}} + +Return nodes and weights of [Gauss-Jacobi quadrature](https://en.wikipedia.org/wiki/Gauss%E2%80%93Jacobi_quadrature). + +```math +\int_{-1}^{1} f(x) (1-x)^\alpha(1+x)^\beta dx \approx \sum_{i=1}^{n} w_i f(x_i) +``` + +# Examples +```jldoctest; setup = :(using FastGaussQuadrature, LinearAlgebra) +julia> x, w = gaussjacobi(3, 1/3, -1/3); + +julia> f(x) = x^4; + +julia> I = dot(w, f.(x)); + +julia> I ≈ 268π/729(√3) +true +``` +""" +function gaussjacobi(n::Integer, α::Real, β::Real) #GAUSS-JACOBI QUADRATURE NODES AND WEIGHTS if n < 0 - throw(DomainError(n, "gaussjacobi($n,$a,$b) not defined: n must be non-negative.")) - elseif a == 0. && b == 0. + throw(DomainError(n, "gaussjacobi($n,$α,$β) not defined: n must be non-negative.")) + elseif α == 0. && β == 0. gausslegendre(n) - elseif a == -0.5 && b == -0.5 + elseif α == -0.5 && β == -0.5 gausschebyshev(n, 1) - elseif a == 0.5 && b == 0.5 + elseif α == 0.5 && β == 0.5 gausschebyshev(n, 2) - elseif a == -0.5 && b == 0.5 + elseif α == -0.5 && β == 0.5 gausschebyshev(n, 3) - elseif a == 0.5 && b == -0.5 + elseif α == 0.5 && β == -0.5 gausschebyshev(n, 4) elseif n == 0 Float64[], Float64[] elseif n == 1 - [(b - a) / (a + b + 2)], [2^(a + b + 1) * beta(a + 1, b + 1)] - elseif min(a,b) <= -1. 
- throw(DomainError((a,b), "The Jacobi parameters correspond to a nonintegrable weight function")) - elseif n <= 100 && max(a,b) < 5. - jacobi_rec(n, a, b) - elseif n > 100 && max(a,b) < 5. - jacobi_asy(n, a, b) - elseif n <= 4000 && max(a,b)>=5. - jacobi_gw(n, a, b) + [(β - α) / (α + β + 2)], [2^(α + β + 1) * beta(α + 1, β + 1)] + elseif min(α,β) ≤ -1. + throw(DomainError((α,β), "The Jacobi parameters correspond to a nonintegrable weight function")) + elseif n ≤ 100 && max(α,β) < 5. + jacobi_rec(n, α, β) + elseif n > 100 && max(α,β) < 5. + jacobi_asy(n, α, β) + elseif n ≤ 4000 && max(α,β) ≥ 5. + jacobi_gw(n, α, β) else - error("gaussjacobi($n,$a,$b) is not implemented: n must be ≤ 4000 for max(a,b)≥5.") + error("gaussjacobi($n,$α,$β) is not implemented: n must be ≤ 4000 for max(α,β) ≥ 5.") end end # Convenience function: convert any kind of numbers a and b to a joint floating point type -jacobi_rec(n::Integer, a::Real, b::Real) = jacobi_rec(n, promote(float(a), float(b))...) +jacobi_rec(n::Integer, α::Real, β::Real) = jacobi_rec(n, promote(float(α), float(β))...) -function jacobi_rec(n::Integer, a::T, b::T) where {T <: AbstractFloat} +function jacobi_rec(n::Integer, α::T, β::T) where {T <: AbstractFloat} #Compute nodes and weights using recurrrence relation. - x11, x12 = HalfRec(n, a, b, 1) - x21, x22 = HalfRec(n, b, a, 0) + x11, x12 = HalfRec(n, α, β, 1) + x21, x22 = HalfRec(n, β, α, 0) x = Array{T}(undef,n) w = Array{T}(undef,n) @@ -60,24 +81,23 @@ function jacobi_rec(n::Integer, a::T, b::T) where {T <: AbstractFloat} x[idx] = xi sum_w += wi end - c = (2^(a + b + 1) * gamma(2 + a) * - gamma(2 + b) / (gamma(2 + a + b) * (a + 1) * (b + 1))) + c = (2^(α+β+1)*gamma(2+α)*gamma(2+β)/(gamma(2+α+β)*(α+1)*(β+1))) rmul!(w, c / sum_w) return x, w end -function HalfRec(n::Integer, a::T, b::T, flag) where {T <: AbstractFloat} +function HalfRec(n::Integer, α::T, β::T, flag) where {T <: AbstractFloat} # HALFREC Jacobi polynomial recurrence relation. # Asymptotic formula - only valid for positive x. r = (flag == 1) ? (ceil(n / 2):-1:1) : (floor(n / 2):-1:1) m = length(r) - c1 = 1 / (2 * n + a + b + 1) - a1 = one(T)/4 - a^2 - b1 = one(T)/4 - b^2 + c1 = 1 / (2 * n + α + β + 1) + a1 = one(T)/4 - α^2 + b1 = one(T)/4 - β^2 c1² = c1^2 x = Array{T}(undef,m) @inbounds for i in 1:m - C = muladd(2*one(T), r[i], a - one(T)/2) * (T(π) * c1) + C = muladd(2*one(T), r[i], α - one(T)/2) * (T(π) * c1) C_2 = C/2 x[i] = cos(muladd(c1², muladd(-b1, tan(C_2), a1 * cot(C_2)), C)) end @@ -86,7 +106,7 @@ function HalfRec(n::Integer, a::T, b::T, flag) where {T <: AbstractFloat} P2 = Array{T}(undef,m) # Loop until convergence: for _ in 1:10 - innerjacobi_rec!(n, x, a, b, P1, P2) + innerjacobi_rec!(n, x, α, β, P1, P2) dx2 = 0.0 @inbounds for i in 1:m dx = P1[i] / P2[i] @@ -97,28 +117,28 @@ function HalfRec(n::Integer, a::T, b::T, flag) where {T <: AbstractFloat} dx2 > eps(T) / 1e6 || break end # Once more for derivatives: - innerjacobi_rec!(n, x, a, b, P1, P2) + innerjacobi_rec!(n, x, α, β, P1, P2) return x, P2 end -function innerjacobi_rec!(n, x, a::T, b::T, P, PP) where {T <: AbstractFloat} +function innerjacobi_rec!(n, x, α::T, β::T, P, PP) where {T <: AbstractFloat} # EVALUATE JACOBI POLYNOMIALS AND ITS DERIVATIVE USING THREE-TERM RECURRENCE. 
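    # The loop below applies the standard Jacobi three-term recurrence in the form
    #   A·P_{k+1}(x) = (B + C·x)·P_k(x) - D·P_{k-1}(x),
    # with A = 2(k+1)(k+α+β+1)(2k+α+β),  B = (2k+α+β+1)(α²-β²),
    #      C = (2k+α+β)(2k+α+β+1)(2k+α+β+2),  D = 2(k+α)(k+β)(2k+α+β+2),
    # and the differentiated recurrence for the derivatives:
    #   A·P′_{k+1}(x) = (B + C·x)·P′_k(x) - D·P′_{k-1}(x) + C·P_k(x).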
N = length(x) @inbounds for j = 1:N xj = x[j] - Pj = (a - b + (a + b + 2) * xj)/2 + Pj = (α - β + (α + β + 2) * xj)/2 Pm1 = one(T) - PPj = (a + b + 2)/2 + PPj = (α + β + 2)/2 PPm1 = zero(T) for k in 1:n-1 - k0 = muladd(2*one(T), k, a + b) + k0 = muladd(2*one(T), k, α + β) k1 = k0 + 1 k2 = k0 + 2 - A = 2 * (k + 1) * (k + (a + b + 1)) * k0 - B = k1 * (a^2 - b^2) + A = 2 * (k + 1) * (k + (α + β + 1)) * k0 + B = k1 * (α^2 - β^2) C = k0 * k1 * k2 - D = 2 * (k + a) * (k + b) * k2 + D = 2 * (k + α) * (k + β) * k2 c1 = muladd(C, xj, B) Pm1, Pj = Pj, muladd(-D, Pm1, c1 * Pj) / A PPm1, PPj = PPj, muladd(c1, PPj, muladd(-D, PPm1, C * Pm1)) / A @@ -129,29 +149,29 @@ function innerjacobi_rec!(n, x, a::T, b::T, P, PP) where {T <: AbstractFloat} nothing end -function innerjacobi_rec(n, x, a::T, b::T) where {T <: AbstractFloat} +function innerjacobi_rec(n, x, α::T, β::T) where {T <: AbstractFloat} # EVALUATE JACOBI POLYNOMIALS AND ITS DERIVATIVE USING THREE-TERM RECURRENCE. N = length(x) P = Array{T}(undef,N) PP = Array{T}(undef,N) - innerjacobi_rec!(n, x, a, b, P, PP) + innerjacobi_rec!(n, x, α, β, P, PP) return P, PP end -function weightsConstant(n, a, b) +function weightsConstant(n, α, β) # Compute the constant for weights: M = min(20, n - 1) C = 1.0 - p = -a * b / n + p = -α * β / n for m = 1:M C += p - p *= -(m + a) * (m + b) / (m + 1) / (n - m) + p *= -(m + α) * (m + β) / (m + 1) / (n - m) abs(p / C) < eps(Float64) / 100 && break end - return 2^(a + b + 1) * C + return 2^(α + β + 1) * C end -function jacobi_asy(n, a, b) +function jacobi_asy(n, α, β) # ASY Compute nodes and weights using asymptotic formulae. # Determine switch between interior and boundary regions: @@ -160,45 +180,45 @@ function jacobi_asy(n, a, b) bdyidx2 = nbdy:-1:1 # Interior formula: - x, w = asy1(n, a, b, nbdy) + x, w = asy1(n, α, β, nbdy) # Boundary formula (right): - xbdy = boundary(n, a, b, nbdy) + xbdy = boundary(n, α, β, nbdy) x[bdyidx1], w[bdyidx1] = xbdy # Boundary formula (left): - if a != b - xbdy = boundary(n, b, a, nbdy) + if α ≠ β + xbdy = boundary(n, β, α, nbdy) end x[bdyidx2] = -xbdy[1] w[bdyidx2] = xbdy[2] - rmul!(w, weightsConstant(n, a, b)) + rmul!(w, weightsConstant(n, α, β)) return x, w end -function asy1(n::Integer, a::Float64, b::Float64, nbdy::Integer) +function asy1(n::Integer, α::Float64, β::Float64, nbdy::Integer) # Algorithm for computing nodes and weights in the interior. 
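    # Strategy: start from the Gatteschi-Pittaluga approximation below, refine each
    # half of (-1,1) by Newton's method using the asymptotic evaluation `feval_asy1`,
    # and read off the weights from the derivative values as 1/ders^2 (the common
    # constant factor is applied afterwards in `jacobi_asy`).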
# Approximate roots via asymptotic formula: (Gatteschi and Pittaluga, 1985) - K = π*(2(n:-1:1).+a.-0.5)/(2n+a+b+1) - tt = K .+ (1/(2n+a+b+1)^2).*((0.25-a^2).*cot.(K/2).-(0.25-b^2).*tan.(K/2)) + K = π*(2(n:-1:1).+α.-0.5)/(2n+α+β+1) + tt = K .+ (1/(2n+α+β+1)^2).*((0.25-α^2).*cot.(K/2).-(0.25-β^2).*tan.(K/2)) # First half (x > 0): - t = tt[tt .<= pi/2] + t = tt[tt .≤ π/2] mint = t[end-nbdy+1] idx = 1:max(findfirst(t .< mint)-1, 1) # Newton iteration for _ in 1:10 - vals, ders = feval_asy1(n, a, b, t, idx) # Evaluate + vals, ders = feval_asy1(n, α, β, t, idx) # Evaluate dt = vals./ders t += dt # Next iterate if norm(dt[idx],Inf) < sqrt(eps(Float64))/100 break end end - vals, ders = feval_asy1(n, a, b, t, idx) # Once more for luck + vals, ders = feval_asy1(n, α, β, t, idx) # Once more for luck t += vals./ders # Store @@ -206,21 +226,21 @@ function asy1(n::Integer, a::Float64, b::Float64, nbdy::Integer) w_right = 1 ./ ders.^2 # Second half (x < 0): - a, b = b, a - t = pi .- tt[1:(n-length(x_right))] + α, β = β, α + t = π .- tt[1:(n-length(x_right))] mint = t[nbdy] idx = max(findfirst(t .> mint), 1):length(t) # Newton iteration for _ in 1:10 - vals, ders = feval_asy1(n, a, b, t, idx) # Evaluate. + vals, ders = feval_asy1(n, α, β, t, idx) # Evaluate. dt = vals./ders # Newton update. t += dt if norm(dt[idx],Inf) < sqrt(eps(Float64))/100 break end end - vals, ders = feval_asy1(n, a, b, t, idx) # Once more for luck. + vals, ders = feval_asy1(n, α, β, t, idx) # Once more for luck. t += vals./ders # Newton update. # Store @@ -235,7 +255,7 @@ Evaluate the interior asymptotic formula at x = cos(t). Assumption: * length(t) == n ÷ 2 """ -function feval_asy1(n::Integer, a::Float64, b::Float64, t::AbstractVector, idx) +function feval_asy1(n::Integer, α::Float64, β::Float64, t::AbstractVector, idx) # Number of terms in the expansion: M = 20 @@ -246,7 +266,7 @@ function feval_asy1(n::Integer, a::Float64, b::Float64, t::AbstractVector, idx) onesM = ones(M) # The sine and cosine terms: - A = repeat((2n+a+b).+(1:M),1,N).*repeat(t',M)/2 .- (a+1/2)*π/2 # M × N matrix + A = repeat((2n+α+β).+(1:M),1,N).*repeat(t',M)/2 .- (α+1/2)*π/2 # M × N matrix cosA = cos.(A) sinA = sin.(A) @@ -259,27 +279,27 @@ function feval_asy1(n::Integer, a::Float64, b::Float64, t::AbstractVector, idx) secT = sec.(t/2)/2 j = 0:M-2 - _vec = @. (0.5+a+j)*(0.5-a+j)/(j+1)/(2n+a+b+j+2) + _vec = @. (0.5+α+j)*(0.5-α+j)/(j+1)/(2n+α+β+j+2) P1 = [1;cumprod(_vec,1)] P1[3:4:end] = -P1[3:4:end] P1[4:4:end] = -P1[4:4:end] P2 = Matrix(1.0I, M, M) for l in 0:M-1 j = 0:M-l-2 - _vec = @. (0.5+b+j)*(0.5-b+j)/(j+1)/(2n+a+b+j+l+2) + _vec = @. (0.5+β+j)*(0.5-β+j)/(j+1)/(2n+α+β+j+l+2) P2[l+1,(l+1).+(1:length(j))] = cumprod(_vec,1) end PHI = repeat(P1,1,M).*P2 j = 0:M-2 - _vec = @. (0.5+a+j)*(0.5-a+j)/(j+1)/(2*(n-1)+a+b+j+2) + _vec = @. (0.5+α+j)*(0.5-α+j)/(j+1)/(2*(n-1)+α+β+j+2) P1 = [1;cumprod(_vec,1)] P1[3:4:end] = -P1[3:4:end] P1[4:4:end] = -P1[4:4:end] P2 = Matrix(1.0I, M, M) for l in 0:M-1 j = 0:M-l-2 - _vec = @. (0.5+b+j)*(0.5-b+j)/(j+1)/(2*(n-1)+a+b+j+l+2) + _vec = @. (0.5+β+j)*(0.5-β+j)/(j+1)/(2*(n-1)+α+β+j+l+2) P2[l+1,(l+1).+(1:length(j))] = cumprod(_vec,1) end PHI2 = repeat(P1,1,M).*P2 @@ -308,24 +328,24 @@ function feval_asy1(n::Integer, a::Float64, b::Float64, t::AbstractVector, idx) end # Constant out the front: - dsa = a^2/2n - dsb = b^2/2n - dsab = (a+b)^2/4n + dsa = α^2/2n + dsb = β^2/2n + dsab = (α+β)^2/4n ds = dsa + dsb - dsab s = ds i = 1 - dsold = ds # to fix a = -b bug. + dsold = ds # to fix α = -β bug. 
while abs(ds/s)+dsold > eps(Float64)/10 dsold = abs(ds/s) i += 1 tmp = -(i-1)/(i+1)/n - dsa = tmp*dsa*a - dsb = tmp*dsb*b - dsab = tmp*dsab*(a+b)/2 + dsa = tmp*dsa*α + dsb = tmp*dsb*β + dsab = tmp*dsab*(α+β)/2 ds = dsa + dsb - dsab s = s + ds end - p2 = exp(s)*sqrt(2π*(n+a)*(n+b)/(2n+a+b))/(2n+a+b+1) + p2 = exp(s)*sqrt(2π*(n+α)*(n+β)/(2n+α+β))/(2n+α+β+1) # g is a vector of coefficients in # ``\Gamma(z) = \frac{z^{z-1/2}}{\exp(z)}\sqrt{2\pi} \left(\sum_{i} B_i z^{-i}\right)``, where B_{i-1} = g[i]. # https://math.stackexchange.com/questions/1714423/what-is-the-pattern-of-the-stirling-series @@ -335,41 +355,41 @@ function feval_asy1(n::Integer, a::Float64, b::Float64, t::AbstractVector, idx) f(g,z) = sum(g.*[1;cumprod(ones(9)./z)]) # Float constant C, C2 - C = 2*p2*(f(g,n+a)*f(g,n+b)/f(g,2n+a+b))/π - C2 = C*(a+b+2n)*(a+b+1+2n)/(4*(a+n)*(b+n)) + C = 2*p2*(f(g,n+α)*f(g,n+β)/f(g,2n+α+β))/π + C2 = C*(α+β+2n)*(α+β+1+2n)/(4*(α+n)*(β+n)) vals = C*S # Use relation for derivative: - ders = (n*((a-b).-(2n+a+b)*cos.(t)).*vals .+ (2*(n+a)*(n+b)*C2).*S2)./(2n+a+b)./sin.(t) - denom = 1 ./ (sin.(abs.(t)/2).^(a+0.5).*cos.(t/2).^(b+0.5)) + ders = (n*((α-β).-(2n+α+β)*cos.(t)).*vals .+ (2*(n+α)*(n+β)*C2).*S2)./(2n+α+β)./sin.(t) + denom = 1 ./ (sin.(abs.(t)/2).^(α+0.5).*cos.(t/2).^(β+0.5)) vals .*= denom ders .*= denom return vals, ders end -function boundary(n::Integer, a::Float64, b::Float64, npts) +function boundary(n::Integer, α::Float64, β::Float64, npts) # Algorithm for computing nodes and weights near the boundary. # Use Newton iterations to find the first few Bessel roots: smallK = min(30, npts) - jk = besselroots(a, min(npts, smallK)) + jk = besselroots(α, min(npts, smallK)) # Approximate roots via asymptotic formula: (see Olver 1974) - phik = jk/(n + .5*(a + b + 1)) - x = cos.( phik .+ ((a^2-0.25).*(1 .-phik.*cot.(phik))./(8*phik) .- 0.25.*(a^2-b^2).*tan.(0.5.*phik))./(n + 0.5*(a + b + 1))^2 ) + phik = jk/(n + .5*(α + β + 1)) + x = cos.( phik .+ ((α^2-0.25).*(1 .-phik.*cot.(phik))./(8*phik) .- 0.25.*(α^2-β^2).*tan.(0.5.*phik))./(n + 0.5*(α + β + 1))^2 ) # Newton iteration: for _ in 1:10 - vals, ders = innerjacobi_rec(n, x, a, b) # Evaluate via asymptotic formula. - dx = -vals./ders # Newton update. - x += dx # Next iterate. + vals, ders = innerjacobi_rec(n, x, α, β) # Evaluate via asymptotic formula. + dx = -vals./ders # Newton update. + x += dx # Next iterate. if norm(dx,Inf) < sqrt(eps(Float64))/200 break end end - vals, ders = innerjacobi_rec(n, x, a, b); # Evaluate via asymptotic formula. + vals, ders = innerjacobi_rec(n, x, α, β) # Evaluate via asymptotic formula. 
dx = -vals./ders x += dx @@ -382,30 +402,29 @@ function boundary(n::Integer, a::Float64, b::Float64, npts) return x, w end -function jacobi_jacobimatrix(n, a, b) - s = a + b +function jacobi_jacobimatrix(n, α, β) + s = α + β ii = 2:n-1 si = 2*ii .+ s - aa = [(b - a)/(2 + s); - (b^2 - a^2) ./ ((si .- 2).*si); - (b^2 - a^2) ./ ((2n - 2+s).*(2n+s))] - bb = [2*sqrt( (1 + a)*(1 + b)/(s + 3))/(s + 2) ; - 2 .*sqrt.(ii.*(ii .+ a).*(ii .+ b).*(ii .+ s)./(si.^2 .- 1))./si] + aa = [(β - α)/(2 + s); + (β^2 - α^2) ./ ((si .- 2).*si); + (β^2 - α^2) ./ ((2n - 2+s).*(2n+s))] + bb = [2*sqrt( (1 + α)*(1 + β)/(s + 3))/(s + 2) ; + 2 .*sqrt.(ii.*(ii .+ α).*(ii .+ β).*(ii .+ s)./(si.^2 .- 1))./si] return SymTridiagonal(aa, bb) end -function jacobimoment(a,b) - s = a + b +function jacobimoment(α, β) + s = α + β T = float(typeof(s)) - # Same as 2^(a+b+1) * beta(a+1,b+1) - return exp((s+1)*log(convert(T,2)) + loggamma(a+1)+loggamma(b+1)-loggamma(2+s)) + # Same as 2^(α+β+1) * beta(α+1,β+1) + return exp((s+1)*log(convert(T,2)) + loggamma(α+1)+loggamma(β+1)-loggamma(2+s)) end -function jacobi_gw(n::Integer, a, b) - # Golub-Welsh for Gauss--Jacobi quadrature. This is used when max(a,b)>5. - x, V = eigen(jacobi_jacobimatrix(n,a,b)) # Eigenvalue decomposition. +function jacobi_gw(n::Integer, α, β) + # Golub-Welsh for Gauss--Jacobi quadrature. This is used when max(α,β)>5. + x, V = eigen(jacobi_jacobimatrix(n, α, β)) # Eigenvalue decomposition. # Quadrature weights: - w = V[1,:].^2 .* jacobimoment(a,b) + w = V[1,:].^2 .* jacobimoment(α, β) return x, w end - diff --git a/src/gausslaguerre.jl b/src/gausslaguerre.jl index f7f67ff..8ae35b9 100644 --- a/src/gausslaguerre.jl +++ b/src/gausslaguerre.jl @@ -1,6 +1,49 @@ +@doc raw""" + gausslaguerre(n::Integer) -> Tuple{Vector{Float64},Vector{Float64}} + +Return nodes and weights of [Gauss-Laguerre quadrature](https://en.wikipedia.org/wiki/Gauss%E2%80%93Laguerre_quadrature). + +```math +\int_{0}^{+\infty} f(x) e^{-x} dx \approx \sum_{i=1}^{n} w_i f(x_i) +``` + +# Examples +```jldoctest; setup = :(using FastGaussQuadrature, LinearAlgebra) +julia> x, w = gausslaguerre(3); + +julia> f(x) = x^4; + +julia> I = dot(w, f.(x)); + +julia> I ≈ 24 +true +``` """ -(x,w) = gausslaguerre(n) returns n Gauss-Laguerre nodes and weights. -(x,w) = gausslaguerre(n, alpha) allows generalized Gauss-Laguerre quadrature. +function gausslaguerre(n::Integer) + return gausslaguerre(n, 0.0) +end + + +@doc raw""" + gausslaguerre(n::Integer, α::Real) -> Tuple{Vector{Float64},Vector{Float64}} + +Return nodes and weights of generalized [Gauss-Laguerre quadrature](https://en.wikipedia.org/wiki/Gauss%E2%80%93Laguerre_quadrature). + +```math +\int_{0}^{+\infty} f(x) x^\alpha e^{-x} dx \approx \sum_{i=1}^{n} w_i f(x_i) +``` + +# Examples +```jldoctest; setup = :(using FastGaussQuadrature, LinearAlgebra) +julia> x, w = gausslaguerre(3, 1.0); + +julia> f(x) = x^4; + +julia> I = dot(w, f.(x)); + +julia> I ≈ 120 +true +``` Optionally, a reduced quadrature rule can be computed. 
In that case, only those points and weights are computed for which the weight does not underflow in the @@ -15,38 +58,38 @@ user can manually invoke the following routines: using the recurrence relation - `gausslaguerre_asy`: the asymptotic expansions """ -function gausslaguerre(n::Integer, alpha = 0.0; reduced = false) - if alpha <= -1 - throw(DomainError(alpha, "The Laguerre parameter α <= -1 corresponds to a nonintegrable weight function")) +function gausslaguerre(n::Integer, α::Real; reduced = false) + if α ≤ -1 + throw(DomainError(α, "The Laguerre parameter α ≤ -1 corresponds to a nonintegrable weight function")) end if n < 0 - throw(DomainError(n, "gausslaguerre($n,$alpha) not defined: n must be positive.")) + throw(DomainError(n, "gausslaguerre($n,$α) not defined: n must be positive.")) end - # Guess the numerical type from the supplied type of alpha + # Guess the numerical type from the supplied type of α # Although the code is generic, the heuristics are derived for Float64 precision - T = typeof(float(alpha)) + T = typeof(float(α)) if n == 0 T[],T[] elseif n == 1 - [1+alpha], [gamma(1+alpha)] + [1+α], [gamma(1+α)] elseif n == 2 - [alpha + 2-sqrt(alpha+2),alpha+2+sqrt(alpha+2)], - [((alpha-sqrt(alpha+2)+2)*gamma(alpha+2))/(2*(alpha+2)*(sqrt(alpha+2)-1)^2), - ((alpha+sqrt(alpha+2)+2)*gamma(alpha+2))/(2*(alpha+2)*(sqrt(alpha+2)+1)^2)] + [α + 2-sqrt(α+2),α+2+sqrt(α+2)], + [((α-sqrt(α+2)+2)*gamma(α+2))/(2*(α+2)*(sqrt(α+2)-1)^2), + ((α+sqrt(α+2)+2)*gamma(α+2))/(2*(α+2)*(sqrt(α+2)+1)^2)] elseif n < 15 # Use Golub-Welsch for small n - gausslaguerre_GW(n, alpha) + gausslaguerre_GW(n, α) elseif n < 128 # Use the recurrence relation for moderate n - gausslaguerre_rec(n, alpha) + gausslaguerre_rec(n, α) else # Use explicit asymptotic expansions for larger n - # The restriction to alpha comes from the restriction on nu in besselroots - if alpha < 5 - gausslaguerre_asy(n, alpha, reduced=reduced, T=-1, recompute=true) + # The restriction to α comes from the restriction on nu in besselroots + if α < 5 + gausslaguerre_asy(n, α, reduced=reduced, T=-1, recompute=true) else - gausslaguerre_rec(n, alpha) + gausslaguerre_rec(n, α) end end end @@ -74,31 +117,31 @@ depending on the size of the terms in the expansion the point and weight are recomputed using the (slower) recursion+newton approach, yielding more reliable accurate results. """ -function gausslaguerre_asy(n::Integer, alpha; +function gausslaguerre_asy(n::Integer, α; reduced = false, - T = max(1, ceil(Int, 50/log(n))), # Heuristic for number of terms + T = max(1, ceil(Int, 50/log(n))), # Heuristic for number of terms recompute = false) - if alpha^2/n > 1 - @warn "A large value of alpha may lead to inaccurate results." + if α^2/n > 1 + @warn "A large value of α may lead to inaccurate results." end - ELT = typeof(float(alpha)) + ELT = typeof(float(α)) n_alloc = reduced ? 
0 : n x = zeros(ELT, n_alloc) w = zeros(ELT, n_alloc) # The expansions are given in powers of 1/(4n+2α+2) - d = one(ELT)/(4n+2alpha+2) + d = one(ELT)/(4n+2α+2) # Heuristical indices for Bessel and Airy regions k_bessel = max(ceil(Int, sqrt(n) ), 7) k_airy = floor(Int, 0.9*n) # The Bessel region - # First compute the roots of the Bessel function of order alpha - jak_vector = besselroots(alpha, k_bessel) + # First compute the roots of the Bessel function of order α + jak_vector = besselroots(α, k_bessel) bessel_wins = true k = 0 @@ -106,10 +149,10 @@ function gausslaguerre_asy(n::Integer, alpha; k += 1 # We iterate until the estimated error of the bulk expansion is smaller # than the one of the Bessel expansion - jak = (k < k_bessel) ? jak_vector[k] : jak = McMahon(alpha, k) + jak = (k < k_bessel) ? jak_vector[k] : jak = McMahon(α, k) - xk, wk, δ_bessel = gausslaguerre_asy_bessel(n, alpha, jak, d, T) - xkb, wkb, δ_bulk = gausslaguerre_asy_bulk(n, alpha, k, d, T) + xk, wk, δ_bessel = gausslaguerre_asy_bessel(n, α, jak, d, T) + xkb, wkb, δ_bulk = gausslaguerre_asy_bulk(n, α, k, d, T) if δ_bulk < δ_bessel bessel_wins = false xk = xkb @@ -118,7 +161,7 @@ function gausslaguerre_asy(n::Integer, alpha; if recompute δ = min(δ_bessel,δ_bulk) if δ > 1e-13 - xk_rec, wk_rec = gl_rec_newton(xk, n, alpha) + xk_rec, wk_rec = gl_rec_newton(xk, n, α) if abs(xk_rec-xk) < 100δ xk = xk_rec wk = wk_rec @@ -140,10 +183,10 @@ function gausslaguerre_asy(n::Integer, alpha; # - First we go from where we left of to our heuristic while k < k_airy-1 k += 1 - xk, wk, δ_bulk = gausslaguerre_asy_bulk(n, alpha, k, d, T) + xk, wk, δ_bulk = gausslaguerre_asy_bulk(n, α, k, d, T) if recompute if δ_bulk > 1e-13 - xk_rec, wk_rec = gl_rec_newton(xk, n, alpha) + xk_rec, wk_rec = gl_rec_newton(xk, n, α) if abs(xk_rec-xk) < 100δ_bulk xk = xk_rec wk = wk_rec @@ -165,8 +208,8 @@ function gausslaguerre_asy(n::Integer, alpha; bulk_wins = true while bulk_wins && k < n k += 1 - xk, wk, δ_bulk = gausslaguerre_asy_bulk(n, alpha, k, d, T) - xka, wka, δ_airy = gausslaguerre_asy_airy(n, alpha, k, d, T) + xk, wk, δ_bulk = gausslaguerre_asy_bulk(n, α, k, d, T) + xka, wka, δ_airy = gausslaguerre_asy_airy(n, α, k, d, T) if δ_airy < δ_bulk bulk_wins = false xk = xka @@ -175,7 +218,7 @@ function gausslaguerre_asy(n::Integer, alpha; if recompute δ = min(δ_airy,δ_bulk) if δ > 1e-13 - xk_rec, wk_rec = gl_rec_newton(xk, n, alpha) + xk_rec, wk_rec = gl_rec_newton(xk, n, α) if abs(xk_rec-xk) < 100δ xk = xk_rec wk = wk_rec @@ -196,10 +239,10 @@ function gausslaguerre_asy(n::Integer, alpha; # The Airy region while k < n k += 1 - xk, wk, δ_airy = gausslaguerre_asy_airy(n, alpha, k, d, T) + xk, wk, δ_airy = gausslaguerre_asy_airy(n, α, k, d, T) if recompute if δ_airy > 1e-13 - xk_rec, wk_rec = gl_rec_newton(xk, n, alpha) + xk_rec, wk_rec = gl_rec_newton(xk, n, α) if abs(xk_rec-xk) < 100δ_airy xk = xk_rec wk = wk_rec @@ -218,7 +261,7 @@ function gausslaguerre_asy(n::Integer, alpha; end # Sanity check - if ( minimum(x) < 0.0 ) || ( maximum(x) > 4*n + 2*alpha + 2 ) || ( minimum(diff(x)) <= 0.0 ) || (minimum(w) < 0.0) + if ( minimum(x) < 0.0 ) || ( maximum(x) > 4*n + 2*α + 2 ) || ( minimum(diff(x)) ≤ 0.0 ) || (minimum(w) < 0.0) @warn "Unexpected inconsistency in the computation of nodes and weights" end @@ -229,45 +272,45 @@ end # These are explicit formulas of the coefficients, up to a simple postprocessing # that is common to all factors and not included here (see below). 
# -# General expressions are given in terms of alpha, more specific expressions -# follow for the special case alpha = 0. +# General expressions are given in terms of α, more specific expressions +# follow for the special case α = 0. ## The bulk # Note: there is always one division by an integer, placed such that it preserves the type of `d` -gl_bulk_x3(t, d, alpha) = -(12*alpha^2 + 5*(1-t)^(-2) - 4*(1-t)^(-1) - 4) * d / 12 -gl_bulk_x5(t, d, alpha) = d^3*(1-t)/t/720*(1600*(1-t)^(-6) - 3815*(1-t)^(-5) - + 480*alpha^4 +2814*(1-t)^(-4) - 576*(1-t)^(-3) - 960*alpha^2 - 48*(15*alpha^4 - 30*alpha^2 + 7)*(1-t)^(-1) +gl_bulk_x3(t, d, α) = -(12*α^2 + 5*(1-t)^(-2) - 4*(1-t)^(-1) - 4) * d / 12 +gl_bulk_x5(t, d, α) = d^3*(1-t)/t/720*(1600*(1-t)^(-6) - 3815*(1-t)^(-5) + + 480*α^4 +2814*(1-t)^(-4) - 576*(1-t)^(-3) - 960*α^2 - 48*(15*α^4 - 30*α^2 + 7)*(1-t)^(-1) -16*(1-t)^(-2) + 224) -gl_bulk_x7(t, d, alpha) = -d^5/181440*(1-t)^2/t^2*(10797500*(1-t)^(-10) - 43122800*(1-t)^(-9) - + 66424575*(1-t)^(-8) -48469876*(1-t)^(-7) + 193536*alpha^6 + 16131880*(1-t)^(-6) - + 80*(315*alpha^4 - 630*alpha^2 -221)*(1-t)^(-4) - 1727136*(1-t)^(-5) - - 967680*alpha^4 - 320*(63*alpha^4 - 126*alpha^2 +43)*(1-t)^(-3) - + 384*(945*alpha^6 - 4620*alpha^4 + 6405*alpha^2 - 1346)*(1-t)^(-2) - + 1354752*alpha^2 - 23040*(21*alpha^6 - 105*alpha^4 + 147*alpha^2 - 31)*(1-t)^(-1) -285696) -gl_bulk_x9(t, d, alpha) = d^7/10886400*(1-t)^3/t^3*(43222750000*(1-t)^(-14) - 241928673000*(1-t)^(-13) +gl_bulk_x7(t, d, α) = -d^5/181440*(1-t)^2/t^2*(10797500*(1-t)^(-10) - 43122800*(1-t)^(-9) + + 66424575*(1-t)^(-8) -48469876*(1-t)^(-7) + 193536*α^6 + 16131880*(1-t)^(-6) + + 80*(315*α^4 - 630*α^2 -221)*(1-t)^(-4) - 1727136*(1-t)^(-5) + - 967680*α^4 - 320*(63*α^4 - 126*α^2 +43)*(1-t)^(-3) + + 384*(945*α^6 - 4620*α^4 + 6405*α^2 - 1346)*(1-t)^(-2) + + 1354752*α^2 - 23040*(21*α^6 - 105*α^4 + 147*α^2 - 31)*(1-t)^(-1) -285696) +gl_bulk_x9(t, d, α) = d^7/10886400*(1-t)^3/t^3*(43222750000*(1-t)^(-14) - 241928673000*(1-t)^(-13) + 566519158800*(1-t)^(-12) -714465642135*(1-t)^(-11) + 518401904799*(1-t)^(-10) - + 672*(12000*alpha^4 - 24000*alpha^2 +64957561)*(1-t)^(-8) - 212307298152*(1-t)^(-9) - + 24883200*alpha^8 - 192*(103425*alpha^4 -206850*alpha^2 + 15948182)*(1-t)^(-7) - + 3360*(4521*alpha^4 - 9042*alpha^2 - 7823)*(1-t)^(-6) -232243200*alpha^6 - - 1792*(3375*alpha^6 - 13905*alpha^4 + 17685*alpha^2 - 1598)*(1-t)^(-5) - + 16128*(450*alpha^6 - 2155*alpha^4 + 2960*alpha^2 - 641)*(1-t)^(-4) - + 812851200*alpha^4 -768*(70875*alpha^8 - 631260*alpha^6 + 2163630*alpha^4 - - 2716980*alpha^2 +555239)*(1-t)^(-3) + 768*(143325*alpha^8 - 1324260*alpha^6 - + 4613070*alpha^4 -5826660*alpha^2 + 1193053)*(1-t)^(-2) - 1028505600*alpha^2 - - 5806080*(15*alpha^8 -140*alpha^6 + 490*alpha^4 - 620*alpha^2 + 127)*(1-t)^(-1) + 210677760) - -gl_bulk_w3(t, d, alpha) = d^2/6*(2*t + 3)/(t-1)^3 -gl_bulk_w5(t, d, alpha) = (1-t)^2/720/t^2*d^4*(8000*(1-t)^(-8) - 24860*(1-t)^(-7) + 27517*(1-t)^(-6) - - 12408*(1-t)^(-5) + 1712*(1-t)^(-4) +16*(15*alpha^4 - 30*alpha^2 + 7)*(1-t)^(-2) + 32*(1-t)^(-3)) -gl_bulk_w7(t, d, alpha) = -(1-t)^3/90720/t^3*d^6*(43190000*(1-t)^(-12) -204917300*(1-t)^(-11) + + 672*(12000*α^4 - 24000*α^2 +64957561)*(1-t)^(-8) - 212307298152*(1-t)^(-9) + + 24883200*α^8 - 192*(103425*α^4 -206850*α^2 + 15948182)*(1-t)^(-7) + + 3360*(4521*α^4 - 9042*α^2 - 7823)*(1-t)^(-6) -232243200*α^6 + - 1792*(3375*α^6 - 13905*α^4 + 17685*α^2 - 1598)*(1-t)^(-5) + + 16128*(450*α^6 - 2155*α^4 + 2960*α^2 - 641)*(1-t)^(-4) + + 812851200*α^4 -768*(70875*α^8 - 631260*α^6 + 2163630*α^4 + - 
2716980*α^2 +555239)*(1-t)^(-3) + 768*(143325*α^8 - 1324260*α^6 + + 4613070*α^4 -5826660*α^2 + 1193053)*(1-t)^(-2) - 1028505600*α^2 + - 5806080*(15*α^8 -140*α^6 + 490*α^4 - 620*α^2 + 127)*(1-t)^(-1) + 210677760) + +gl_bulk_w3(t, d, α) = d^2/6*(2*t + 3)/(t-1)^3 +gl_bulk_w5(t, d, α) = (1-t)^2/720/t^2*d^4*(8000*(1-t)^(-8) - 24860*(1-t)^(-7) + 27517*(1-t)^(-6) + - 12408*(1-t)^(-5) + 1712*(1-t)^(-4) +16*(15*α^4 - 30*α^2 + 7)*(1-t)^(-2) + 32*(1-t)^(-3)) +gl_bulk_w7(t, d, α) = -(1-t)^3/90720/t^3*d^6*(43190000*(1-t)^(-12) -204917300*(1-t)^(-11) + 393326325*(1-t)^(-10) - 386872990*(1-t)^(-9) + 201908326*(1-t)^(-8) - + 80*(315*alpha^4 - 630*alpha^2 + 53752)*(1-t)^(-6) - 50986344*(1-t)^(-7) - - 320*(189*alpha^4 -378*alpha^2 - 89)*(1-t)^(-5) + 480*(63*alpha^4 - 126*alpha^2 - + 43)*(1-t)^(-4) -384*(315*alpha^6 - 1470*alpha^4 + 1995*alpha^2 - 416)*(1-t)^(-3) - + 2304*(21*alpha^6 -105*alpha^4 + 147*alpha^2 - 31)*(1-t)^(-2) ) + + 80*(315*α^4 - 630*α^2 + 53752)*(1-t)^(-6) - 50986344*(1-t)^(-7) + - 320*(189*α^4 -378*α^2 - 89)*(1-t)^(-5) + 480*(63*α^4 - 126*α^2 + + 43)*(1-t)^(-4) -384*(315*α^6 - 1470*α^4 + 1995*α^2 - 416)*(1-t)^(-3) + + 2304*(21*α^6 -105*α^4 + 147*α^2 - 31)*(1-t)^(-2) ) -# And for alpha = 0 +# And for α = 0 gl_bulk_x3(t, d) = -d/12*(5*(1-t)^(-2) - 4*(1-t)^(-1) - 4) gl_bulk_x5(t, d) = d^3*(1-t)/t/720*(1600*(1-t)^(-6) - 3815*(1-t)^(-5) + 2814*(1-t)^(-4) - 576*(1-t)^(-3) - 48*7*(1-t)^(-1) -16*(1-t)^(-2) + 224) @@ -295,24 +338,24 @@ gl_bulk_w7(t, d) = -(1-t)^3/90720/t^3*d^6*(43190000*(1-t)^(-12) ## The hard edge (Bessel region) -gl_bessel_x3(jak, d, alpha) = (jak^2 + 2*alpha^2 - 2)*d^2 / 3 -gl_bessel_x5(jak, d, alpha) = (11*jak^4 +3*jak^2*(11*alpha^2-19) +46*alpha^4 -140*alpha^2 +94)*d^4 / 45 -gl_bessel_x7(jak, d, alpha) = (657*jak^6 +36*jak^4*(73*alpha^2-181) +2*jak^2*(2459*alpha^4 -10750*alpha^2 +14051) - + 4*(1493*alpha^6 -9303*alpha^4 +19887*alpha^2 - 12077) )*d^6 / 2835 -gl_bessel_x9(jak, d, alpha) = (10644*jak^8 + 60*(887*alpha^2 - 2879)*jak^6 + (125671*alpha^4 -729422*alpha^2 + 1456807)*jak^4 - + 3*(63299*alpha^6 - 507801*alpha^4 + 1678761*alpha^2 - 2201939)*jak^2 + 2*(107959*alpha^8 - - 1146220*alpha^6 + 5095482*alpha^4 -10087180*alpha^2 + 6029959) )*d^8 / 42525 - -gl_bessel_w3(jak, d, alpha) = (alpha^2 + jak^2 -1)*2*d^2 / 3 -gl_bessel_w5(jak, d, alpha) = (46*alpha^4 + 33*jak^4 +6*jak^2*(11*alpha^2 -19) -140*alpha^2 +94)*d^4 / 45 -gl_bessel_w7(jak, d, alpha) = (1493*alpha^6 + 657*jak^6 + 27*(73*alpha^2 - 181)*jak^4 - 9303*alpha^4 - + (2459*alpha^4 -10750*alpha^2 + 14051)*jak^2 + 19887*alpha^2 - 12077)*4*d^6 / 2835 -gl_bessel_w9(jak, d, alpha) = (215918*alpha^8 + 53220*jak^8 + 240*(887*alpha^2 - 2879)*jak^6 -2292440*alpha^6 + - 3*(125671*alpha^4 - 729422*alpha^2 + 1456807)*jak^4 + 10190964*alpha^4 + - 6*(63299*alpha^6 - 507801*alpha^4 + 1678761*alpha^2 -2201939)*jak^2 - - 20174360*alpha^2 + 12059918)*d^8 / 42525 - -# And for alpha = 0: +gl_bessel_x3(jak, d, α) = (jak^2 + 2*α^2 - 2)*d^2 / 3 +gl_bessel_x5(jak, d, α) = (11*jak^4 +3*jak^2*(11*α^2-19) +46*α^4 -140*α^2 +94)*d^4 / 45 +gl_bessel_x7(jak, d, α) = (657*jak^6 +36*jak^4*(73*α^2-181) +2*jak^2*(2459*α^4 -10750*α^2 +14051) + + 4*(1493*α^6 -9303*α^4 +19887*α^2 - 12077) )*d^6 / 2835 +gl_bessel_x9(jak, d, α) = (10644*jak^8 + 60*(887*α^2 - 2879)*jak^6 + (125671*α^4 -729422*α^2 + 1456807)*jak^4 + + 3*(63299*α^6 - 507801*α^4 + 1678761*α^2 - 2201939)*jak^2 + 2*(107959*α^8 + - 1146220*α^6 + 5095482*α^4 -10087180*α^2 + 6029959) )*d^8 / 42525 + +gl_bessel_w3(jak, d, α) = (α^2 + jak^2 -1)*2*d^2 / 3 +gl_bessel_w5(jak, d, α) = (46*α^4 + 33*jak^4 
+6*jak^2*(11*α^2 -19) -140*α^2 +94)*d^4 / 45 +gl_bessel_w7(jak, d, α) = (1493*α^6 + 657*jak^6 + 27*(73*α^2 - 181)*jak^4 - 9303*α^4 + + (2459*α^4 -10750*α^2 + 14051)*jak^2 + 19887*α^2 - 12077)*4*d^6 / 2835 +gl_bessel_w9(jak, d, α) = (215918*α^8 + 53220*jak^8 + 240*(887*α^2 - 2879)*jak^6 -2292440*α^6 + + 3*(125671*α^4 - 729422*α^2 + 1456807)*jak^4 + 10190964*α^4 + + 6*(63299*α^6 - 507801*α^4 + 1678761*α^2 -2201939)*jak^2 - + 20174360*α^2 + 12059918)*d^8 / 42525 + +# And for α = 0: gl_bessel_x3(jak, d) = (jak^2 - 2)*d^2 / 3 gl_bessel_x5(jak, d) = (11*jak^4 - 57*jak^2 + 94)d^4 / 45 gl_bessel_x7(jak, d) = (657*jak^6 - 6516*jak^4 + 28102*jak^2 - 48308)*d^6 / 2835 @@ -328,9 +371,9 @@ gl_bessel_w11(jak, d) = (1231947*jak^10 - 24770655*jak^8 + 277804122*jak^6 - 187 ## The soft edge (Airy region) -gl_airy_x1(ak, d, alpha) = 1/d + ak*(d/4)^(-1/3) -gl_airy_x3(ak, d, alpha) = ak^2*(d*16)^(1/3)/5 + (11/35-alpha^2-12/175*ak^3)*d + (16/1575*ak+92/7875*ak^4)*2^(2/3)*d^(5/3) -gl_airy_x5(ak, d, alpha) = -(15152/3031875*ak^5+1088/121275*ak^2)*2^(1/3)*d^(7/3) +gl_airy_x1(ak, d, α) = 1/d + ak*(d/4)^(-1/3) +gl_airy_x3(ak, d, α) = ak^2*(d*16)^(1/3)/5 + (11/35-α^2-12/175*ak^3)*d + (16/1575*ak+92/7875*ak^4)*2^(2/3)*d^(5/3) +gl_airy_x5(ak, d, α) = -(15152/3031875*ak^5+1088/121275*ak^2)*2^(1/3)*d^(7/3) gl_airy_x1(ak, d) = 1/d + ak*(d/4)^(-1/3) gl_airy_x3(ak, d) = ak^2*(d*16)^(1/3)/5 + (11/35-12/175*ak^3)*d + (16/1575*ak+92/7875*ak^4)*2^(2/3)*d^(5/3) @@ -367,13 +410,13 @@ end function gl_bulk_solve_t(n, k, d) T = typeof(d) pt = (4n-4k+3)*d - t = T(pi)^2/16*(pt-1)^2 + t = T(π)^2/16*(pt-1)^2 diff = 100 iter = 0 maxiter = 20 while (abs(diff) > 100eps(T)) && (iter < maxiter) iter += 1 - diff = (pt*pi +2*sqrt(t-t^2) -acos(2*t-1) )*sqrt(t/(1-t))/2 + diff = (pt*π +2*sqrt(t-t^2) -acos(2*t-1) )*sqrt(t/(1-t))/2 t -= diff end if iter == maxiter @@ -382,19 +425,19 @@ function gl_bulk_solve_t(n, k, d) t end -function gausslaguerre_asy_bulk(n, alpha, k, d, T) - if alpha == 0 +function gausslaguerre_asy_bulk(n, α, k, d, T) + if α == 0 return gausslaguerre_asy0_bulk(n, k, d, T) end t = gl_bulk_solve_t(n, k, d) - x3 = gl_bulk_x3(t, d, alpha) - x5 = gl_bulk_x5(t, d, alpha) - x7 = gl_bulk_x7(t, d, alpha) - x9 = gl_bulk_x9(t, d, alpha) - w3 = gl_bulk_w3(t, d, alpha) - w5 = gl_bulk_w5(t, d, alpha) - w7 = gl_bulk_w7(t, d, alpha) + x3 = gl_bulk_x3(t, d, α) + x5 = gl_bulk_x5(t, d, α) + x7 = gl_bulk_x7(t, d, α) + x9 = gl_bulk_x9(t, d, α) + w3 = gl_bulk_w3(t, d, α) + w5 = gl_bulk_w5(t, d, α) + w7 = gl_bulk_w7(t, d, α) xs = (x3, x5, x7, x9) ws = (w3, w5, w7) @@ -404,7 +447,7 @@ function gausslaguerre_asy_bulk(n, alpha, k, d, T) xk += t/d - wfactor = xk^alpha * exp(-xk) * 2pi * sqrt(t/(1-t)) + wfactor = xk^α * exp(-xk) * 2π * sqrt(t/(1-t)) wk = wfactor * (1+wk) wdelta *= wfactor @@ -430,7 +473,7 @@ function gausslaguerre_asy0_bulk(n, k, d, T) xk += t/d - wfactor = exp(-xk) * 2pi * sqrt(t/(1-t)) + wfactor = exp(-xk) * 2π * sqrt(t/(1-t)) wk = wfactor * (1+wk) wdelta *= wfactor @@ -438,18 +481,18 @@ function gausslaguerre_asy0_bulk(n, k, d, T) end -function gausslaguerre_asy_bessel(n, alpha, jak, d, T) - if alpha == 0 +function gausslaguerre_asy_bessel(n, α, jak, d, T) + if α == 0 return gausslaguerre_asy0_bessel(n, jak, d, T) end - x3 = gl_bessel_x3(jak, d, alpha) - x5 = gl_bessel_x5(jak, d, alpha) - x7 = gl_bessel_x7(jak, d, alpha) - x9 = gl_bessel_x9(jak, d, alpha) - w3 = gl_bessel_w3(jak, d, alpha) - w5 = gl_bessel_w5(jak, d, alpha) - w7 = gl_bessel_w7(jak, d, alpha) - w9 = gl_bessel_w9(jak, d, alpha) + x3 = gl_bessel_x3(jak, d, α) + x5 = 
gl_bessel_x5(jak, d, α) + x7 = gl_bessel_x7(jak, d, α) + x9 = gl_bessel_x9(jak, d, α) + w3 = gl_bessel_w3(jak, d, α) + w5 = gl_bessel_w5(jak, d, α) + w7 = gl_bessel_w7(jak, d, α) + w9 = gl_bessel_w9(jak, d, α) xs = (x3, x5, x7, x9) ws = (w3, w5, w7, w9) @@ -463,7 +506,7 @@ function gausslaguerre_asy_bessel(n, alpha, jak, d, T) # Invoking the besselj function below is the cause of memory # allocation of this routine - wfactor = 4d * xk^alpha * exp(-xk) / besselj(alpha-1, jak)^2 + wfactor = 4d * xk^α * exp(-xk) / besselj(α-1, jak)^2 wk = wfactor * (1 + wk) wdelta *= wfactor @@ -501,30 +544,30 @@ end function compute_airy_root(n, k) index = n-k+1 - if index <= 11 + if index ≤ 11 ak = airy_roots[index] else - t = 3 * pi/2 * (index-0.25) + t = 3 * π/2 * (index-0.25) ak = -t^(2/3)*(1 + 5/48/t^2 - 5/36/t^4 + 77125/82944/t^6 -10856875/6967296/t^8) end ak end -function gausslaguerre_asy_airy(n, alpha, k, d, T) - if alpha == 0 +function gausslaguerre_asy_airy(n, α, k, d, T) + if α == 0 return gausslaguerre_asy0_airy(n, k, d, T) end ak = compute_airy_root(n, k) - x1 = gl_airy_x1(ak, d, alpha) - x3 = gl_airy_x3(ak, d, alpha) - x5 = gl_airy_x5(ak, d, alpha) + x1 = gl_airy_x1(ak, d, α) + x3 = gl_airy_x3(ak, d, α) + x5 = gl_airy_x5(ak, d, α) xs = (x1, x3, x5) xk, xdelta = (T > 0) ? sum_explicit(xs, (T+1)>>1) : sum_adaptive(xs) - wk = 4^(1/3)*xk^(alpha+1/3)*exp(-xk)/(airyaiprime(ak))^2 + wk = 4^(1/3)*xk^(α+1/3)*exp(-xk)/(airyaiprime(ak))^2 wdelta = abs(wk) return xk, wk, max(xdelta,wdelta) @@ -548,25 +591,23 @@ function gausslaguerre_asy0_airy(n, k, d, T) end - - """ Calculate Gauss-Laguerre nodes and weights from the eigenvalue decomposition of the Jacobi matrix. """ function gausslaguerre_GW(n, α) - _α = 2*(1:n) .+ (α-1) # 3-term recurrence coeffs a and b + _α = 2*(1:n) .+ (α-1) # 3-term recurrence coeffs a and b β = sqrt.( (1:n-1).*(α .+ (1:n-1)) ) T = SymTridiagonal(promote(collect(_α), β)...) - x, V = eigen(T) # eigenvalue decomposition - w = gamma(α+1)*V[1,:].^2 # Quadrature weights + x, V = eigen(T) # eigenvalue decomposition + w = gamma(α+1)*V[1,:].^2 # Quadrature weights return x, vec(w) end ########################## Routines for the forward recurrence ########################## -function gl_rec_newton(x0, n, alpha; maxiter = 20, computeweight = true) +function gl_rec_newton(x0, n, α; maxiter = 20, computeweight = true) T = eltype(x0) step = x0 iter = 0 @@ -577,8 +618,8 @@ function gl_rec_newton(x0, n, alpha; maxiter = 20, computeweight = true) pn_deriv = zero(T) while (abs(step) > 40eps(T)*xk) && (iter < maxiter) iter += 1 - pn, pn_deriv = evalLaguerreRec(n, alpha, xk) - if abs(pn) >= abs(pn_prev)*(1-50eps(T)) + pn, pn_deriv = evalLaguerreRec(n, α, xk) + if abs(pn) ≥ abs(pn_prev)*(1-50eps(T)) # The function values do not decrease enough any more due to roundoff errors. xk = xk_prev # Set to the previous value and quit. break @@ -588,21 +629,21 @@ function gl_rec_newton(x0, n, alpha; maxiter = 20, computeweight = true) xk -= step pn_prev = pn end - if ( xk < 0 ) || ( xk > 4n + 2alpha + 2 ) || ( iter == maxiter ) - @warn "Newton method may not have converged in gausslaguerre_rec($n,$alpha)." + if ( xk < 0 ) || ( xk > 4n + 2α + 2 ) || ( iter == maxiter ) + @warn "Newton method may not have converged in gausslaguerre_rec($n,$α)." 
end wk = oftype(xk, 0) if computeweight - pn_min1, ~ = evalLaguerreRec(n-1, alpha, xk) - wk = (n^2 +alpha*n)^(-1/2)/pn_min1/pn_deriv + pn_min1, ~ = evalLaguerreRec(n-1, α, xk) + wk = (n^2 +α*n)^(-1/2)/pn_min1/pn_deriv end return xk, wk end "Compute Gauss-Laguerre rule based on the recurrence relation, using Newton iterations on an initial guess." -function gausslaguerre_rec(n, alpha; reduced = false) - T = typeof(float(alpha)) +function gausslaguerre_rec(n, α; reduced = false) + T = typeof(float(α)) n_alloc = reduced ? 0 : n w = zeros(T, n_alloc) @@ -611,15 +652,15 @@ function gausslaguerre_rec(n, alpha; reduced = false) # We compute up to 7 starting values for the Newton iterations n_pre = min(n, 7) - nu = 4n + 2alpha + 2 - x_pre = T.(besselroots(alpha, n_pre)).^2 / nu # this is a lower bound by [DLMF 18.16.10] + nu = 4n + 2α + 2 + x_pre = T.(besselroots(α, n_pre)).^2 / nu # this is a lower bound by [DLMF 18.16.10] - noUnderflow = true # this flag turns false once the weights start to underflow + noUnderflow = true # this flag turns false once the weights start to underflow for k in 1:n # Use sextic extrapolation for a new initial guess - xk = (k <= n_pre) ? x_pre[k] : 7*x[k-1] -21*x[k-2] +35*x[k-3] -35*x[k-4] +21*x[k-5] -7*x[k-6] +x[k-7] + xk = (k ≤ n_pre) ? x_pre[k] : 7*x[k-1] -21*x[k-2] +35*x[k-3] -35*x[k-4] +21*x[k-5] -7*x[k-6] +x[k-7] - xk, wk = gl_rec_newton(xk, n, alpha, maxiter = 20, computeweight = noUnderflow) + xk, wk = gl_rec_newton(xk, n, α, maxiter = 20, computeweight = noUnderflow) if noUnderflow && abs(wk) < underflow_threshold(T) noUnderflow = false end @@ -644,18 +685,18 @@ end Evaluate the orthonormal associated Laguerre polynomial with positive leading coefficient, as well as its derivative, in the point x using the recurrence relation. """ -function evalLaguerreRec(n, alpha, x) - T = typeof(alpha) +function evalLaguerreRec(n, α, x) + T = typeof(α) pnprev = zero(T) - pn = 1/sqrt(gamma(alpha+1)) + pn = 1/sqrt(gamma(α+1)) pndprev = zero(T) pnd = zero(T) for k in 1:n pnold = pn - pn = (x -2*k -alpha+1)/sqrt(k*(alpha+k))*pnold-sqrt((k-1+alpha)*(k-1)/k/(k+alpha))*pnprev + pn = (x -2*k -α+1)/sqrt(k*(α+k))*pnold-sqrt((k-1+α)*(k-1)/k/(k+α))*pnprev pnprev = pnold pndold = pnd - pnd = (pnold+(x-2*k-alpha+1)*pndold)/sqrt(k*(alpha+k)) -sqrt((k-1+alpha)*(k-1)/k/(alpha+k))*pndprev + pnd = (pnold+(x-2*k-α+1)*pndold)/sqrt(k*(α+k)) -sqrt((k-1+α)*(k-1)/k/(α+k))*pndprev pndprev = pndold end diff --git a/src/gausslegendre.jl b/src/gausslegendre.jl index a388ead..8082c2f 100644 --- a/src/gausslegendre.jl +++ b/src/gausslegendre.jl @@ -1,3 +1,24 @@ +@doc raw""" + gausslegendre(n::Integer) -> Tuple{Vector{Float64},Vector{Float64}} + +Return nodes and weights of [Gauss-Legendre quadrature](https://en.wikipedia.org/wiki/Gauss%E2%80%93Legendre_quadrature). + +```math +\int_{-1}^{1} f(x) dx \approx \sum_{i=1}^{n} w_i f(x_i) +``` + +# Examples +```jldoctest; setup = :(using FastGaussQuadrature, LinearAlgebra) +julia> x, w = gausslegendre(3); + +julia> f(x) = x^4; + +julia> I = dot(w, f.(x)); + +julia> I ≈ 2/5 +true +``` +""" function gausslegendre(n::Integer) # GAUSSLEGENDRE(n) COMPUTE THE GAUSS-LEGENDRE NODES AND WEIGHTS IN O(n) time. 
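The `gausslaguerre_GW` routine shown earlier in this file reads nodes and weights off the eigendecomposition of the symmetric Jacobi matrix of the Laguerre recurrence. The following is an illustrative, self-contained sketch of that Golub-Welsch construction, specialised to α = 0 so that the zeroth moment is Γ(1) = 1; it is not the package implementation.

```julia
using LinearAlgebra

# Golub-Welsch for the weight exp(-x) on (0, ∞): for α = 0 the Laguerre
# three-term recurrence gives Jacobi-matrix diagonal 2k - 1 and off-diagonal k.
function laguerre_golub_welsch(n)
    dv = collect(Float64, 2 .* (1:n) .- 1)   # diagonal of the Jacobi matrix
    ev = collect(Float64, 1:n-1)             # off-diagonal of the Jacobi matrix
    x, V = eigen(SymTridiagonal(dv, ev))     # nodes are the eigenvalues
    w = V[1, :] .^ 2                         # weights from the first eigenvector row
    return x, w
end

x, w = laguerre_golub_welsch(8)
# An 8-point rule integrates polynomials up to degree 15 exactly:
# ∫₀^∞ x^3 exp(-x) dx = 3! = 6
isapprox(dot(w, x .^ 3), 6; atol = 1e-10)
```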
@@ -22,7 +43,7 @@ function gausslegendre(n::Integer) sqrt(5 - b) / 3, sqrt(5 + b) / 3], [(322 - 13 * sqrt(70)) / 900, (322 + 13 * sqrt(70)) / 900, 128 / 225, (322 + 13 * sqrt(70)) / 900, (322 - 13 * sqrt(70)) / 900]) - elseif n <= 60 + elseif n ≤ 60 # NEWTON'S METHOD WITH THREE-TERM RECURRENCE: return rec(n) else @@ -63,7 +84,7 @@ function legpts_nodes(n, a) nodes = cot.(a) vn² = vn * vn vn⁴ = vn² * vn² - @inbounds if n <= 255 + @inbounds if n ≤ 255 vn⁶ = vn⁴ * vn² for i in 1:m u = nodes[i] @@ -82,7 +103,7 @@ function legpts_nodes(n, a) node = muladd(v4, vn⁶, node) nodes[i] = node end - elseif n <= 3950 + elseif n ≤ 3950 for i in 1:m u = nodes[i] u² = u^2 @@ -114,13 +135,13 @@ function legpts_weights(n, m, a) vn = 1 / (n + 0.5) vn² = vn^2 weights = Array{Float64}(undef, m) - if n <= 850000 + if n ≤ 850000 @inbounds for i in 1:m weights[i] = cot(a[i]) end end # Split out the part that can be vectorized by llvm - @inbounds if n <= 170 + @inbounds if n ≤ 170 for i in 1:m u = weights[i] u² = u^2 @@ -142,7 +163,7 @@ function legpts_weights(n, m, a) 3749 / 15360 * u, -1125 / 1024) weights[i] = @evalpoly(vn², 1 / vn² + W1, W2, W3) end - elseif n <= 1500 + elseif n ≤ 1500 for i in 1:m u = weights[i] u² = u^2 @@ -156,7 +177,7 @@ function legpts_weights(n, m, a) muladd(ua, -31.0, 81.0)) / 384 weights[i] = muladd(vn², W2, 1 / vn² + W1) end - elseif n <= 850000 + elseif n ≤ 850000 for i in 1:m u = weights[i] u² = u^2 diff --git a/src/gausslobatto.jl b/src/gausslobatto.jl index 6bcb702..d454474 100644 --- a/src/gausslobatto.jl +++ b/src/gausslobatto.jl @@ -1,3 +1,33 @@ +@doc raw""" + gausslobatto(n::Integer) -> Tuple{Vector{Float64},Vector{Float64}} + +Return nodes and weights of [Gauss-Lobatto quadrature](https://mathworld.wolfram.com/LobattoQuadrature.html). + +```math +\int_{-1}^{1} f(x) dx \approx \sum_{i=1}^{n} w_i f(x_i) +``` + +# Examples +```jldoctest; setup = :(using FastGaussQuadrature, LinearAlgebra) +julia> x, w = gausslobatto(4); + +julia> f(x) = x^4; + +julia> I = dot(w, f.(x)); + +julia> I ≈ 2/5 +true +``` + +Note that the both ends of nodes are fixed at -1 and 1. + +```jldoctest; setup = :(using FastGaussQuadrature, LinearAlgebra) +julia> x, w = gausslobatto(4); + +julia> x[1], x[end] +(-1.0, 1.0) +``` +""" function gausslobatto(n::Integer) # Gauss-Legendre-Lobatto Quadrature Nodes and Weights if n ≤ 1 diff --git a/src/gaussradau.jl b/src/gaussradau.jl index 7c677ef..42ab911 100644 --- a/src/gaussradau.jl +++ b/src/gaussradau.jl @@ -1,7 +1,32 @@ -""" - gaussradau(n) +@doc raw""" + gaussradau(n::Integer) -> Tuple{Vector{Float64},Vector{Float64}} + +Return nodes and weights of [Gauss-Radau quadrature](https://mathworld.wolfram.com/RadauQuadrature.html). + +```math +\int_{-1}^{1} f(x) dx \approx \sum_{i=1}^{n} w_i f(x_i) +``` + +# Examples +```jldoctest; setup = :(using FastGaussQuadrature, LinearAlgebra) +julia> x, w = gaussradau(3); + +julia> f(x) = x^4; + +julia> I = dot(w, f.(x)); + +julia> I ≈ 2/5 +true +``` + +Note that the first node is fixed at -1. + +```jldoctest; setup = :(using FastGaussQuadrature, LinearAlgebra) +julia> x, w = gaussradau(3); -Creates the n-point Gauss-Radau quadrature rule, with the first node fixed at -1. 
+julia> x[1] +-1.0 +``` """ function gaussradau(n::Integer, T::Type=Float64) a = b = zero(T) @@ -22,18 +47,18 @@ function gaussradau(n::Integer, T::Type=Float64) end end -function gaussradau(n::Integer, a, b) +function gaussradau(n::Integer, α, β) if n ≤ 0 throw(DomainError(n, "Input N must be a positive integer")) end m = n - 1 - s = a + b + s = α + β T = float(eltype(s)) - μ = jacobimoment(a, b) + μ = jacobimoment(α, β) n == 0 && return T[], T[] n == 1 && return [-one(T)], [μ] - J = jacobi_jacobimatrix(n, a, b) - aᴿ = -1 + 2m*convert(T,m+a)/((2m+s)*(2m+s+1)) + J = jacobi_jacobimatrix(n, α, β) + aᴿ = -1 + 2m*convert(T,m+α)/((2m+s)*(2m+s+1)) J.dv[end] = aᴿ x, V = eigen(J) w = V[1,:].^2 .* μ diff --git a/test/test_gausschebyshev.jl b/test/test_gausschebyshev.jl index d8bdb8c..2ef488f 100644 --- a/test/test_gausschebyshev.jl +++ b/test/test_gausschebyshev.jl @@ -12,13 +12,13 @@ @testset "x.^2" begin x, w = gausschebyshev(n,1) - @test dot(x.^2,w) ≈ pi/2 + @test dot(x.^2,w) ≈ π/2 x, w = gausschebyshev(n,2) - @test dot(x.^2,w) ≈ pi/8 + @test dot(x.^2,w) ≈ π/8 x, w = gausschebyshev(n,3) - @test dot(x.^2,w) ≈ pi/2 + @test dot(x.^2,w) ≈ π/2 x, w = gausschebyshev(n,4) - @test dot(x.^2,w) ≈ pi/2 + @test dot(x.^2,w) ≈ π/2 end @testset "x^3" begin @@ -27,8 +27,8 @@ x, w = gausschebyshev(n,2) @test abs(dot(x.^3,w))<1e-15 x, w = gausschebyshev(n,3) - @test dot(x.^3,w) ≈ 3*pi/8 + @test dot(x.^3,w) ≈ 3*π/8 x, w = gausschebyshev(n,4) - @test dot(x.^3,w) ≈ -3*pi/8 + @test dot(x.^3,w) ≈ -3*π/8 end end \ No newline at end of file diff --git a/test/test_gausshermite.jl b/test/test_gausshermite.jl index 680fdee..c907ba7 100644 --- a/test/test_gausshermite.jl +++ b/test/test_gausshermite.jl @@ -16,20 +16,20 @@ using FastGaussQuadrature @testset "Golub-Welsch" begin n = 18; - x,w = gausshermite( n ) + x,w = gausshermite(n) @test isa(x,Vector{Float64}) @test isa(w,Vector{Float64}) @test (length(x) == n && length(w) == n) - @test (dot(w,x) < tol && abs(dot(w,x.^2) - sqrt(pi)/2) < tol) + @test (dot(w,x) < tol && abs(dot(w,x.^2) - sqrt(π)/2) < tol) end @testset "Recurrence" begin n = 42 - x,w = gausshermite( n ) + x,w = gausshermite(n) @test isa(x,Vector{Float64}) @test isa(w,Vector{Float64}) @test (length(x) == n && length(w) == n) - @test (dot(w,x) < tol && abs(dot(w,x.^2) - sqrt(pi)/2) < tol) + @test (dot(w,x) < tol && abs(dot(w,x.^2) - sqrt(π)/2) < tol) @test abs(x[37] - 5.660357581283058) < tol @test abs(w[17] - 0.032202101288908) < tol @@ -39,20 +39,20 @@ using FastGaussQuadrature @testset "Asymptotics" begin n = 251 - x,w = gausshermite( n ) + x,w = gausshermite(n) @test isa(x,Vector{Float64}) @test isa(w,Vector{Float64}) @test (length(x) == n && length(w) == n) - @test (dot(w,x) < tol && abs(dot(w,x.^2) - sqrt(pi)/2) < 300*tol) + @test (dot(w,x) < tol && abs(dot(w,x.^2) - sqrt(π)/2) < 300*tol) @test abs(x[37] - -13.292221459334638) < tol @test abs(w[123] - 0.117419270715955) < 2*tol n = 500 - x,w = gausshermite( n ) + x,w = gausshermite(n) @test isa(x,Vector{Float64}) @test isa(w,Vector{Float64}) @test (length(x) == n && length(w) == n) - @test (dot(w,x) < tol && abs(dot(w,x.^2) - sqrt(pi)/2) < 300*tol) + @test (dot(w,x) < tol && abs(dot(w,x.^2) - sqrt(π)/2) < 300*tol) end @testset "Recurrence" begin @@ -77,7 +77,7 @@ using FastGaussQuadrature @testset "Transform" begin n = 500 - x,w = FastGaussQuadrature.unweightedgausshermite( n ) + x,w = FastGaussQuadrature.unweightedgausshermite(n) @test w[1] ≠ 0 V = Array{Float64}(undef,n,n) @@ -85,7 +85,6 @@ using FastGaussQuadrature V[k,:] = 
FastGaussQuadrature.hermpoly_rec(0:n-1, sqrt(2)*x[k]) end - f = x -> first(FastGaussQuadrature.hermpoly_rec(1, sqrt(2)*x)) @test V' * (w.* f.(x)) ≈ [0; sqrt(π); zeros(n-2)] f = x -> first(FastGaussQuadrature.hermpoly_rec(2, sqrt(2)*x)) diff --git a/test/test_gausslaguerre.jl b/test/test_gausslaguerre.jl index 3f60f58..2457b9b 100644 --- a/test/test_gausslaguerre.jl +++ b/test/test_gausslaguerre.jl @@ -3,14 +3,14 @@ # Test integration tol = 4.0e-10 -# Evaluate the exact integral of x^alpha * p(x) *exp(-x) on the positive halfline, +# Evaluate the exact integral of x^α * p(x) *exp(-x) on the positive halfline, # where p(x) = sum( coef[i+1]*x^i, i=0..degree(p)) is a polynomial given by its # monomial coefficients -function exact_integral_poly(alpha, coef) +function exact_integral_poly(α, coef) # do calculations in BigFloat precision z = big(0.0) for i in 1:length(coef) - z += gamma(big(alpha)+big(i)) * big(coef[i]) + z += gamma(big(α)+big(i)) * big(coef[i]) end Float64(z) end @@ -24,11 +24,11 @@ function polyval(x, coef) z end -# Evaluate the exact integral of cos(a*x) * x^alpha * exp(-x) on the positive halfline -# - For alpha = 0 +# Evaluate the exact integral of cos(a*x) * x^α * exp(-x) on the positive halfline +# - For α = 0 exact_integral_cos1(a) = 1/(a^2+1) -# - For alpha = 1/2 -exact_integral_cos2(a) = sqrt(pi)*cos(3/2*atan(a))/2 / (a^2+1)^(3/4) +# - For α = 1/2 +exact_integral_cos2(a) = sqrt(π)*cos(3/2*atan(a))/2 / (a^2+1)^(3/4) Random.seed!(0) @@ -65,11 +65,11 @@ Random.seed!(0) ########## n = 42 ########## - alpha = 0.0 + α = 0.0 coef = rand(17) - Z = exact_integral_poly(alpha, coef) + Z = exact_integral_poly(α, coef) - x, w = gausslaguerre(n, alpha) + x, w = gausslaguerre(n, α) @test isa(x, Vector{Float64}) @test isa(w, Vector{Float64}) @test abs(sum(w) - 1) < tol @@ -78,21 +78,21 @@ Random.seed!(0) @test abs(w[7] - 0.055372813167092) < tol @test abs(dot(w, polyval(x, coef)) - Z)/abs(Z) < tol - x_gw, w_gw = FastGaussQuadrature.gausslaguerre_GW(n, alpha) + x_gw, w_gw = FastGaussQuadrature.gausslaguerre_GW(n, α) @test abs(x[37] - 98.388267163326702) < tol @test abs(w[7] - 0.055372813167092) < tol @test abs(dot(w, polyval(x, coef)) - Z)/abs(Z) < tol - x_rec, w_rec = FastGaussQuadrature.gausslaguerre_rec(n, alpha) + x_rec, w_rec = FastGaussQuadrature.gausslaguerre_rec(n, α) @test abs(x[37] - 98.388267163326702) < tol @test abs(w[7] - 0.055372813167092) < tol @test abs(dot(w, polyval(x, coef)) - Z)/abs(Z) < tol - alpha = 0.5 + α = 0.5 coef = rand(17) - Z = exact_integral_poly(alpha, coef) - x, w = gausslaguerre(n, alpha) + Z = exact_integral_poly(α, coef) + x, w = gausslaguerre(n, α) @test abs(dot(w, polyval(x, coef)) - Z)/abs(Z) < tol @@ -100,8 +100,8 @@ Random.seed!(0) ########## n = 251 ########## - alpha = 0.0 - x, w = gausslaguerre(n, alpha) + α = 0.0 + x, w = gausslaguerre(n, α) a = 4 Z = exact_integral_cos1(a) @test isa(x, Vector{Float64}) @@ -110,8 +110,8 @@ Random.seed!(0) @test abs(w[3] - 0.050091759039996) < tol @test abs(dot(w, cos.(a*x)) - Z) < tol - alpha = 0.5 - x, w = gausslaguerre(n, alpha) + α = 0.5 + x, w = gausslaguerre(n, α) @test abs(x[44] -19.095577327730616 ) < tol @test abs(w[18] - 0.026245779174690266) < tol a = 4 @@ -122,15 +122,15 @@ Random.seed!(0) ############ n = 350000 ############ - alpha = 0.0 + α = 0.0 a = 50 Z = exact_integral_cos1(a) - x, w = gausslaguerre(n, alpha) + x, w = gausslaguerre(n, α) @test isa(x, Vector{Float64}) @test isa(w, Vector{Float64}) @test abs(dot(w, cos.(a*x)) - Z) < tol - alpha = 0.44 - x, w = gausslaguerre(n, alpha; reduced = 
true)
     @test w[end] < 100*floatmin(Float64)
 end
\ No newline at end of file
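As a closing illustration of what these tests check: an `n`-point Gauss-Laguerre rule with parameter `α` integrates `x^k` against the weight `x^α * exp(-x)` exactly for `k ≤ 2n-1`, with exact value Γ(α + k + 1). Assuming FastGaussQuadrature and SpecialFunctions are installed, a quick check along those lines might read:

```julia
using FastGaussQuadrature, SpecialFunctions, LinearAlgebra

α, n, k = 0.5, 10, 7
x, w = gausslaguerre(n, α)           # 10-point generalized Gauss-Laguerre rule
exact = gamma(α + k + 1)             # ∫₀^∞ x^(α+k) exp(-x) dx
abs(dot(w, x .^ k) - exact) / exact < 1e-12   # exact up to roundoff for k ≤ 2n-1
```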