From 219b9f4282c576e4f7cbcd66d67d791cc4d7cad4 Mon Sep 17 00:00:00 2001
From: Hong Ge
Date: Mon, 28 Aug 2017 18:12:42 +0100
Subject: [PATCH 01/27] Initial support for infix ~ (#173).

---
 src/core/compiler.jl | 13 +++++++++++++
 test/compiler.jl/tilde.jl | 18 ++++++++++++++++++
 2 files changed, 31 insertions(+)
 create mode 100644 test/compiler.jl/tilde.jl

diff --git a/src/core/compiler.jl b/src/core/compiler.jl
index ec2580183..bac96e3ea 100644
--- a/src/core/compiler.jl
+++ b/src/core/compiler.jl
@@ -168,6 +168,7 @@ macro model(fexpr)

   dprintln(1, fexpr)

+  fexpr = translate(fexpr)
   fname = fexpr.args[1].args[1]      # Get model name f
   fargs = fexpr.args[1].args[2:end]  # Get model parameters (x,y;z=..)

@@ -326,3 +327,15 @@ getvsym(expr::Expr) = begin
   end
   curr
 end
+
+
+translate!(ex::Any) = ex
+translate!(ex::Expr) = begin
+  if (ex.head === :call && ex.args[1] === :(~))
+    ex.head = :macrocall; ex.args[1] = Symbol("@~")
+  else
+    map(translate!, ex.args)
+  end
+  ex
+end
+translate(ex::Expr) = translate!(deepcopy(ex))
diff --git a/test/compiler.jl/tilde.jl b/test/compiler.jl/tilde.jl
new file mode 100644
index 000000000..12f7c68ee
--- /dev/null
+++ b/test/compiler.jl/tilde.jl
@@ -0,0 +1,18 @@
+using Turing
+import Turing.translate!
+
+ex = quote
+  x = 1
+  y = rand()
+  y ~ Normal(0,1)
+end
+
+res = translate!(:(y~Normal(1,1)))
+
+Base.@assert res.head == :macrocall
+Base.@assert res.args == [Symbol("@~"), :y, :(Normal(1, 1))]
+
+res2 = translate!(ex)
+
+Base.@assert res2.args[end].head == :macrocall
+Base.@assert res2.args[end].args == [Symbol("@~"), :y, :(Normal(1, 1))]

From ffb9407868667bc33a10694d76a41dce1d9f3f23 Mon Sep 17 00:00:00 2001
From: Kai Xu
Date: Tue, 3 Oct 2017 01:53:07 +0100
Subject: [PATCH 02/27] separate model wrapper call in sample.jl test

---
 test/compiler.jl/sample.jl | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/test/compiler.jl/sample.jl b/test/compiler.jl/sample.jl
index e48c42560..6fbc57710 100644
--- a/test/compiler.jl/sample.jl
+++ b/test/compiler.jl/sample.jl
@@ -15,7 +15,8 @@ x = [2.0, 3.0]
 alg = Gibbs(1000, HMC(1, 0.2, 3, :m), PG(10, 1, :s))
 # NOTE: want to translate below to
 # chn = sample(gdemo, Dict(:x => x), alg)
-chn = sample(gdemo2(x), alg);
+modelf = gdemo2(x)
+chn = sample(modelf, alg);
 mean(chn[:s])

 # Turing.TURING[:modelex]

From 2a9466d3da251f65c9f41bd6070a50fb1c876cd5 Mon Sep 17 00:00:00 2001
From: Kai Xu
Date: Tue, 3 Oct 2017 01:55:16 +0100
Subject: [PATCH 03/27] update ForwardDiff.Dual signature

---
 src/core/ad.jl | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/src/core/ad.jl b/src/core/ad.jl
index a042ea3aa..7f767171e 100644
--- a/src/core/ad.jl
+++ b/src/core/ad.jl
@@ -55,12 +55,12 @@ gradient(vi::VarInfo, model::Function, spl::Union{Void, Sampler}) = begin
     vals = getval(vi, vns[i])
     if vns[i] in vn_chunk      # for each variable to compute gradient in this round
       for i = 1:l
-        vi[range[i]] = ForwardDiff.Dual{CHUNKSIZE, Float64}(realpart(vals[i]), SEEDS[dim_count])
+        vi[range[i]] = ForwardDiff.Dual{Void, Float64, CHUNKSIZE}(realpart(vals[i]), SEEDS[dim_count])
         dim_count += 1         # count
       end
     else                       # for other varilables (no gradient in this round)
       for i = 1:l
-        vi[range[i]] = ForwardDiff.Dual{CHUNKSIZE, Float64}(realpart(vals[i]))
+        vi[range[i]] = ForwardDiff.Dual{Void, Float64, CHUNKSIZE}(realpart(vals[i]))
       end
     end
   end

From 008f41e26006bfdfb3fe6a33cf91615b1ca88c6d Mon Sep 17 00:00:00 2001
From: Kai Xu
Date: Wed, 4 Oct 2017 08:09:06 +0100
Subject: [PATCH 04/27] sample.jl test passed

---
 src/core/compiler.jl | 21 +++++++++++++--------
 src/samplers/gibbs.jl | 2 +-
 src/samplers/hmc.jl | 2 +-
 src/samplers/is.jl | 2 +-
 src/samplers/support/hmc_core.jl | 3 ++-
 src/trace/trace.jl | 2 +-
 test/compiler.jl/sample.jl | 3 +--
 test/varinfo.jl/varinfo.jl | 2 +-
 8 files changed, 21 insertions(+), 16 deletions(-)

diff --git a/src/core/compiler.jl b/src/core/compiler.jl
index 701f45db9..45222e4f7 100644
--- a/src/core/compiler.jl
+++ b/src/core/compiler.jl
@@ -154,7 +154,7 @@ macro model(fexpr)
   # Compiler design: sample(fname_compiletime(x,y), sampler)
   #   fname_compiletime(x=nothing,y=nothing; data=data,compiler=compiler) = begin
   #     ex = quote
-  #       fname_runtime(;vi=VarInfo,sampler=nothing) = begin
+  #       fname_runtime(vi::VarInfo,sampler::Sampler) = begin
   #         x=x,y=y
   #         # pour all variables in data dictionary, e.g.
   #         k = data[:k]
@@ -179,6 +179,7 @@ macro model(fexpr)
   #  ==> f(x,y;)
   #  f(x,y; c=1)
   #  ==> unchanged
+
   if (length(fargs) == 0 ||      # e.g. f()
       isa(fargs[1], Symbol) ||   # e.g. f(x,y)
       fargs[1].head == :kw)      # e.g. f(x,y=1)
@@ -186,7 +187,7 @@ macro model(fexpr)
   end

   dprintln(1, fname)
-  dprintln(1, fargs)
+  # dprintln(1, fargs)
   dprintln(1, fbody)

   # Remove positional arguments from inner function, e.g.
@@ -194,16 +195,16 @@ macro model(fexpr)
   #  ==> f(; c=1)
   #  f(x,y;)
   #  ==> f(;)
-  fargs_inner = deepcopy(fargs)[1:1]
+  # fargs_inner = deepcopy(fargs)[1:1]

   # Add keyword arguments, e.g.
   #  f(; c=1)
   #  ==> f(; c=1, :vi=VarInfo(), :sample=nothing)
   #  f(;)
   #  ==> f(; :vi=VarInfo(), :sample=nothing)
-  push!(fargs_inner[1].args, Expr(:kw, :vi, :(Turing.VarInfo())))
-  push!(fargs_inner[1].args, Expr(:kw, :sampler, :(nothing)))
-  dprintln(1, fargs_inner)
+  # push!(fargs_inner[1].args, Expr(:kw, :vi, :(Turing.VarInfo())))
+  # push!(fargs_inner[1].args, Expr(:kw, :sampler, :(nothing)))
+  # dprintln(1, fargs_inner)

   # Modify fbody, so that we always return VarInfo
   fbody_inner = deepcopy(fbody)
@@ -238,7 +239,11 @@ macro model(fexpr)
   fname_inner = Symbol("$(fname)_model")
   fdefn_inner = Expr(:(=), fname_inner, Expr(:function, Expr(:call, fname_inner))) # fdefn = :( $fname() )
-  push!(fdefn_inner.args[2].args[1].args, fargs_inner...)     # set parameters (x,y;data..)
+  # push!(fdefn_inner.args[2].args[1].args, fargs_inner...)   # set parameters (x,y;data..)
+
+  push!(fdefn_inner.args[2].args[1].args, :(vi::Turing.VarInfo))
+  push!(fdefn_inner.args[2].args[1].args, :(sampler::Union{Void,Turing.Sampler}))
+
   push!(fdefn_inner.args[2].args, deepcopy(fbody_inner))      # set function definition
   dprintln(1, fdefn_inner)
@@ -264,7 +269,7 @@ macro model(fexpr)
   fdefn_outer = Expr(:function, Expr(:call, fname, fargs_outer...),
                      Expr(:block, Expr(:return, fname_inner)))
-
+
   unshift!(fdefn_outer.args[2].args, :(Main.eval(fdefn_inner)))
   unshift!(fdefn_outer.args[2].args, quote
     # Check fargs, data
diff --git a/src/samplers/gibbs.jl b/src/samplers/gibbs.jl
index 0a2912f8f..4961a8fb1 100644
--- a/src/samplers/gibbs.jl
+++ b/src/samplers/gibbs.jl
@@ -88,7 +88,7 @@ sample(model::Function, alg::Gibbs;

   # Init parameters
   varInfo = resume_from == nothing ?
-            model() :
+            Base.invokelatest(model, VarInfo(), nothing) :
             resume_from.info[:vi]
   n = spl.alg.n_iters; i_thin = 1
diff --git a/src/samplers/hmc.jl b/src/samplers/hmc.jl
index 55a7f3d32..a68b81162 100644
--- a/src/samplers/hmc.jl
+++ b/src/samplers/hmc.jl
@@ -100,7 +100,7 @@ function sample{T<:Hamiltonian}(model::Function, alg::T;
   end

   vi = resume_from == nothing ?
-       model() :
+       Base.invokelatest(model, VarInfo(), nothing) :
       deepcopy(resume_from.info[:vi])

   if spl.alg.gid == 0
diff --git a/src/samplers/is.jl b/src/samplers/is.jl
index fe2337582..c9cf8ecdb 100644
--- a/src/samplers/is.jl
+++ b/src/samplers/is.jl
@@ -41,7 +41,7 @@ sample(model::Function, alg::IS) = begin
   n = spl.alg.n_particles
   for i = 1:n
-    vi = model(vi=VarInfo(), sampler=spl)
+    vi = model(VarInfo(), spl)
     samples[i] = Sample(vi)
   end
diff --git a/src/samplers/support/hmc_core.jl b/src/samplers/support/hmc_core.jl
index cad8c9697..3eecd39e4 100644
--- a/src/samplers/support/hmc_core.jl
+++ b/src/samplers/support/hmc_core.jl
@@ -5,7 +5,8 @@ runmodel(model::Function, vi::VarInfo, spl::Union{Void,Sampler}) = begin
   vi.index = 0
   setlogp!(vi, zero(Real))
   if spl != nothing spl.info[:total_eval_num] += 1 end
-  model(vi=vi, sampler=spl) # run model
+  # model(vi=vi, sampler=spl) # run model
+  Base.invokelatest(model, vi, spl)
 end

 sample_momentum(vi::VarInfo, spl::Sampler) = begin
diff --git a/src/trace/trace.jl b/src/trace/trace.jl
index bd16d0f85..60c0508de 100644
--- a/src/trace/trace.jl
+++ b/src/trace/trace.jl
@@ -61,7 +61,7 @@ function (::Type{Trace{T}}){T}(f::Function, spl::Sampler, vi :: VarInfo)
   res.vi = deepcopy(vi)
   res.vi.index = 0
   res.vi.num_produce = 0
-  res.task = Task( () -> begin _=f(vi=res.vi, sampler=spl); produce(Val{:done}); _; end )
+  res.task = Task( () -> begin _=f(vi, spl); produce(Val{:done}); _; end )
   if isa(res.task.storage, Void)
     res.task.storage = ObjectIdDict()
   end
diff --git a/test/compiler.jl/sample.jl b/test/compiler.jl/sample.jl
index 6fbc57710..e48c42560 100644
--- a/test/compiler.jl/sample.jl
+++ b/test/compiler.jl/sample.jl
@@ -15,8 +15,7 @@ x = [2.0, 3.0]
 alg = Gibbs(1000, HMC(1, 0.2, 3, :m), PG(10, 1, :s))
 # NOTE: want to translate below to
 # chn = sample(gdemo, Dict(:x => x), alg)
-modelf = gdemo2(x)
-chn = sample(modelf, alg);
+chn = sample(gdemo2(x), alg);
 mean(chn[:s])

 # Turing.TURING[:modelex]
diff --git a/test/varinfo.jl/varinfo.jl b/test/varinfo.jl/varinfo.jl
index 1b9823be6..5e96c660d 100644
--- a/test/varinfo.jl/varinfo.jl
+++ b/test/varinfo.jl/varinfo.jl
@@ -96,5 +96,5 @@ vi = g_demo_f()
 vi = step(g_demo_f, pg, vi)
 @test vi.gids == [1,1,1,0,0]

-vi = g_demo_f(vi=vi, sampler=hmc)
+vi = g_demo_f(vi, hmc)
 @test vi.gids == [1,1,1,2,2]

From 80df8fe76d87ad3c1043f856386f12468eebe791 Mon Sep 17 00:00:00 2001
From: Kai Xu
Date: Wed, 4 Oct 2017 21:51:16 +0100
Subject: [PATCH 05/27] fix some tests for Julia 0.6

---
 test/varinfo.jl/test_varname.jl | 10 +++++-----
 test/varinfo.jl/varinfo.jl | 2 +-
 2 files changed, 6 insertions(+), 6 deletions(-)

diff --git a/test/varinfo.jl/test_varname.jl b/test/varinfo.jl/test_varname.jl
index bb08000a8..36b6e5dab 100644
--- a/test/varinfo.jl/test_varname.jl
+++ b/test/varinfo.jl/test_varname.jl
@@ -26,7 +26,7 @@ v_mat = eval(varname(:((x[1,2][1+5][45][3][i])))[1])

 @model mat_name_test() = begin
-  p = Array{Dual}((2, 2))
+  p = Array{Any}((2, 2))
   for i in 1:2, j in 1:2
     p[i,j] ~ Normal(0, 1)
   end
@@ -34,7 +34,7 @@ v_mat = eval(varname(:((x[1,2][1+5][45][3][i])))[1])
 end
 chain = sample(mat_name_test(), HMC(1000, 0.2, 4))
-@test_approx_eq_eps mean(chain[Symbol("p[1,1]")]) 0 0.25
+@test_approx_eq_eps mean(chain[Symbol("p[1, 1]")]) 0 0.25

 # Multi array
 i, j = 1, 2
 v_arrarr = eval(varname(:(x[i][j]))[1])
 @test v_arrarr == "[1][2]"

 @model marr_name_test() = begin
-  p = Array{Array{Dual}}(2)
-  p[1] = Array{Dual}(2)
-  p[2] = Array{Dual}(2)
+  p = Array{Array{Any}}(2)
+  p[1] = Array{Any}(2)
+  p[2] = Array{Any}(2)
   for i in 1:2, j in 1:2
     p[i][j] ~ Normal(0, 1)
   end
diff --git a/test/varinfo.jl/varinfo.jl b/test/varinfo.jl/varinfo.jl
index 5e96c660d..326e723fc 100644
--- a/test/varinfo.jl/varinfo.jl
+++ b/test/varinfo.jl/varinfo.jl
@@ -92,7 +92,7 @@ g = Turing.Sampler(Gibbs(1000, PG(10, 2, :x, :y, :z), HMC(1, 0.4, 8, :w, :u)))

 pg, hmc = g.info[:samplers]

-vi = g_demo_f()
+vi = g_demo_f(Turing.VarInfo(), nothing)
 vi = step(g_demo_f, pg, vi)
 @test vi.gids == [1,1,1,0,0]

From 74c7c5f7fd387342d9ceb61c9c92e9277f1a4c5b Mon Sep 17 00:00:00 2001
From: Kai Xu
Date: Wed, 4 Oct 2017 21:56:24 +0100
Subject: [PATCH 06/27] remove typealias for 0.6

---
 src/core/container.jl | 2 +-
 src/core/varinfo.jl | 2 +-
 src/samplers/gibbs.jl | 2 +-
 src/samplers/support/transform.jl | 22 +++++++++------------
 src/trace/trace.jl | 4 ++--
 5 files changed, 14 insertions(+), 18 deletions(-)

diff --git a/src/core/container.jl b/src/core/container.jl
index a405ddda8..e948b5a72 100644
--- a/src/core/container.jl
+++ b/src/core/container.jl
@@ -5,7 +5,7 @@ Data structure for particle filters
 - consume(pc::ParticleContainer): return incremental likelihood
 """

-typealias Particle Trace
+const Particle = Trace

 type ParticleContainer{T<:Particle}
   model :: Function
diff --git a/src/core/varinfo.jl b/src/core/varinfo.jl
index 3932af88f..57fad8aee 100644
--- a/src/core/varinfo.jl
+++ b/src/core/varinfo.jl
@@ -62,7 +62,7 @@ type VarInfo
   end
 end

-typealias VarView Union{Int,UnitRange,Vector{Int},Vector{UnitRange}}
+const VarView = Union{Int,UnitRange,Vector{Int},Vector{UnitRange}}

 getidx(vi::VarInfo, vn::VarName) = vi.idcs[vn]
diff --git a/src/samplers/gibbs.jl b/src/samplers/gibbs.jl
index 4961a8fb1..24e00cfe7 100644
--- a/src/samplers/gibbs.jl
+++ b/src/samplers/gibbs.jl
@@ -18,7 +18,7 @@ immutable Gibbs <: InferenceAlgorithm
   Gibbs(alg::Gibbs, new_gid) = new(alg.n_iters, alg.algs, alg.thin, new_gid)
 end

-typealias GibbsComponent Union{Hamiltonian,PG}
+const GibbsComponent = Union{Hamiltonian,PG}

 function Sampler(alg::Gibbs)
   n_samplers = length(alg.algs)
diff --git a/src/samplers/support/transform.jl b/src/samplers/support/transform.jl
index a53f8239f..3c8781679 100644
--- a/src/samplers/support/transform.jl
+++ b/src/samplers/support/transform.jl
@@ -30,8 +30,7 @@
 #  a ≦ x ≦ b  #
 ###############

-typealias TransformDistribution{T<:ContinuousUnivariateDistribution}
-  Union{T, Truncated{T}}
+const TransformDistribution{T<:ContinuousUnivariateDistribution} = Union{T, Truncated{T}}

 link{T<:Real}(d::TransformDistribution, x::Union{T,Vector{T}}) = begin
   a, b = minimum(d), maximum(d)
@@ -81,9 +80,8 @@ end
 #  -∞ < x < -∞  #
 #################

-typealias RealDistribution
-  Union{Cauchy, Gumbel, Laplace, Logistic,
-        NoncentralT, Normal, NormalCanon, TDist}
+const RealDistribution = Union{Cauchy, Gumbel, Laplace, Logistic,
+                               NoncentralT, Normal, NormalCanon, TDist}

 link{T<:Real}(d::RealDistribution, x::Union{T,Vector{T}}) = x
@@ -96,10 +94,9 @@ logpdf_with_trans{T<:Real}(d::RealDistribution, x::Union{T,Vector{T}}, transform
 #  0 < x  #
 ###########

-typealias PositiveDistribution
-  Union{BetaPrime, Chi, Chisq, Erlang, Exponential, FDist, Frechet,
-        Gamma, InverseGamma, InverseGaussian, Kolmogorov, LogNormal,
-        NoncentralChisq, NoncentralF, Rayleigh, Weibull}
+const PositiveDistribution = Union{BetaPrime, Chi, Chisq, Erlang, Exponential, FDist, Frechet,
+                                   Gamma, InverseGamma, InverseGaussian, Kolmogorov, LogNormal,
+                                   NoncentralChisq, NoncentralF, Rayleigh, Weibull}

 link{T<:Real}(d::PositiveDistribution, x::Union{T,Vector{T}}) = log(x)
@@ -115,8 +112,7 @@ end
 #  0 < x < 1  #
 ###############

-typealias UnitDistribution
-  Union{Beta, KSOneSided, NoncentralBeta}
+const UnitDistribution = Union{Beta, KSOneSided, NoncentralBeta}

 link{T<:Real}(d::UnitDistribution, x::Union{T,Vector{T}}) = logit(x)
@@ -131,7 +127,7 @@ end
 #  ∑xᵢ = 1  #
 #############

-typealias SimplexDistribution Union{Dirichlet}
+const SimplexDistribution = Union{Dirichlet}

 link{T}(d::SimplexDistribution, x::Vector{T}) = link!(similar(x), d, x)
 link!{T}(y, d::SimplexDistribution, x::Vector{T}) = begin
@@ -231,7 +227,7 @@ end
 #  Positive definite  #
 #######################

-typealias PDMatDistribution Union{InverseWishart, Wishart}
+const PDMatDistribution = Union{InverseWishart, Wishart}

 link{T<:Real}(d::PDMatDistribution, x::Array{T,2}) = begin
   z = chol(x)'
diff --git a/src/trace/trace.jl b/src/trace/trace.jl
index 60c0508de..f329d110c 100644
--- a/src/trace/trace.jl
+++ b/src/trace/trace.jl
@@ -69,8 +69,8 @@ function (::Type{Trace{T}}){T}(f::Function, spl::Sampler, vi :: VarInfo)
   res
 end

-typealias TraceR Trace{:R} # Task Copy
-typealias TraceC Trace{:C} # Replay
+const TraceR = Trace{:R} # Task Copy
+const TraceC = Trace{:C} # Replay

 # step to the next observe statement, return log likelihood
 Base.consume(t::Trace) = (t.vi.num_produce += 1; Base.consume(t.task))

From 6d7feacf19fc341d68fba6edb32d344aa48fb30c Mon Sep 17 00:00:00 2001
From: Kai Xu
Date: Wed, 4 Oct 2017 22:07:12 +0100
Subject: [PATCH 07/27] make some functions 0.6-ish

---
 src/core/ad.jl | 2 +-
 src/core/varinfo.jl | 2 +-
 src/samplers/support/hmc_core.jl | 2 +-
 test/trace.jl/trace.jl | 2 +-
 test/util.jl/util.jl | 8 ++++----
 test/varinfo.jl/varinfo.jl | 4 ++--
 6 files changed, 10 insertions(+), 10 deletions(-)

diff --git a/src/core/ad.jl b/src/core/ad.jl
index 7f767171e..6673b83bb 100644
--- a/src/core/ad.jl
+++ b/src/core/ad.jl
@@ -83,7 +83,7 @@ gradient(vi::VarInfo, model::Function, spl::Union{Void, Sampler}) = begin
 end

 verifygrad(grad::Vector{Float64}) = begin
-  if any(isnan(grad)) || any(isinf(grad))
+  if any(isnan.(grad)) || any(isinf.(grad))
     dwarn(0, "Numerical error has been found in gradients.")
     dwarn(1, "grad = $(grad)")
     false
diff --git a/src/core/varinfo.jl b/src/core/varinfo.jl
index 57fad8aee..dd9b10d00 100644
--- a/src/core/varinfo.jl
+++ b/src/core/varinfo.jl
@@ -297,7 +297,7 @@ end
 #######################################

 # Check if a vn is set to NULL
-isnan(vi::VarInfo, vn::VarName) = any(isnan(getval(vi, vn)))
+isnan(vi::VarInfo, vn::VarName) = any(isnan.(getval(vi, vn)))

 # Sanity check for VarInfo.index
 checkindex(vn::VarName, vi::VarInfo) = checkindex(vn, vi, nothing)
diff --git a/src/samplers/support/hmc_core.jl b/src/samplers/support/hmc_core.jl
index 3eecd39e4..14681a053 100644
--- a/src/samplers/support/hmc_core.jl
+++ b/src/samplers/support/hmc_core.jl
@@ -56,7 +56,7 @@ find_H(p::Vector, model::Function, vi::VarInfo, spl::Sampler) = begin
   p_orig = p ./ spl.info[:wum][:stds]

   H = dot(p_orig, p_orig) / 2 + realpart(-getlogp(vi))
-  if isnan(H) H = Inf else H end
+  if isnan.(H) H = Inf else H end
 end

 find_good_eps{T}(model::Function, vi::VarInfo, spl::Sampler{T}) = begin
diff --git a/test/trace.jl/trace.jl b/test/trace.jl/trace.jl
index 4085b0bb9..52f931564 100644
--- a/test/trace.jl/trace.jl
+++ b/test/trace.jl/trace.jl
@@ -48,7 +48,7 @@ Base.@assert consume(t) == 2
 Base.@assert consume(a) == 4

 a2 = fork(t)
-a2_vals = filter(x -> ~isnan(x), a2.vi.vals[end])
+a2_vals = filter(x -> ~isnan.(x), a2.vi.vals[end])
 Base.@assert length(a2_vals) == 5
 Base.@assert t.vi.vals[end] == a2_vals
 Base.@assert t.vi.index == a2.vi.index
diff --git a/test/util.jl/util.jl b/test/util.jl/util.jl
index 25b992d69..40038a29e 100644
--- a/test/util.jl/util.jl
+++ b/test/util.jl/util.jl
@@ -8,10 +8,10 @@ i = 1
 @test @VarName(x[1,2][1+5][45][3][i])[1:end-1] == (:x,([1,2],[6],[45],[3],[1]))
 @test invlogit(1.1) == 1.0 / (exp(-1.1) + 1.0)
 @test_approx_eq_eps logit(0.3) -0.8472978603872036 1e-9
-@test isnan(logit(1.0)) == false
-@test isinf(logit(1.0)) == true
-@test isnan(logit(0.)) == false
-@test isinf(logit(0.)) == true
+@test isnan.(logit(1.0)) == false
+@test isinf.(logit(1.0)) == true
+@test isnan.(logit(0.)) == false
+@test isinf.(logit(0.)) == true
 randcat([0.1, 0.9])
 @test kl(Normal(0, 1), Normal(0, 1)) == 0
 @test align([1, 2, 3], [1]) == ([1,2,3],[1,0,0])
diff --git a/test/varinfo.jl/varinfo.jl b/test/varinfo.jl/varinfo.jl
index 326e723fc..b20f5b66d 100644
--- a/test/varinfo.jl/varinfo.jl
+++ b/test/varinfo.jl/varinfo.jl
@@ -70,12 +70,12 @@ vi[getretain(vi, 1, spl2)] = NULL

 vals_of_1 = collect(getvals(vi, spl1))
 # println(vals_of_1)
-filter!(v -> ~any(map(x -> isnan(x), v)), vals_of_1)
+filter!(v -> ~any(map(x -> isnan.(x), v)), vals_of_1)
 @test length(vals_of_1) == 3

 vals_of_2 = collect(getvals(vi, spl2))
 # println(vals_of_2)
-filter!(v -> ~any(map(x -> isnan(x), v)), vals_of_2)
+filter!(v -> ~any(map(x -> isnan.(x), v)), vals_of_2)
 @test length(vals_of_2) == 1

 @model gdemo() = begin

From b14035ecc9b46b7b2a0ca6c1ac24cf1b7244ab53 Mon Sep 17 00:00:00 2001
From: Kai Xu
Date: Wed, 4 Oct 2017 22:14:52 +0100
Subject: [PATCH 08/27] make abstract 0.6-ish

---
 src/Turing.jl | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/src/Turing.jl b/src/Turing.jl
index 2496101ee..5f24f49ae 100644
--- a/src/Turing.jl
+++ b/src/Turing.jl
@@ -53,8 +53,8 @@ global const CACHERANGES = 0b01
 # Sampler abstraction #
 #######################

-abstract InferenceAlgorithm
-abstract Hamiltonian <: InferenceAlgorithm
+abstract type InferenceAlgorithm end
+abstract type Hamiltonian <: InferenceAlgorithm end

 doc"""
     Sampler{T}

From e038605e86988318afe8cc8b98f0ddbcdd271325 Mon Sep 17 00:00:00 2001
From: Kai Xu
Date: Wed, 4 Oct 2017 23:39:34 +0100
Subject: [PATCH 09/27] change some deprecated functions

---
 src/core/compiler.jl | 6 +++---
 src/samplers/hmc.jl | 6 +++---
 src/samplers/hmcda.jl | 6 +++---
 src/samplers/is.jl | 6 +++---
 src/samplers/nuts.jl | 6 +++---
 src/samplers/pgibbs.jl | 6 +++---
 src/samplers/sghmc.jl | 2 +-
 src/samplers/sgld.jl | 2 +-
 src/samplers/smc.jl | 6 +++---
 src/samplers/support/adapt.jl | 4 ++--
 test/ad.jl/ad1.jl | 10 +++++-----
 test/ad.jl/ad2.jl | 6 +++---
 test/compiler.jl/forbid_global.jl | 8 ++++----
 test/compiler.jl/new_grammar.jl | 4 ++--
 test/compiler.jl/noreturn.jl | 4 ++--
 test/compiler.jl/sample.jl | 4 ++--
 test/geweke.jl | 10 +++++-----
 test/gibbs.jl/gibbs.jl | 4 ++--
 test/gibbs.jl/gibbs_constructor.jl | 6 +++---
 test/hmc.jl/multivariate_support.jl | 4 ++--
 test/hmcda.jl/hmcda.jl | 6 +++---
 test/hmcda.jl/hmcda_geweke.jl | 10 +++++-----
 test/io.jl/save_resume_chain.jl | 6 +++---
 test/nuts.jl/nuts.jl | 6 +++---
 test/nuts.jl/nuts_geweke.jl | 10 +++++-----
 test/pmmh.jl/pmmh.jl | 6 +++---
 test/pmmh.jl/pmmh_cons.jl | 6 +++---
 test/sampler.jl/vec_assume.jl | 2 +-
 test/sampler.jl/vectorize_observe.jl | 6 +++---
 test/sghmc.jl/sghmc.jl | 6 +++---
 test/sgld.jl/sgld.jl | 6 +++---
 test/stan-interface.jl/stan-interface.jl | 6 +++---
 test/varinfo.jl/replay.jl | 6 +++---
 33 files changed, 96 insertions(+), 96 deletions(-)

diff --git a/src/core/compiler.jl b/src/core/compiler.jl
index ef596dd68..4a9016a2e 100644
--- a/src/core/compiler.jl
+++ b/src/core/compiler.jl
@@ -143,9 +143,9 @@ Example:
 ```julia
 @model gauss() = begin
   s ~ InverseGamma(2,3)
-  m ~ Normal(0,sqrt(s))
-  1.5 ~ Normal(m, sqrt(s))
-  2.0 ~ Normal(m, sqrt(s))
+  m ~ Normal(0,sqrt.(s))
+  1.5 ~ Normal(m, sqrt.(s))
+  2.0 ~ Normal(m, sqrt.(s))
   return(s, m)
 end
 ```
diff --git a/src/samplers/hmc.jl b/src/samplers/hmc.jl
index 4e668f239..d62ddf0b5 100644
--- a/src/samplers/hmc.jl
+++ b/src/samplers/hmc.jl
@@ -15,9 +15,9 @@ Example:
 # Define a simple Normal model with unknown mean and variance.
 @model gdemo(x) = begin
   s ~ InverseGamma(2,3)
-  m ~ Normal(0,sqrt(s))
-  x[1] ~ Normal(m, sqrt(s))
-  x[2] ~ Normal(m, sqrt(s))
+  m ~ Normal(0,sqrt.(s))
+  x[1] ~ Normal(m, sqrt.(s))
+  x[2] ~ Normal(m, sqrt.(s))
   return s, m
 end
diff --git a/src/samplers/hmcda.jl b/src/samplers/hmcda.jl
index 07b9b0875..c8531b6ad 100644
--- a/src/samplers/hmcda.jl
+++ b/src/samplers/hmcda.jl
@@ -15,9 +15,9 @@ Example:
 # Define a simple Normal model with unknown mean and variance.
 @model gdemo(x) = begin
   s ~ InverseGamma(2,3)
-  m ~ Normal(0,sqrt(s))
-  x[1] ~ Normal(m, sqrt(s))
-  x[2] ~ Normal(m, sqrt(s))
+  m ~ Normal(0,sqrt.(s))
+  x[1] ~ Normal(m, sqrt.(s))
+  x[2] ~ Normal(m, sqrt.(s))
   return s, m
 end
diff --git a/src/samplers/is.jl b/src/samplers/is.jl
index c9cf8ecdb..2baa381d3 100644
--- a/src/samplers/is.jl
+++ b/src/samplers/is.jl
@@ -17,9 +17,9 @@ Example:
 # Define a simple Normal model with unknown mean and variance.
 @model gdemo(x) = begin
   s ~ InverseGamma(2,3)
-  m ~ Normal(0,sqrt(s))
-  x[1] ~ Normal(m, sqrt(s))
-  x[2] ~ Normal(m, sqrt(s))
+  m ~ Normal(0,sqrt.(s))
+  x[1] ~ Normal(m, sqrt.(s))
+  x[2] ~ Normal(m, sqrt.(s))
   return s, m
 end
diff --git a/src/samplers/nuts.jl b/src/samplers/nuts.jl
index 2ff97989a..59c71e571 100644
--- a/src/samplers/nuts.jl
+++ b/src/samplers/nuts.jl
@@ -15,9 +15,9 @@ Example:
 # Define a simple Normal model with unknown mean and variance.
 @model gdemo(x) = begin
   s ~ InverseGamma(2,3)
-  m ~ Normal(0,sqrt(s))
-  x[1] ~ Normal(m, sqrt(s))
-  x[2] ~ Normal(m, sqrt(s))
+  m ~ Normal(0,sqrt.(s))
+  x[1] ~ Normal(m, sqrt.(s))
+  x[2] ~ Normal(m, sqrt.(s))
   return s, m
 end
diff --git a/src/samplers/pgibbs.jl b/src/samplers/pgibbs.jl
index 9e64d5d3b..f280888ea 100644
--- a/src/samplers/pgibbs.jl
+++ b/src/samplers/pgibbs.jl
@@ -15,9 +15,9 @@ Example:
 # Define a simple Normal model with unknown mean and variance.
 @model gdemo(x) = begin
   s ~ InverseGamma(2,3)
-  m ~ Normal(0,sqrt(s))
-  x[1] ~ Normal(m, sqrt(s))
-  x[2] ~ Normal(m, sqrt(s))
+  m ~ Normal(0,sqrt.(s))
+  x[1] ~ Normal(m, sqrt.(s))
+  x[2] ~ Normal(m, sqrt.(s))
   return s, m
 end
diff --git a/src/samplers/sghmc.jl b/src/samplers/sghmc.jl
index 276085008..985ec4c0d 100644
--- a/src/samplers/sghmc.jl
+++ b/src/samplers/sghmc.jl
@@ -83,7 +83,7 @@ function step(model, spl::Sampler{SGHMC}, vi::VarInfo, is_first::Bool)
     # Implements the update equations from (15) of Chen et al. (2014).
     for k in 1:size(old_θ, 1)
       θ[k,:] = old_θ[k,:] + old_v[k,:]
-      noise = rand(MvNormal(zeros(length(old_θ[k,:])), sqrt(2 * η * α)*ones(length(old_θ[k,:]))))
+      noise = rand(MvNormal(zeros(length(old_θ[k,:])), sqrt.(2 * η * α)*ones(length(old_θ[k,:]))))
       v[k,:] = (1. - α) * old_v[k,:] - η * grad[k,:] + noise # NOTE: divide η by batch size
     end
   end
diff --git a/src/samplers/sgld.jl b/src/samplers/sgld.jl
index 9e2e3fb90..de28598aa 100644
--- a/src/samplers/sgld.jl
+++ b/src/samplers/sgld.jl
@@ -84,7 +84,7 @@ function step(model, spl::Sampler{SGLD}, vi::VarInfo, is_first::Bool)
   dprintln(2, "update latent variables...")
   v = zeros(Float64, size(old_θ))
   for k in 1:size(old_θ, 1)
-    noise = rand(MvNormal(zeros(length(old_θ[k,:])), sqrt(ϵ_t)*ones(length(old_θ[k,:]))))
+    noise = rand(MvNormal(zeros(length(old_θ[k,:])), sqrt.(ϵ_t)*ones(length(old_θ[k,:]))))
     θ[k,:] = old_θ[k,:] - 0.5 * ϵ_t * grad[k,:] + noise
   end
 end
diff --git a/src/samplers/smc.jl b/src/samplers/smc.jl
index 050d7bec4..ff89a3d11 100644
--- a/src/samplers/smc.jl
+++ b/src/samplers/smc.jl
@@ -16,9 +16,9 @@ Example:
 # Define a simple Normal model with unknown mean and variance.
 @model gdemo(x) = begin
   s ~ InverseGamma(2,3)
-  m ~ Normal(0,sqrt(s))
-  x[1] ~ Normal(m, sqrt(s))
-  x[2] ~ Normal(m, sqrt(s))
+  m ~ Normal(0,sqrt.(s))
+  x[1] ~ Normal(m, sqrt.(s))
+  x[2] ~ Normal(m, sqrt.(s))
   return s, m
 end
diff --git a/src/samplers/support/adapt.jl b/src/samplers/support/adapt.jl
index 70e6ce314..214f80579 100644
--- a/src/samplers/support/adapt.jl
+++ b/src/samplers/support/adapt.jl
@@ -58,7 +58,7 @@ adapt_step_size(wum::WarmUpManager, stats::Float64) = begin
   μ = wum[:μ]; ϵ_bar = wum[:ϵ_bar]; H_bar = wum[:H_bar]

   H_bar = (1 - 1 / (m + t_0)) * H_bar + 1 / (m + t_0) * (δ - stats)
-  ϵ = exp(μ - sqrt(m) / γ * H_bar)
+  ϵ = exp(μ - sqrt.(m) / γ * H_bar)
   dprintln(1, " ϵ = $ϵ, stats = $stats")

   ϵ_bar = exp(m^(-κ) * log(ϵ) + (1 - m^(-κ)) * log(ϵ_bar))
@@ -94,7 +94,7 @@ update_pre_cond(wum::WarmUpManager, θ_new) = begin
   end

   if (t - wum[:fast_start]) % (wum[:slow_window_size] * 2^wum[:slow_window_counter]) == 0
-    wum[:stds] = sqrt(wum[:vars])
+    wum[:stds] = sqrt.(wum[:vars])
     # wum[:stds] = wum[:stds] / min(wum[:stds]...) # old
     wum[:stds] = wum[:stds] / mean([wum[:stds]...])
     wum[:slow_window_counter] += 1
diff --git a/test/ad.jl/ad1.jl b/test/ad.jl/ad1.jl
index 5408ad36f..41dc77814 100644
--- a/test/ad.jl/ad1.jl
+++ b/test/ad.jl/ad1.jl
@@ -8,9 +8,9 @@ using Base.Test
 # Define model
 @model ad_test() = begin
   s ~ InverseGamma(2,3)
-  m ~ Normal(0,sqrt(s))
-  1.5 ~ Normal(m, sqrt(s))
-  2.0 ~ Normal(m, sqrt(s))
+  m ~ Normal(0,sqrt.(s))
+  1.5 ~ Normal(m, sqrt.(s))
+  2.0 ~ Normal(m, sqrt.(s))
   return s, m
 end
 # Turing.TURING[:modelex]
@@ -34,8 +34,8 @@ function logp(x::Vector)
   s = x[2]
   # s = invlink(dist_s, s)
   m = x[1]
-  lik_dist = Normal(m, sqrt(s))
-  lp = logpdf(dist_s, s, false) + logpdf(Normal(0,sqrt(s)), m, false)
+  lik_dist = Normal(m, sqrt.(s))
+  lp = logpdf(dist_s, s, false) + logpdf(Normal(0,sqrt.(s)), m, false)
   lp += logpdf(lik_dist, 1.5) + logpdf(lik_dist, 2.0)
   lp
 end
diff --git a/test/ad.jl/ad2.jl b/test/ad.jl/ad2.jl
index 3481c8ee3..0db6ed6e3 100644
--- a/test/ad.jl/ad2.jl
+++ b/test/ad.jl/ad2.jl
@@ -5,9 +5,9 @@ using Base.Test
 # Define model
 @model ad_test2(xs) = begin
   s ~ InverseGamma(2,3)
-  m ~ Normal(0,sqrt(s))
-  xs[1] ~ Normal(m, sqrt(s))
-  xs[2] ~ Normal(m, sqrt(s))
+  m ~ Normal(0,sqrt.(s))
+  xs[1] ~ Normal(m, sqrt.(s))
+  xs[2] ~ Normal(m, sqrt.(s))
   s, m
 end
diff --git a/test/compiler.jl/forbid_global.jl b/test/compiler.jl/forbid_global.jl
index 41338f4f7..8e69e2267 100644
--- a/test/compiler.jl/forbid_global.jl
+++ b/test/compiler.jl/forbid_global.jl
@@ -7,13 +7,13 @@ xs = [1.5 2.0]

 @model fggibbstest(xs) = begin
   s ~ InverseGamma(2,3)
-  m ~ Normal(0,sqrt(s))
-  # xx ~ Normal(m, sqrt(s)) # this is illegal
+  m ~ Normal(0,sqrt.(s))
+  # xx ~ Normal(m, sqrt.(s)) # this is illegal

   for i = 1:length(xs)
-    xs[i] ~ Normal(m, sqrt(s))
+    xs[i] ~ Normal(m, sqrt.(s))
     # for xx in xs
-    #   xx ~ Normal(m, sqrt(s))
+    #   xx ~ Normal(m, sqrt.(s))
   end
   s, m
 end
diff --git a/test/compiler.jl/new_grammar.jl b/test/compiler.jl/new_grammar.jl
index 65b466fce..f9da7c437 100644
--- a/test/compiler.jl/new_grammar.jl
+++ b/test/compiler.jl/new_grammar.jl
@@ -7,9 +7,9 @@ priors = 0
 @model gauss(x) = begin
   priors = TArray{Float64}(2)
   priors[1] ~ InverseGamma(2,3)         # s
-  priors[2] ~ Normal(0,sqrt(priors[1])) # m
+  priors[2] ~ Normal(0,sqrt.(priors[1])) # m
   for i in 1:length(x)
-    x[i] ~ Normal(priors[2], sqrt(priors[1]))
+    x[i] ~ Normal(priors[2], sqrt.(priors[1]))
   end
   priors
 end
diff --git a/test/compiler.jl/noreturn.jl b/test/compiler.jl/noreturn.jl
index 6f0dc3b29..3579ea9f5 100644
--- a/test/compiler.jl/noreturn.jl
+++ b/test/compiler.jl/noreturn.jl
@@ -7,9 +7,9 @@ xnoreturn = [1.5 2.0]
 @model noreturn(x) = begin
   s ~ InverseGamma(2,3)
-  m ~ Normal(0,sqrt(s))
+  m ~ Normal(0,sqrt.(s))
   for i in 1:length(x)
-    x[i] ~ Normal(m, sqrt(s))
+    x[i] ~ Normal(m, sqrt.(s))
   end
 end
diff --git a/test/compiler.jl/sample.jl b/test/compiler.jl/sample.jl
index e48c42560..e1ec99a60 100644
--- a/test/compiler.jl/sample.jl
+++ b/test/compiler.jl/sample.jl
@@ -3,9 +3,9 @@ using Turing

 @model gdemo2(x) = begin
   s ~ InverseGamma(2,3)
-  m ~ Normal(0,sqrt(s))
+  m ~ Normal(0,sqrt.(s))
   for i = 1:length(x)
-    x[i] ~ Normal(m, sqrt(s))
+    x[i] ~ Normal(m, sqrt.(s))
   end
   return(s, m, x)
 end
diff --git a/test/geweke.jl b/test/geweke.jl
index 155122aac..4890fb207 100644
--- a/test/geweke.jl
+++ b/test/geweke.jl
@@ -15,17 +15,17 @@ NSamples = 5000

 @model gdemo_fw() = begin
   s ~ InverseGamma(2,3)
-  m ~ Normal(0,sqrt(s))
-  y ~ MvNormal([m; m; m], [sqrt(s) 0 0; 0 sqrt(s) 0; 0 0 sqrt(s)])
+  m ~ Normal(0,sqrt.(s))
+  y ~ MvNormal([m; m; m], [sqrt.(s) 0 0; 0 sqrt.(s) 0; 0 0 sqrt.(s)])
 end

 @model gdemo_bk(x) = begin
   # Backward Step 1: theta ~ theta | x
   s ~ InverseGamma(2,3)
-  m ~ Normal(0,sqrt(s))
-  x ~ MvNormal([m; m; m], [sqrt(s) 0 0; 0 sqrt(s) 0; 0 0 sqrt(s)])
+  m ~ Normal(0,sqrt.(s))
+  x ~ MvNormal([m; m; m], [sqrt.(s) 0 0; 0 sqrt.(s) 0; 0 0 sqrt.(s)])
   # Backward Step 2: x ~ x | theta
-  y ~ MvNormal([m; m; m], [sqrt(s) 0 0; 0 sqrt(s) 0; 0 0 sqrt(s)])
+  y ~ MvNormal([m; m; m], [sqrt.(s) 0 0; 0 sqrt.(s) 0; 0 0 sqrt.(s)])
 end

 fw = HMCDA(NSamples, 0.9, 0.1)
diff --git a/test/gibbs.jl/gibbs.jl b/test/gibbs.jl/gibbs.jl
index b6e673999..d4706e9a2 100644
--- a/test/gibbs.jl/gibbs.jl
+++ b/test/gibbs.jl/gibbs.jl
@@ -6,9 +6,9 @@ x = [1.5 2.0]

 @model gibbstest(x) = begin
   s ~ InverseGamma(2,3)
-  m ~ Normal(0,sqrt(s))
+  m ~ Normal(0,sqrt.(s))
   for i in 1:length(x)
-    x[i] ~ Normal(m, sqrt(s))
+    x[i] ~ Normal(m, sqrt.(s))
   end
   s, m
 end
diff --git a/test/gibbs.jl/gibbs_constructor.jl b/test/gibbs.jl/gibbs_constructor.jl
index 072ff1044..4467c1785 100644
--- a/test/gibbs.jl/gibbs_constructor.jl
+++ b/test/gibbs.jl/gibbs_constructor.jl
@@ -3,9 +3,9 @@ using Base.Test

 @model gdemo() = begin
   s ~ InverseGamma(2,3)
-  m ~ Normal(0,sqrt(s))
-  1.5 ~ Normal(m, sqrt(s))
-  2.0 ~ Normal(m, sqrt(s))
+  m ~ Normal(0,sqrt.(s))
+  1.5 ~ Normal(m, sqrt.(s))
+  2.0 ~ Normal(m, sqrt.(s))
   return s, m
 end
diff --git a/test/hmc.jl/multivariate_support.jl b/test/hmc.jl/multivariate_support.jl
index 6daf81050..9e10cb350 100644
--- a/test/hmc.jl/multivariate_support.jl
+++ b/test/hmc.jl/multivariate_support.jl
@@ -4,7 +4,7 @@ using Turing, Distributions

 # Define helper functions
 function sigmoid(t)
-  return 1 / (1 + e^(-t))
+  return 1 / (1 + exp(-t))
 end

 # Define NN flow
@@ -29,7 +29,7 @@ ts = [ones(M); ones(M); zeros(M); zeros(M)]

 # Define model
 alpha = 0.16            # regularizatin term
-var = sqrt(1.0 / alpha) # variance of the Gaussian prior
+var = sqrt.(1.0 / alpha) # variance of the Gaussian prior

 @model bnn(ts) = begin
   b1 ~ MvNormal([0 ;0; 0], [var 0 0; 0 var 0; 0 0 var])
diff --git a/test/hmcda.jl/hmcda.jl b/test/hmcda.jl/hmcda.jl
index b9cc2ff75..0a1da5eea 100644
--- a/test/hmcda.jl/hmcda.jl
+++ b/test/hmcda.jl/hmcda.jl
@@ -9,9 +9,9 @@ alg3 = Gibbs(1500, PG(30, 10, :s), HMCDA(1, 500, 0.65, 0.05, :m))

 @model gdemo(x) = begin
   s ~ InverseGamma(2,3)
-  m ~ Normal(0,sqrt(s))
-  x[1] ~ Normal(m, sqrt(s))
-  x[2] ~ Normal(m, sqrt(s))
+  m ~ Normal(0,sqrt.(s))
+  x[1] ~ Normal(m, sqrt.(s))
+  x[2] ~ Normal(m, sqrt.(s))
   return s, m
 end
diff --git a/test/hmcda.jl/hmcda_geweke.jl b/test/hmcda.jl/hmcda_geweke.jl
index 3360a6d71..6a0686441 100644
--- a/test/hmcda.jl/hmcda_geweke.jl
+++ b/test/hmcda.jl/hmcda_geweke.jl
@@ -16,18 +16,18 @@ NSamples = 5000
 @model gdemo_fw() = begin
   # s ~ InverseGamma(2,3)
   s = 1
-  m ~ Normal(0,sqrt(s))
-  y ~ MvNormal([m; m; m], [sqrt(s) 0 0; 0 sqrt(s) 0; 0 0 sqrt(s)])
+  m ~ Normal(0,sqrt.(s))
+  y ~ MvNormal([m; m; m], [sqrt.(s) 0 0; 0 sqrt.(s) 0; 0 0 sqrt.(s)])
 end

 @model gdemo_bk(x) = begin
   # Backward Step 1: theta ~ theta | x
   # s ~ InverseGamma(2,3)
   s = 1
-  m ~ Normal(0,sqrt(s))
-  x ~ MvNormal([m; m; m], [sqrt(s) 0 0; 0 sqrt(s) 0; 0 0 sqrt(s)])
+  m ~ Normal(0,sqrt.(s))
+  x ~ MvNormal([m; m; m], [sqrt.(s) 0 0; 0 sqrt.(s) 0; 0 0 sqrt.(s)])
   # Backward Step 2: x ~ x | theta
-  y ~ MvNormal([m; m; m], [sqrt(s) 0 0; 0 sqrt(s) 0; 0 0 sqrt(s)])
+  y ~ MvNormal([m; m; m], [sqrt.(s) 0 0; 0 sqrt.(s) 0; 0 0 sqrt.(s)])
 end

 fw = PG(50, NSamples)
diff --git a/test/io.jl/save_resume_chain.jl b/test/io.jl/save_resume_chain.jl
index fd1ced5a2..99e4645b9 100644
--- a/test/io.jl/save_resume_chain.jl
+++ b/test/io.jl/save_resume_chain.jl
@@ -9,9 +9,9 @@ alg3 = Gibbs(500, PG(30, 10, :s), HMCDA(1, 500, 0.65, 0.05, :m))

 @model gdemo(x) = begin
   s ~ InverseGamma(2,3)
-  m ~ Normal(0,sqrt(s))
-  x[1] ~ Normal(m, sqrt(s))
-  x[2] ~ Normal(m, sqrt(s))
+  m ~ Normal(0,sqrt.(s))
+  x[1] ~ Normal(m, sqrt.(s))
+  x[2] ~ Normal(m, sqrt.(s))
   return s, m
 end
diff --git a/test/nuts.jl/nuts.jl b/test/nuts.jl/nuts.jl
index 4f1086ca2..dc57c9eab 100644
--- a/test/nuts.jl/nuts.jl
+++ b/test/nuts.jl/nuts.jl
@@ -5,9 +5,9 @@ using Mamba: describe

 @model gdemo(x) = begin
   s ~ InverseGamma(2,3)
-  m ~ Normal(0,sqrt(s))
-  x[1] ~ Normal(m, sqrt(s))
-  x[2] ~ Normal(m, sqrt(s))
+  m ~ Normal(0,sqrt.(s))
+  x[1] ~ Normal(m, sqrt.(s))
+  x[2] ~ Normal(m, sqrt.(s))
   return s, m
 end
diff --git a/test/nuts.jl/nuts_geweke.jl b/test/nuts.jl/nuts_geweke.jl
index 6c50a92d0..ff3b8772e 100644
--- a/test/nuts.jl/nuts_geweke.jl
+++ b/test/nuts.jl/nuts_geweke.jl
@@ -16,18 +16,18 @@ NSamples = 5000
 @model gdemo_fw() = begin
   s ~ InverseGamma(2,3)
   # s = 1
-  m ~ Normal(0,sqrt(s))
-  y ~ MvNormal([m; m; m], [sqrt(s) 0 0; 0 sqrt(s) 0; 0 0 sqrt(s)])
+  m ~ Normal(0,sqrt.(s))
+  y ~ MvNormal([m; m; m], [sqrt.(s) 0 0; 0 sqrt.(s) 0; 0 0 sqrt.(s)])
 end

 @model gdemo_bk(x) = begin
   # Backward Step 1: theta ~ theta | x
   s ~ InverseGamma(2,3)
   # s = 1
-  m ~ Normal(0,sqrt(s))
-  x ~ MvNormal([m; m; m], [sqrt(s) 0 0; 0 sqrt(s) 0; 0 0 sqrt(s)])
+  m ~ Normal(0,sqrt.(s))
+  x ~ MvNormal([m; m; m], [sqrt.(s) 0 0; 0 sqrt.(s) 0; 0 0 sqrt.(s)])
   # Backward Step 2: x ~ x | theta
-  y ~ MvNormal([m; m; m], [sqrt(s) 0 0; 0 sqrt(s) 0; 0 0 sqrt(s)])
+  y ~ MvNormal([m; m; m], [sqrt.(s) 0 0; 0 sqrt.(s) 0; 0 0 sqrt.(s)])
 end

 fw = IS(NSamples)
diff --git a/test/pmmh.jl/pmmh.jl b/test/pmmh.jl/pmmh.jl
index 866bcc566..5a7019118 100644
--- a/test/pmmh.jl/pmmh.jl
+++ b/test/pmmh.jl/pmmh.jl
@@ -9,15 +9,15 @@ x = [1.5 2.0]

 @model pmmhtest(x) = begin
   s ~ InverseGamma(2,3)
-  m ~ Normal(0,sqrt(s))
+  m ~ Normal(0,sqrt.(s))
   for i in 1:length(x)
-    x[i] ~ Normal(m, sqrt(s))
+    x[i] ~ Normal(m, sqrt.(s))
   end
   s, m
 end

 check_numerical(
-  sample(pmmhtest(x), PMMH(100, SMC(30, :m), (:s, (s) -> Normal(s, sqrt(10))))),
+  sample(pmmhtest(x), PMMH(100, SMC(30, :m), (:s, (s) -> Normal(s, sqrt.(10))))),
   [:s, :m], [49/24, 7/6]
 )
diff --git a/test/pmmh.jl/pmmh_cons.jl b/test/pmmh.jl/pmmh_cons.jl
index 06035f405..a21427f2c 100644
--- a/test/pmmh.jl/pmmh_cons.jl
+++ b/test/pmmh.jl/pmmh_cons.jl
@@ -3,9 +3,9 @@ using Base.Test

 @model gdemo() = begin
   s ~ InverseGamma(2,3)
-  m ~ Normal(0,sqrt(s))
-  1.5 ~ Normal(m, sqrt(s))
-  2.0 ~ Normal(m, sqrt(s))
+  m ~ Normal(0,sqrt.(s))
+  1.5 ~ Normal(m, sqrt.(s))
+  2.0 ~ Normal(m, sqrt.(s))
   return s, m
 end
diff --git a/test/sampler.jl/vec_assume.jl b/test/sampler.jl/vec_assume.jl
index 1cf60e96e..5817dec59 100644
--- a/test/sampler.jl/vec_assume.jl
+++ b/test/sampler.jl/vec_assume.jl
@@ -9,7 +9,7 @@ alg = HMC(1000, 0.2, 4)
 @model vdemo() = begin
   x = Vector{Real}(N)
   for i = 1:N
-    x[i] ~ Normal(0, sqrt(4))
+    x[i] ~ Normal(0, sqrt.(4))
   end
 end
diff --git a/test/sampler.jl/vectorize_observe.jl b/test/sampler.jl/vectorize_observe.jl
index 4246679dc..f7c4e4baf 100644
--- a/test/sampler.jl/vectorize_observe.jl
+++ b/test/sampler.jl/vectorize_observe.jl
@@ -37,10 +37,10 @@ using Distributions, Turing
 # Test for vectorize UnivariateDistribution
 @model vdemo(x) = begin
   s ~ InverseGamma(2,3)
-  m ~ Normal(0,sqrt(s))
-  x ~ [Normal(m, sqrt(s))]
+  m ~ Normal(0,sqrt.(s))
+  x ~ [Normal(m, sqrt.(s))]
   # for i = 1:length(x)
-  #   x[i] ~ Normal(m, sqrt(s))
+  #   x[i] ~ Normal(m, sqrt.(s))
   # end
   return s, m
 end
diff --git a/test/sghmc.jl/sghmc.jl b/test/sghmc.jl/sghmc.jl
index 4d9a1dd41..29b9a024d 100644
--- a/test/sghmc.jl/sghmc.jl
+++ b/test/sghmc.jl/sghmc.jl
@@ -7,9 +7,9 @@ alg1 = SGHMC(3000, 0.01, 0.5)

 @model gdemo(x) = begin
   s ~ InverseGamma(2,3)
-  m ~ Normal(0,sqrt(s))
-  x[1] ~ Normal(m, sqrt(s))
-  x[2] ~ Normal(m, sqrt(s))
+  m ~ Normal(0,sqrt.(s))
+  x[1] ~ Normal(m, sqrt.(s))
+  x[2] ~ Normal(m, sqrt.(s))
   return s, m
 end
diff --git a/test/sgld.jl/sgld.jl b/test/sgld.jl/sgld.jl
index 35023bd03..7e403c4b1 100644
--- a/test/sgld.jl/sgld.jl
+++ b/test/sgld.jl/sgld.jl
@@ -5,9 +5,9 @@ srand(125)

 @model gdemo(x) = begin
   s ~ InverseGamma(2,3)
-  m ~ Normal(0,sqrt(s))
-  x[1] ~ Normal(m, sqrt(s))
-  x[2] ~ Normal(m, sqrt(s))
+  m ~ Normal(0,sqrt.(s))
+  x[1] ~ Normal(m, sqrt.(s))
+  x[2] ~ Normal(m, sqrt.(s))
   return s, m
 end
diff --git a/test/stan-interface.jl/stan-interface.jl b/test/stan-interface.jl/stan-interface.jl
index 3e409e6dc..0c0e9e794 100644
--- a/test/stan-interface.jl/stan-interface.jl
+++ b/test/stan-interface.jl/stan-interface.jl
@@ -2,9 +2,9 @@ using Turing, Stan

 @model gdemo(x) = begin
   s ~ InverseGamma(2,3)
-  m ~ Normal(0,sqrt(s))
-  x[1] ~ Normal(m, sqrt(s))
-  x[2] ~ Normal(m, sqrt(s))
+  m ~ Normal(0,sqrt.(s))
+  x[1] ~ Normal(m, sqrt.(s))
+  x[2] ~ Normal(m, sqrt.(s))
   return s, m
 end
diff --git a/test/varinfo.jl/replay.jl b/test/varinfo.jl/replay.jl
index b12f059f1..bff17e827 100644
--- a/test/varinfo.jl/replay.jl
+++ b/test/varinfo.jl/replay.jl
@@ -10,11 +10,11 @@ xs = rand(Normal(0.5, 1), 100)
 @model priorsinarray(xs) = begin
   priors = Vector{Real}(2)
   priors[1] ~ InverseGamma(2, 3)
-  priors[2] ~ Normal(0, sqrt(priors[1]))
+  priors[2] ~ Normal(0, sqrt.(priors[1]))
   for i = 1:length(xs)
-    xs[i] ~ Normal(priors[2], sqrt(priors[1]))
+    xs[i] ~ Normal(priors[2], sqrt.(priors[1]))
     # for x in xs
-    #   x ~ Normal(priors[2], sqrt(priors[1]))
+    #   x ~ Normal(priors[2], sqrt.(priors[1]))
   end
   priors
 end

From 80b4db64045e69f71b12410836b205756cfb2406 Mon Sep 17 00:00:00 2001
From: Hong Ge
Date: Thu, 5 Oct 2017 11:01:47 +0100
Subject: [PATCH 10/27] Update .travis.yml

---
 .travis.yml | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/.travis.yml b/.travis.yml
index 27c8b2f9c..a36441a3f 100644
--- a/.travis.yml
+++ b/.travis.yml
@@ -8,7 +8,7 @@ addons:
       - g++-5
 language: julia
 julia:
-  - 0.5
+  - 0.6
 os:
   - linux
   - osx

From ac74f77e5222ffa9a98c8ece2a8c268e511992de Mon Sep 17 00:00:00 2001
From: Hong Ge
Date: Thu, 5 Oct 2017 11:17:09 +0100
Subject: [PATCH 11/27] Update appveyor.yml

---
 appveyor.yml | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/appveyor.yml b/appveyor.yml
index 9b7618a4c..768d4086f 100644
--- a/appveyor.yml
+++ b/appveyor.yml
@@ -1,11 +1,11 @@
 environment:
   matrix:
-  - JULIA_URL: "https://julialang-s3.julialang.org/bin/winnt/x64/0.5/julia-0.5.0-win64.exe"
+  - JULIA_URL: "https://julialang-s3.julialang.org/bin/winnt/x64/0.5/julia-0.6.0-win64.exe"
     MINGW_DIR: mingw64
 #    MINGW_URL: https://sourceforge.net/projects/mingw-w64/files/Toolchains%20targetting%20Win64/Personal%20Builds/mingw-builds/5.3.0/threads-win32/seh/x86_64-5.3.0-release-win32-seh-rt_v4-rev0.7z/download
     MINGW_URL: http://mlg.eng.cam.ac.uk/hong/x86_64-5.3.0-release-win32-seh-rt_v4-rev0.7z
     MINGW_ARCHIVE: x86_64-5.3.0-release-win32-seh-rt_v4-rev0.7z
-  - JULIA_URL: "https://julialang-s3.julialang.org/bin/winnt/x86/0.5/julia-0.5.0-win32.exe"
+  - JULIA_URL: "https://julialang-s3.julialang.org/bin/winnt/x86/0.5/julia-0.6.0-win32.exe"
     MINGW_DIR: mingw32
 #    MINGW_URL: https://sourceforge.net/projects/mingw-w64/files/Toolchains%20targetting%20Win32/Personal%20Builds/mingw-builds/5.3.0/threads-win32/dwarf/i686-5.3.0-release-win32-dwarf-rt_v4-rev0.7z/download
     MINGW_URL: http://mlg.eng.cam.ac.uk/hong/i686-5.3.0-release-win32-dwarf-rt_v4-rev0.7z

From 06520fcdf5923fd35620210bb4c9376375b7a359 Mon Sep 17 00:00:00 2001
From: Hong Ge
Date: Thu, 5 Oct 2017 11:22:06 +0100
Subject: [PATCH 12/27] Update appveyor.yml

---
 appveyor.yml | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/appveyor.yml b/appveyor.yml
index 768d4086f..1e57c821a 100644
--- a/appveyor.yml
+++ b/appveyor.yml
@@ -1,11 +1,11 @@
 environment:
   matrix:
-  - JULIA_URL: "https://julialang-s3.julialang.org/bin/winnt/x64/0.5/julia-0.6.0-win64.exe"
+  - JULIA_URL: "https://julialang-s3.julialang.org/bin/winnt/x64/0.6/julia-0.6.0-win64.exe"
     MINGW_DIR: mingw64
 #    MINGW_URL: https://sourceforge.net/projects/mingw-w64/files/Toolchains%20targetting%20Win64/Personal%20Builds/mingw-builds/5.3.0/threads-win32/seh/x86_64-5.3.0-release-win32-seh-rt_v4-rev0.7z/download
     MINGW_URL: http://mlg.eng.cam.ac.uk/hong/x86_64-5.3.0-release-win32-seh-rt_v4-rev0.7z
     MINGW_ARCHIVE: x86_64-5.3.0-release-win32-seh-rt_v4-rev0.7z
-  - JULIA_URL: "https://julialang-s3.julialang.org/bin/winnt/x86/0.5/julia-0.6.0-win32.exe"
+  - JULIA_URL: "https://julialang-s3.julialang.org/bin/winnt/x86/0.6/julia-0.6.0-win32.exe"
     MINGW_DIR: mingw32
 #    MINGW_URL: https://sourceforge.net/projects/mingw-w64/files/Toolchains%20targetting%20Win32/Personal%20Builds/mingw-builds/5.3.0/threads-win32/dwarf/i686-5.3.0-release-win32-dwarf-rt_v4-rev0.7z/download
     MINGW_URL: http://mlg.eng.cam.ac.uk/hong/i686-5.3.0-release-win32-dwarf-rt_v4-rev0.7z

From 1d8139f3136c4c43c1c57f226e50d4d6e38e245e Mon Sep 17 00:00:00 2001
From: Kai Xu
Date: Thu, 5 Oct 2017 15:05:58 +0100
Subject: [PATCH 13/27] fix test

---
 test/ad.jl/ad1.jl | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/test/ad.jl/ad1.jl b/test/ad.jl/ad1.jl
index 41dc77814..62ae1978a 100644
--- a/test/ad.jl/ad1.jl
+++ b/test/ad.jl/ad1.jl
@@ -17,7 +17,7 @@ end
 # Call Turing's AD
 # The result out is the gradient information on R
 ad_test_f = ad_test()
-vi = ad_test_f()
+vi = ad_test_f(Turing.VarInfo(), nothing)
 svn = collect(filter(vn -> vn.sym == :s, keys(vi)))[1]
 mvn = collect(filter(vn -> vn.sym == :m, keys(vi)))[1]
 _s = realpart(getval(vi, svn)[1])

From 8c74ec658337bca33f248d8f685859536d643758 Mon Sep 17 00:00:00 2001
From: Kai Xu
Date: Mon, 9 Oct 2017 10:31:00 +0100
Subject: [PATCH 14/27] Deprecations on package loading fixed

---
 src/core/compiler.jl | 8 ++++----
 src/core/container.jl | 2 +-
 src/samplers/pgibbs.jl | 2 +-
 src/trace/tarray.jl | 2 +-
 src/trace/trace.jl | 4 ++--
 5 files changed, 9 insertions(+), 9 deletions(-)

diff --git a/src/core/compiler.jl b/src/core/compiler.jl
index ef596dd68..4a9016a2e 100644
--- a/src/core/compiler.jl
+++ b/src/core/compiler.jl
@@ -7,8 +7,8 @@ macro VarName(ex::Union{Expr, Symbol})
   #    return: (:x,[1,2],6,45,3)
   s = string(gensym())
   if isa(ex, Symbol)
-    _ = string(ex)
-    return :(Symbol($_), Symbol($s))
+    ex_str = string(ex)
+    return :(Symbol($ex_str), Symbol($s))
   elseif ex.head == :ref
     _2 = ex
     _1 = ""
@@ -301,7 +301,7 @@ macro model(fexpr)
     if _k != nothing
       _k_str = string(_k)
       dprintln(1, _k_str, " = ", _k)
-      _ = quote
+      data_check_ex = quote
         if haskey(data, keytype(data)($_k_str))
           if nothing != $_k
             Turing.dwarn(0, " parameter "*$_k_str*" found twice, value in data dictionary will be used.")
           end
         else
          data[keytype(data)($_k_str)] == nothing && Turing.derror(0, "Data `"*$_k_str*"` is not provided.")
         end
       end
-      unshift!(fdefn_outer.args[2].args, _)
+      unshift!(fdefn_outer.args[2].args, data_check_ex)
     end
   end
   unshift!(fdefn_outer.args[2].args, quote data = copy(data) end)
diff --git a/src/core/container.jl b/src/core/container.jl
index e948b5a72..d41fa21ea 100644
--- a/src/core/container.jl
+++ b/src/core/container.jl
@@ -16,7 +16,7 @@ type ParticleContainer{T<:Particle}
   # conditional :: Union{Void,Conditional} # storing parameters, helpful for implementing rejuvenation steps
   conditional :: Void # storing parameters, helpful for implementing rejuvenation steps
   n_consume :: Int # helpful for rejuvenation steps, e.g. in SMC2
-  ParticleContainer(m::Function,n::Int) = new(m,n,Array{Particle,1}(),Array{Float64,1}(),0.0,nothing,0)
+  ParticleContainer{T}(m::Function,n::Int) where {T} = new(m,n,Array{Particle,1}(),Array{Float64,1}(),0.0,nothing,0)
 end

 (::Type{ParticleContainer{T}}){T}(m) = ParticleContainer{T}(m, 0)
diff --git a/src/samplers/pgibbs.jl b/src/samplers/pgibbs.jl
index f280888ea..9e5a2321c 100644
--- a/src/samplers/pgibbs.jl
+++ b/src/samplers/pgibbs.jl
@@ -39,7 +39,7 @@ immutable PG <: InferenceAlgorithm
   PG(alg::PG, new_gid::Int) = new(alg.n_particles, alg.n_iters, alg.resampler, alg.resampler_threshold, alg.space, new_gid)
 end

-typealias CSMC PG # Conditional SMC
+const CSMC = PG # type alias of PG as Conditional SMC

 Sampler(alg::PG) = begin
   info = Dict{Symbol, Any}()
diff --git a/src/trace/tarray.jl b/src/trace/tarray.jl
index c66eb74d6..cf9037a6b 100644
--- a/src/trace/tarray.jl
+++ b/src/trace/tarray.jl
@@ -25,7 +25,7 @@ Array(ta) # convert to 4-element Array{Int64,1}: [1, 2, 3, 4]
 """
 immutable TArray{T,N} <: DenseArray{T,N}
   ref :: Symbol # object_id
-  TArray() = new(gensym())
+  TArray{T,N}() where {T,N} = new(gensym())
 end

 (::Type{TArray{T,1}}){T}(d::Integer) = TArray(T, d)
diff --git a/src/trace/trace.jl b/src/trace/trace.jl
index f329d110c..d8d7beb70 100644
--- a/src/trace/trace.jl
+++ b/src/trace/trace.jl
@@ -39,7 +39,7 @@ type Trace{T}
   task :: Task
   vi :: VarInfo
   spl :: Union{Void, Sampler}
-  Trace() = (res = new(); res.vi = VarInfo(); res.spl = nothing; res)
+  Trace{T}() where {T} = (res = new(); res.vi = VarInfo(); res.spl = nothing; res)
 end

 # NOTE: this function is called by `forkr`
@@ -61,7 +61,7 @@ function (::Type{Trace{T}}){T}(f::Function, spl::Sampler, vi :: VarInfo)
   res.vi = deepcopy(vi)
   res.vi.index = 0
   res.vi.num_produce = 0
-  res.task = Task( () -> begin _=f(vi, spl); produce(Val{:done}); _; end )
+  res.task = Task( () -> begin vi_new=f(vi, spl); produce(Val{:done}); vi_new; end )
   if isa(res.task.storage, Void)
     res.task.storage = ObjectIdDict()
   end

From d976b455bc86eeea2529cc5ef6b39e9499469092 Mon Sep 17 00:00:00 2001
From: Kai Xu
Date: Mon, 9 Oct 2017 12:07:21 +0100
Subject: [PATCH 15/27] fix deprecations

---
 example-models/nips-2017/gmm.model.jl | 2 +-
 example-models/nips-2017/sv.model.jl | 4 ++--
 example-models/nips-2017/sv.sim.jl | 2 +-
 example-models/nuts-paper/lr_helper.jl | 2 +-
 example-models/nuts-paper/sv_nuts.jl | 2 +-
 example-models/sgld-paper/lr_helper.jl | 2 +-
 example-models/stan-models/normal-mixture.model.jl | 2 +-
 src/core/container.jl | 4 ++--
 src/core/io.jl | 6 +++---
 src/core/util.jl | 4 ++--
 src/samplers/hmcda.jl | 2 +-
 src/samplers/is.jl | 2 +-
 src/samplers/nuts.jl | 2 +-
 src/samplers/pgibbs.jl | 4 ++--
 src/samplers/pmmh.jl | 2 +-
 src/samplers/support/adapt.jl | 4 ++--
 src/samplers/support/distributions.jl | 10 +++++-----
 src/samplers/support/hmc_core.jl | 4 ++--
 src/samplers/support/transform.jl | 8 ++++----
 test/ad.jl/ad1.jl | 6 +++---
 test/ad.jl/ad3.jl | 4 ++--
 test/ad.jl/pass_dual_to_dists.jl | 12 ++++++------
 test/compiler.jl/assume.jl | 4 ++--
 test/compiler.jl/explicit_ret.jl | 6 +++---
 test/compiler.jl/opt_param_of_dist.jl | 2 +-
 test/geweke.jl | 4 ++--
 test/hmc.jl/matrix_support.jl | 2 +-
 test/hmc.jl/multivariate_support.jl | 2 +-
 test/hmcda.jl/hmcda.jl | 4 ++--
 test/is.jl/importance_sampling.jl | 2 +-
 test/pmmh.jl/pmmh2.jl | 12 ++++++------
 test/resample.jl/resample.jl | 10 +++++-----
 test/sghmc.jl/sghmc.jl | 4 ++--
 test/sgld.jl/sgld.jl | 4 ++--
 test/trace.jl/trace.jl | 2 +-
 test/transform.jl/transform.jl | 12 ++++++------
 test/util.jl/util.jl | 4 ++--
 test/varinfo.jl/test_varname.jl | 4 ++--
 38 files changed, 84 insertions(+), 84 deletions(-)

diff --git a/example-models/nips-2017/gmm.model.jl b/example-models/nips-2017/gmm.model.jl
index 108cedf1e..9d9e4c67c 100644
--- a/example-models/nips-2017/gmm.model.jl
+++ b/example-models/nips-2017/gmm.model.jl
@@ -28,4 +28,4 @@ p = [ 0.2, 0.2, 0.2, 0.2, 0.2]
 # μ = [ 0, 1, 2, 3.5, 4.25] + 0.5 * collect(0:4)
 μ = [ 0, 1, 2, 3.5, 4.25] + 2.5 * collect(0:4)
 s = [-0.5, -1.5, -0.75, -2, -0.5]
-σ = exp(s)
+σ = exp.(s)
diff --git a/example-models/nips-2017/sv.model.jl b/example-models/nips-2017/sv.model.jl
index 88ad42c56..81336ec59 100644
--- a/example-models/nips-2017/sv.model.jl
+++ b/example-models/nips-2017/sv.model.jl
@@ -5,9 +5,9 @@
   μ ~ Cauchy(0, 10)
   h = tzeros(Real, T)
   h[1] ~ Normal(μ, σ / sqrt(1 - ϕ^2))
-  y[1] ~ Normal(0, exp(h[1] / 2))
+  y[1] ~ Normal(0, exp.(h[1] / 2))
   for t = 2:T
     h[t] ~ Normal(μ + ϕ * (h[t-1] - μ) , σ)
-    y[t] ~ Normal(0, exp(h[t] / 2))
+    y[t] ~ Normal(0, exp.(h[t] / 2))
   end
 end
diff --git a/example-models/nips-2017/sv.sim.jl b/example-models/nips-2017/sv.sim.jl
index 2ed36d70b..03ae841fb 100644
--- a/example-models/nips-2017/sv.sim.jl
+++ b/example-models/nips-2017/sv.sim.jl
@@ -16,7 +16,7 @@ for t in 2:T
 end
 y = Vector{Float64}(T);
 for t in 1:T
-  y[t] = rand(Normal(0, exp(h[t] / 2)));
+  y[t] = rand(Normal(0, exp.(h[t] / 2)));
 end
diff --git a/example-models/nuts-paper/lr_helper.jl b/example-models/nuts-paper/lr_helper.jl
index 7601c628b..c212cd7d7 100644
--- a/example-models/nuts-paper/lr_helper.jl
+++ b/example-models/nuts-paper/lr_helper.jl
@@ -7,7 +7,7 @@ readlrdata() = begin
   open("lr_nuts.data") do f
     while !eof(f)
       raw_line = readline(f)
-      data_str = filter(str -> length(str) > 0, split(raw_line, r"[ ]+")[1:end-1])
+      data_str = Iterators.filter(str -> length(str) > 0, split(raw_line, r"[ ]+")[1:end-1])
       data = map(str -> parse(str), data_str)
       x = cat(1, x, data[1:end-1]')
      y = cat(1, y, data[end] - 1) # turn {1, 2} to {0, 1}
diff --git a/example-models/nuts-paper/sv_nuts.jl b/example-models/nuts-paper/sv_nuts.jl
index fe58d03c5..896768b41 100644
--- a/example-models/nuts-paper/sv_nuts.jl
+++ b/example-models/nuts-paper/sv_nuts.jl
@@ -14,7 +14,7 @@ y = readsvdata()
   s[1] ~ Exponential(1/100)
   for i = 2:N
     s[i] ~ Normal(log(s[i-1]), τ)
-    s[i] = exp(s[i])
+    s[i] = exp.(s[i])
     dy = log(y[i] / y[i-1]) / s[i]
     dy ~ TDist(ν)
   end
diff --git a/example-models/sgld-paper/lr_helper.jl b/example-models/sgld-paper/lr_helper.jl
index 4fb921f88..abf57ede8 100644
--- a/example-models/sgld-paper/lr_helper.jl
+++ b/example-models/sgld-paper/lr_helper.jl
@@ -8,7 +8,7 @@ readlrdata() = begin
   open("a9a2000.data") do f
     while !eof(f)
       raw_line = readline(f)
-      data_str = filter(str -> length(str) > 0, split(raw_line, r"[ ]+")[1:end-1])
+      data_str = Iterators.filter(str -> length(str) > 0, split(raw_line, r"[ ]+")[1:end-1])
       data = map(str -> parse(str), data_str)
       x_tmp = zeros(Int32, d)
       x_tmp[data[2:end]] = 1
diff --git a/example-models/stan-models/normal-mixture.model.jl b/example-models/stan-models/normal-mixture.model.jl
index e2a8d88d2..1bf7b2419 100644
--- a/example-models/stan-models/normal-mixture.model.jl
+++ b/example-models/stan-models/normal-mixture.model.jl
@@ -25,6 +25,6 @@ using ForwardDiff: Dual
   # logtheta_p = map(yᵢ -> [log(theta) + logpdf(Normal(mu[1], 1.0), yᵢ), log(1 - theta) + logpdf(Normal(mu[2], 1.0), yᵢ)], y)
   # map!(logtheta_pᵢ -> logtheta_pᵢ - logsumexp(logtheta_pᵢ), logtheta_p) # normalization
   # for i = 1:N
-  #   k[i] ~ Categorical(exp(logtheta_p[i]))
+  #   k[i] ~ Categorical(exp.(logtheta_p[i]))
   # end
 end
diff --git a/src/core/container.jl b/src/core/container.jl
index d41fa21ea..b3704e9c6 100644
--- a/src/core/container.jl
+++ b/src/core/container.jl
@@ -127,7 +127,7 @@ end
 function weights(pc :: ParticleContainer)
   @assert pc.num_particles == length(pc)
   logWs = pc.logWs
-  Ws = exp(logWs-maximum(logWs))
+  Ws = exp.(logWs-maximum(logWs))
   logZ = log(sum(Ws)) + maximum(logWs)
   Ws = Ws ./ sum(Ws)
   return Ws, logZ
@@ -196,5 +196,5 @@ getsample(pc :: ParticleContainer) = begin
   w = pc.logE
   Ws, z = weights(pc)
   s = map((i)->getsample(pc, i, Ws[i]), 1:length(pc))
-  return exp(w), s
+  return exp.(w), s
 end
diff --git a/src/core/io.jl b/src/core/io.jl
index 5466c4753..41bb49c55 100644
--- a/src/core/io.jl
+++ b/src/core/io.jl
@@ -17,14 +17,14 @@ getjuliatype(s::Sample, v::Symbol, cached_syms=nothing) = begin
   # NOTE: cached_syms is used to cache the filter entiries in svalue. This is helpful when the dimension of model is huge.
   if cached_syms == nothing
     # Get all keys associated with the given symbol
-    syms = collect(filter(k -> search(string(k), string(v)*"[") != 0:-1, keys(s.value)))
+    syms = collect(Iterators.filter(k -> search(string(k), string(v)*"[") != 0:-1, keys(s.value)))
   else
-    syms = filter(k -> search(string(k), string(v)) != 0:-1, cached_syms)
+    syms = collect((Iterators.filter(k -> search(string(k), string(v)) != 0:-1, cached_syms)))
   end
   # Map to the corresponding indices part
   idx_str = map(sym -> replace(string(sym), string(v), ""), syms)
   # Get the indexing component
-  idx_comp = map(idx -> filter(str -> str != "", split(string(idx), [']','['])), idx_str)
+  idx_comp = map(idx -> collect(Iterators.filter(str -> str != "", split(string(idx), [']','[']))), idx_str)

   # Deal with v is really a symbol, e.g. :x
   if length(idx_comp) == 0
diff --git a/src/core/util.jl b/src/core/util.jl
index 522146ed5..022f3db00 100644
--- a/src/core/util.jl
+++ b/src/core/util.jl
@@ -2,7 +2,7 @@
 # Math #
 ########

-@inline invlogit{T<:Real}(x::Union{T,Vector{T},Matrix{T}}) = one(T) ./ (one(T) + exp(-x))
+@inline invlogit{T<:Real}(x::Union{T,Vector{T},Matrix{T}}) = one(T) ./ (one(T) + exp.(-x))
 @inline logit{T<:Real}(x::Union{T,Vector{T},Matrix{T}}) = log(x ./ (one(T) - x))

 # More stable, faster version of rand(Categorical)
@@ -22,7 +22,7 @@ type NotImplementedException <: Exception end
 # Numerically stable sum of values represented in log domain.
 logsum{T<:Real}(xs::Vector{T}) = begin
   largest = maximum(xs)
-  ys = map(x -> exp(x - largest), xs)
+  ys = map(x -> exp.(x - largest), xs)

   log(sum(ys)) + largest
 end
diff --git a/src/samplers/hmcda.jl b/src/samplers/hmcda.jl
index c8531b6ad..f7adf8855 100644
--- a/src/samplers/hmcda.jl
+++ b/src/samplers/hmcda.jl
@@ -96,7 +96,7 @@ function step(model, spl::Sampler{HMCDA}, vi::VarInfo, is_first::Bool)
     H = τ_valid == 0 ? Inf : find_H(p, model, vi, spl)

     dprintln(2, "computing accept rate α...")
-    α = min(1, exp(-(H - old_H)))
+    α = min(1, exp.(-(H - old_H)))

     if PROGRESS && spl.alg.gid == 0
       stds_str = string(spl.info[:wum][:stds])
diff --git a/src/samplers/is.jl b/src/samplers/is.jl
index 2baa381d3..7d2793b72 100644
--- a/src/samplers/is.jl
+++ b/src/samplers/is.jl
@@ -47,7 +47,7 @@ sample(model::Function, alg::IS) = begin

   le = logsum(map(x->x[:lp], samples)) - log(n)

-  Chain(exp(le), samples)
+  Chain(exp.(le), samples)
 end

 assume(spl::Sampler{IS}, dist::Distribution, vn::VarName, vi::VarInfo) = begin
diff --git a/src/samplers/nuts.jl b/src/samplers/nuts.jl
index 59c71e571..9a76e5602 100644
--- a/src/samplers/nuts.jl
+++ b/src/samplers/nuts.jl
@@ -147,7 +147,7 @@ function build_tree(θ::Union{Vector,SubArray}, r::Vector, logu::Float64, v::Int
     H′ = τ_valid == 0 ? Inf : find_H(r′, model, vi, spl)
     n′ = (logu <= -H′) ? 1 : 0
     s′ = (logu < Δ_max + -H′) ? 1 : 0
-    α′ = exp(min(0, -H′ - (-H0)))
+    α′ = exp.(min(0, -H′ - (-H0)))

     θ′, r′, θ′, r′, θ′, getlogp(vi), n′, s′, α′, 1
   else
diff --git a/src/samplers/pgibbs.jl b/src/samplers/pgibbs.jl
index 9e5a2321c..862269220 100644
--- a/src/samplers/pgibbs.jl
+++ b/src/samplers/pgibbs.jl
@@ -127,13 +127,13 @@ sample(model::Function, alg::PG;
   println("[PG] Finished with")
   println("  Running time    = $time_total;")

-  loge = exp(mean(spl.info[:logevidence]))
+  loge = exp.(mean(spl.info[:logevidence]))
   if resume_from != nothing   # concat samples
     unshift!(samples, resume_from.value2...)
     pre_loge = resume_from.weight
     # Calculate new log-evidence
    pre_n = length(resume_from.value2)
-    loge = exp((log(pre_loge) * pre_n + log(loge) * n) / (pre_n + n))
+    loge = exp.((log(pre_loge) * pre_n + log(loge) * n) / (pre_n + n))
   end
   c = Chain(loge, samples)    # wrap the result by Chain
diff --git a/src/samplers/pmmh.jl b/src/samplers/pmmh.jl
index 0b58ed008..47026caaa 100644
--- a/src/samplers/pmmh.jl
+++ b/src/samplers/pmmh.jl
@@ -89,7 +89,7 @@ step(model::Function, spl::Sampler{PMMH}, vi::VarInfo, is_first::Bool) = begin
     # Compute marginal likehood unbiased estimator
     log_Ws = particles.logWs - Ws_sum_prev # particles.logWs is the running sum over time
     Ws_sum_prev = copy(particles.logWs)
-    relative_Ws = exp(log_Ws-maximum(log_Ws))
+    relative_Ws = exp.(log_Ws-maximum(log_Ws))
     logZs = log(sum(relative_Ws)) + maximum(log_Ws)

     new_likelihood_estimator += logZs
diff --git a/src/samplers/support/adapt.jl b/src/samplers/support/adapt.jl
index 214f80579..577828aa2 100644
--- a/src/samplers/support/adapt.jl
+++ b/src/samplers/support/adapt.jl
@@ -58,10 +58,10 @@ adapt_step_size(wum::WarmUpManager, stats::Float64) = begin
   μ = wum[:μ]; ϵ_bar = wum[:ϵ_bar]; H_bar = wum[:H_bar]

   H_bar = (1 - 1 / (m + t_0)) * H_bar + 1 / (m + t_0) * (δ - stats)
-  ϵ = exp(μ - sqrt.(m) / γ * H_bar)
+  ϵ = exp.(μ - sqrt.(m) / γ * H_bar)
   dprintln(1, " ϵ = $ϵ, stats = $stats")

-  ϵ_bar = exp(m^(-κ) * log(ϵ) + (1 - m^(-κ)) * log(ϵ_bar))
+  ϵ_bar = exp.(m^(-κ) * log(ϵ) + (1 - m^(-κ)) * log(ϵ_bar))

   push!(wum[:ϵ], ϵ)
   wum[:ϵ_bar], wum[:H_bar] = ϵ_bar, H_bar
diff --git a/src/samplers/support/distributions.jl b/src/samplers/support/distributions.jl
index b14a50e0c..04fc95292 100644
--- a/src/samplers/support/distributions.jl
+++ b/src/samplers/support/distributions.jl
@@ -62,9 +62,9 @@ function Distributions._mixlogpdf1(d::AbstractMixtureModel, x)
   # using the formula below for numerical stability
   #
   #     logpdf(d, x) = log(sum_i pri[i] * pdf(cs[i], x))
-  #                  = log(sum_i pri[i] * exp(logpdf(cs[i], x)))
-  #                  = log(sum_i exp(logpri[i] + logpdf(cs[i], x)))
-  #                  = m + log(sum_i exp(logpri[i] + logpdf(cs[i], x) - m))
+  #                  = log(sum_i pri[i] * exp.(logpdf(cs[i], x)))
+  #                  = log(sum_i exp.(logpri[i] + logpdf(cs[i], x)))
+  #                  = m + log(sum_i exp.(logpri[i] + logpdf(cs[i], x) - m))
   #
   #  m is chosen to be the maximum of logpri[i] + logpdf(cs[i], x)
   #  such that the argument of exp is in a reasonable range
@@ -90,7 +90,7 @@ function Distributions._mixlogpdf1(d::AbstractMixtureModel, x)
   v = 0.0
   @inbounds for i = 1:K
     if p[i] > 0.0
-      v += exp(lp[i] - m)
+      v += exp.(lp[i] - m)
     end
   end
   return m + log(v)
@@ -127,7 +127,7 @@ function Distributions._mixlogpdf!(r::AbstractArray, d::AbstractMixtureModel, x)
     if p[i] > 0.0
       lp_i = view(Lp, :, i)
       for j = 1:n
-        r[j] += exp(lp_i[j] - m[j])
+        r[j] += exp.(lp_i[j] - m[j])
       end
     end
   end
diff --git a/src/samplers/support/hmc_core.jl b/src/samplers/support/hmc_core.jl
index 14681a053..ae5c608b8 100644
--- a/src/samplers/support/hmc_core.jl
+++ b/src/samplers/support/hmc_core.jl
@@ -62,7 +62,7 @@ end
 find_good_eps{T}(model::Function, vi::VarInfo, spl::Sampler{T}) = begin
   println("[Turing] looking for good initial eps...")
   ϵ, p = 1.0, sample_momentum(vi, spl)     # set initial epsilon and momentums
-  log_p_r_Θ = -find_H(p, model, vi, spl)   # calculate p(Θ, r) = exp(-H(Θ, r))
+  log_p_r_Θ = -find_H(p, model, vi, spl)   # calculate p(Θ, r) = exp.(-H(Θ, r))

   θ = realpart(vi[spl])
   θ_prime, p_prime, τ = leapfrog(θ, p, 1, ϵ, model, vi, spl)
@@ -71,7 +71,7 @@ find_good_eps{T}(model::Function, vi::VarInfo, spl::Sampler{T}) = begin
   # Heuristically find optimal ϵ
   iter_num = 1
   a = 2.0 * (log_p_r_Θ′ - log_p_r_Θ > log(0.5) ? 1 : 0) - 1
-  while (exp(log_p_r_Θ′ - log_p_r_Θ))^a > 2.0^(-a) && iter_num <= 12
+  while (exp.(log_p_r_Θ′ - log_p_r_Θ))^a > 2.0^(-a) && iter_num <= 12
     ϵ = 2.0^a * ϵ
     θ_prime, p_prime, τ = leapfrog(θ, p, 1, ϵ, model, vi, spl)
     log_p_r_Θ′ = τ == 0 ? -Inf : -find_H(p_prime, model, vi, spl)
diff --git a/src/samplers/support/transform.jl b/src/samplers/support/transform.jl
index 3c8781679..9cbafff96 100644
--- a/src/samplers/support/transform.jl
+++ b/src/samplers/support/transform.jl
@@ -52,9 +52,9 @@ invlink{T<:Real}(d::TransformDistribution, x::Union{T,Vector{T}}) = begin
   if lowerbounded && upperbounded
     (b - a) .* invlogit(x) + a
   elseif lowerbounded
-    exp(x) + a
+    exp.(x) + a
   elseif upperbounded
-    b - exp(x)
+    b - exp.(x)
   else
     x
   end
@@ -100,7 +100,7 @@
 link{T<:Real}(d::PositiveDistribution, x::Union{T,Vector{T}}) = log(x)

-invlink{T<:Real}(d::PositiveDistribution, x::Union{T,Vector{T}}) = exp(x)
+invlink{T<:Real}(d::PositiveDistribution, x::Union{T,Vector{T}}) = exp.(x)

 logpdf_with_trans{T<:Real}(d::PositiveDistribution, x::Union{T,Vector{T}}, transform::Bool) = begin
   lp = logpdf(d, x)
@@ -252,7 +252,7 @@ end
 invlink{T<:Real}(d::PDMatDistribution, z::Array{T,2}) = begin
   dim = size(z)
   for m in 1:dim[1]
-    z[m, m] = exp(z[m, m])
+    z[m, m] = exp.(z[m, m])
   end
   for m in 1:dim[1], n in m+1:dim[2]
     z[m, n] = zero(T)
diff --git a/test/ad.jl/ad1.jl b/test/ad.jl/ad1.jl
index 62ae1978a..b1f05bd07 100644
--- a/test/ad.jl/ad1.jl
+++ b/test/ad.jl/ad1.jl
@@ -18,8 +18,8 @@ end
 # The result out is the gradient information on R
 ad_test_f = ad_test()
 vi = ad_test_f(Turing.VarInfo(), nothing)
-svn = collect(filter(vn -> vn.sym == :s, keys(vi)))[1]
-mvn = collect(filter(vn -> vn.sym == :m, keys(vi)))[1]
+svn = collect(Iterators.filter(vn -> vn.sym == :s, keys(vi)))[1]
+mvn = collect(Iterators.filter(vn -> vn.sym == :m, keys(vi)))[1]
 _s = realpart(getval(vi, svn)[1])
 _m = realpart(getval(vi, mvn)[1])
 ∇E = gradient(vi, ad_test_f)
@@ -47,4 +47,4 @@ _x = [_m, _s]
 grad_FWAD = sort(-g(_x))

 # Compare result
-@test_approx_eq_eps grad_Turing grad_FWAD 1e-9
+@test grad_Turing ≈ grad_FWAD atol=1e-9
diff --git a/test/ad.jl/ad3.jl b/test/ad.jl/ad3.jl
index 3cf828808..4fa6af31c 100644
--- a/test/ad.jl/ad3.jl
+++ b/test/ad.jl/ad3.jl
@@ -16,7 +16,7 @@ end
 # The result out is the gradient information on R
 ad_test_3_f = ad_test_3()
 vi = ad_test_3_f()
-vvn = collect(filter(vn -> vn.sym == :v, keys(vi)))[1]
+vvn = collect(Iterators.filter(vn -> vn.sym == :v, keys(vi)))[1]
 _v = map(d -> realpart(d), vi[vvn])
 grad_Turing = gradient(vi, ad_test_3_f)
@@ -35,4 +35,4 @@ g = x -> ForwardDiff.gradient(logp3, vec(x));
 grad_FWAD = -g(_v)

 # Compare result
-@test_approx_eq_eps grad_Turing grad_FWAD 1e-9
+@test grad_Turing ≈ grad_FWAD atol=1e-9
diff --git a/test/ad.jl/pass_dual_to_dists.jl b/test/ad.jl/pass_dual_to_dists.jl
index 2f63df649..c4eb5785d 100644
--- a/test/ad.jl/pass_dual_to_dists.jl
+++ b/test/ad.jl/pass_dual_to_dists.jl
@@ -10,10 +10,10 @@ float2 = 2.3
 d1 = Dual(1.1)
 d2 = Dual(2.3)

-@test_approx_eq_eps logpdf(Normal(0, 1), d1).value logpdf(Normal(0, 1), float1) 0.001
-@test_approx_eq_eps logpdf(Gamma(2, 3), d2).value logpdf(Gamma(2, 3), float2) 0.001
-@test_approx_eq_eps logpdf(Beta(2, 3), (d2 - d1) / 2).value logpdf(Beta(2, 3), (float2 - float1) / 2) 0.001
+@test logpdf(Normal(0, 1), d1).value ≈ logpdf(Normal(0, 1), float1) atol=0.001
+@test logpdf(Gamma(2, 3), d2).value ≈ logpdf(Gamma(2, 3), float2) atol=0.001
+@test logpdf(Beta(2, 3), (d2 - d1) / 2).value ≈ logpdf(Beta(2, 3), (float2 - float1) / 2) atol=0.001

-@test_approx_eq_eps pdf(Normal(0, 1), d1).value pdf(Normal(0, 1), float1) 0.001
-@test_approx_eq_eps pdf(Gamma(2, 3), d2).value pdf(Gamma(2, 3), float2) 0.001
-@test_approx_eq_eps pdf(Beta(2, 3), (d2 - d1) / 2).value pdf(Beta(2, 3), (float2 - float1) / 2) 0.001
+@test pdf(Normal(0, 1), d1).value ≈ pdf(Normal(0, 1), float1) atol=0.001
+@test pdf(Gamma(2, 3), d2).value ≈ pdf(Gamma(2, 3), float2) atol=0.001
+@test pdf(Beta(2, 3), (d2 - d1) / 2).value ≈ pdf(Beta(2, 3), (float2 - float1) / 2) atol=0.001
diff --git a/test/compiler.jl/assume.jl b/test/compiler.jl/assume.jl
index 4fa541be2..32f3139d0 100644
--- a/test/compiler.jl/assume.jl
+++ b/test/compiler.jl/assume.jl
@@ -16,9 +16,9 @@ pg = PG(10,1000)
 res = sample(test_assume(), smc)

 @test reduce(&, res[:x]) == 1 # check that x is always 1
-@test_approx_eq_eps mean(res[:y]) 0.5 0.1 # check that the mean of y is between 0.4 and 0.6
+@test mean(res[:y]) ≈ 0.5 atol=0.1 # check that the mean of y is between 0.4 and 0.6

 res = sample(test_assume(), pg)

 @test reduce(&, res[:x]) == 1 # check that x is always 1
-@test_approx_eq_eps mean(res[:y]) 0.5 0.1 # check that the mean of y is between 0.4 and 0.6
+@test mean(res[:y]) ≈ 0.5 atol=0.1 # check that the mean of y is between 0.4 and 0.6
diff --git a/test/compiler.jl/explicit_ret.jl b/test/compiler.jl/explicit_ret.jl
index bb4bda513..bf0eead24 100644
--- a/test/compiler.jl/explicit_ret.jl
+++ b/test/compiler.jl/explicit_ret.jl
@@ -15,7 +15,7 @@ mf = test_ex_rt()
 for alg = [HMC(2000, 0.2, 3), PG(20, 2000), SMC(10000), IS(10000), Gibbs(2000, PG(20, 1, :x), HMC(1, 0.2, 3, :y))]
   println("[explicit_ret.jl] testing $alg")
   chn = sample(mf, alg)
-  @test_approx_eq_eps mean(chn[:x]) 9.0 0.2
-  @test_approx_eq_eps mean(chn[:y]) 5.0 0.2
-  @test_approx_eq_eps mean(chn[:z]) 6.0 0.2
+  @test mean(chn[:x]) ≈ 9.0 atol=0.2
+  @test mean(chn[:y]) ≈ 5.0 atol=0.2
+  @test mean(chn[:z]) ≈ 6.0 atol=0.2
 end
diff --git a/test/compiler.jl/opt_param_of_dist.jl b/test/compiler.jl/opt_param_of_dist.jl
index c126f251e..1dbd198da 100644
--- a/test/compiler.jl/opt_param_of_dist.jl
+++ b/test/compiler.jl/opt_param_of_dist.jl
@@ -16,4 +16,4 @@ res = sample(testassume, s)

 @test reduce(&, res[:x]) == 1 # check that x is always 1

-@test_approx_eq_eps mean(res[:y]) 0.5 0.1 # check that the mean of y is between 0.4 and 0.6
+@test mean(res[:y]) ≈ 0.5 atol=0.1 # check that the mean of y is between 0.4 and 0.6
diff --git a/test/geweke.jl b/test/geweke.jl
index 4890fb207..837ae22f9 100644
--- a/test/geweke.jl
+++ b/test/geweke.jl
@@ -64,7 +64,7 @@ qqm = qqbuild(s[:m], s2[:m])
 X = qqm.qx
 y = qqm.qy
 slope = (1 / (transpose(X) * X)[1] * transpose(X) * y)[1]
-@test_approx_eq_eps slope 1.0 0.1
+@test slope ≈ 1.0 atol=0.1

 # NOTE: test for s is not stable
 # probably due to the transformation
@@ -72,4 +72,4 @@ slope = (1 / (transpose(X) * X)[1] * transpose(X) * y)[1]
 # X = qqs.qx
 # y = qqs.qy
 # slope = (1 / (transpose(X) * X)[1] * transpose(X) * y)[1]
-# @test_approx_eq_eps slope 1.0 0.1
+# @test slope ≈ 1.0 atol=0.1
diff --git a/test/hmc.jl/matrix_support.jl b/test/hmc.jl/matrix_support.jl
index cb88c7d31..5275ecb09 100644
--- a/test/hmc.jl/matrix_support.jl
+++ b/test/hmc.jl/matrix_support.jl
@@ -15,4 +15,4 @@ for _ in 1:5
   push!(vs, mean(chain[:v]))
 end

-@test_approx_eq_eps mean(vs) (7 * [1 0.5; 0.5 1]) 0.5
+@test mean(vs) ≈ (7 * [1 0.5; 0.5 1]) atol=0.5
diff --git a/test/hmc.jl/multivariate_support.jl
b/test/hmc.jl/multivariate_support.jl index 9e10cb350..5e31839ae 100644 --- a/test/hmc.jl/multivariate_support.jl +++ b/test/hmc.jl/multivariate_support.jl @@ -4,7 +4,7 @@ using Turing, Distributions # Define helper functions function sigmoid(t) - return 1 / (1 + exp(-t)) + return 1 / (1 + exp.(-t)) end # Define NN flow diff --git a/test/hmcda.jl/hmcda.jl b/test/hmcda.jl/hmcda.jl index 0a1da5eea..64c2cde17 100644 --- a/test/hmcda.jl/hmcda.jl +++ b/test/hmcda.jl/hmcda.jl @@ -21,8 +21,8 @@ check_numerical(res1, [:s, :m], [49/24, 7/6]) # res2 = sample(gdemo([1.5, 2.0]), alg2) # -# @test_approx_eq_eps mean(res2[:s]) 49/24 0.2 -# @test_approx_eq_eps mean(res2[:m]) 7/6 0.2 +# @test mean(res2[:s]) ≈ 49/24 atol=0.2 +# @test mean(res2[:m]) ≈ 7/6 atol=0.2 res3 = sample(gdemo([1.5, 2.0]), alg3) diff --git a/test/is.jl/importance_sampling.jl b/test/is.jl/importance_sampling.jl index 329dd9f2e..ea4b25245 100644 --- a/test/is.jl/importance_sampling.jl +++ b/test/is.jl/importance_sampling.jl @@ -8,7 +8,7 @@ using Base.Test function logsum(xs :: Vector{Float64}) largest = maximum(xs) - ys = map(x -> exp(x - largest), xs) + ys = map(x -> exp.(x - largest), xs) result = log(sum(ys)) + largest return result end diff --git a/test/pmmh.jl/pmmh2.jl b/test/pmmh.jl/pmmh2.jl index eb6fab5af..b8562c097 100644 --- a/test/pmmh.jl/pmmh2.jl +++ b/test/pmmh.jl/pmmh2.jl @@ -41,9 +41,9 @@ end gibbs = PMMH(500, SMC(10, :z1, :z2, :z3, :z4), :mu1, :mu2) chain = sample(MoGtest(D), gibbs) -@test_approx_eq_eps mean(chain[:z1]) 1.0 0.1 -@test_approx_eq_eps mean(chain[:z2]) 1.0 0.1 -@test_approx_eq_eps mean(chain[:z3]) 2.0 0.1 -@test_approx_eq_eps mean(chain[:z4]) 2.0 0.1 -@test_approx_eq_eps mean(chain[:mu1]) 1.0 0.1 -@test_approx_eq_eps mean(chain[:mu2]) 4.0 0.1 +@test mean(chain[:z1]) ≈ 1.0 atol=0.1 +@test mean(chain[:z2]) ≈ 1.0 atol=0.1 +@test mean(chain[:z3]) ≈ 2.0 atol=0.1 +@test mean(chain[:z4]) ≈ 2.0 atol=0.1 +@test mean(chain[:mu1]) ≈ 1.0 atol=0.1 +@test mean(chain[:mu2]) ≈ 4.0 atol=0.1 diff --git a/test/resample.jl/resample.jl b/test/resample.jl/resample.jl index 0f041c5dc..97775a4f3 100644 --- a/test/resample.jl/resample.jl +++ b/test/resample.jl/resample.jl @@ -13,8 +13,8 @@ resResidual = Turing.resampleResidual( [0.3, 0.4, 0.3], num_samples ) Turing.resample( [0.3, 0.4, 0.3]) resSystematic2=Turing.resample( [0.3, 0.4, 0.3], num_samples ) -@test_approx_eq_eps sum(resSystematic .== 2) (num_samples * 0.4) 1e-3*num_samples -@test_approx_eq_eps sum(resSystematic2 .== 2) (num_samples * 0.4) 1e-3*num_samples -@test_approx_eq_eps sum(resStratified .== 2) (num_samples * 0.4) 1e-3*num_samples -@test_approx_eq_eps sum(resMultinomial .== 2) (num_samples * 0.4) 1e-2*num_samples -@test_approx_eq_eps sum(resResidual .== 2) (num_samples * 0.4) 1e-2*num_samples +@test sum(resSystematic .== 2) ≈ (num_samples * 0.4) atol=1e-3*num_samples +@test sum(resSystematic2 .== 2) ≈ (num_samples * 0.4) atol=1e-3*num_samples +@test sum(resStratified .== 2) ≈ (num_samples * 0.4) atol=1e-3*num_samples +@test sum(resMultinomial .== 2) ≈ (num_samples * 0.4) atol=1e-2*num_samples +@test sum(resResidual .== 2) ≈ (num_samples * 0.4) atol=1e-2*num_samples diff --git a/test/sghmc.jl/sghmc.jl b/test/sghmc.jl/sghmc.jl index 29b9a024d..c6b5795c3 100644 --- a/test/sghmc.jl/sghmc.jl +++ b/test/sghmc.jl/sghmc.jl @@ -16,8 +16,8 @@ end res1 = sample(gdemo([1.5, 2.0]), alg1) println("E[s] = $(mean(res1[:s]))") println("E[m] = $(mean(res1[:m]))") -@test_approx_eq_eps mean(res1[:s]) 49/24 0.2 -@test_approx_eq_eps mean(res1[:m]) 7/6 0.2 +@test mean(res1[:s]) ≈ 49/24 
atol=0.2 +@test mean(res1[:m]) ≈ 7/6 atol=0.2 res1 = sample(gdemo([1.5, 2.0]), HMC(3000, 0.2, 4)) println("HMC") diff --git a/test/sgld.jl/sgld.jl b/test/sgld.jl/sgld.jl index 7e403c4b1..54ca0d0de 100644 --- a/test/sgld.jl/sgld.jl +++ b/test/sgld.jl/sgld.jl @@ -17,8 +17,8 @@ s_res1weightedMean = sum(res1[:lf_eps] .* res1[:s]) / sum(res1[:lf_eps]) m_res1weightedMean = sum(res1[:lf_eps] .* res1[:m]) / sum(res1[:lf_eps]) println("E[s] = $(s_res1weightedMean)") println("E[m] = $(m_res1weightedMean)") -@test_approx_eq_eps s_res1weightedMean 49/24 0.2 -@test_approx_eq_eps m_res1weightedMean 7/6 0.2 +@test s_res1weightedMean ≈ 49/24 atol=0.2 +@test m_res1weightedMean ≈ 7/6 atol=0.2 res2 = sample(gdemo([1.5, 2.0]), HMC(3000, 0.2, 4)) println("HMC") diff --git a/test/trace.jl/trace.jl b/test/trace.jl/trace.jl index 52f931564..b54c53136 100644 --- a/test/trace.jl/trace.jl +++ b/test/trace.jl/trace.jl @@ -48,7 +48,7 @@ Base.@assert consume(t) == 2 Base.@assert consume(a) == 4 a2 = fork(t) -a2_vals = filter(x -> ~isnan.(x), a2.vi.vals[end]) +a2_vals = collect(Iterators.filter(x -> ~isnan.(x), a2.vi.vals[end])) Base.@assert length(a2_vals) == 5 Base.@assert t.vi.vals[end] == a2_vals Base.@assert t.vi.index == a2.vi.index diff --git a/test/transform.jl/transform.jl b/test/transform.jl/transform.jl index e7c05a8b5..0c8ced93f 100644 --- a/test/transform.jl/transform.jl +++ b/test/transform.jl/transform.jl @@ -8,16 +8,16 @@ for dist in dists x = rand(dist) # sample y = link(dist, x) # X -> R # Test if R -> X is equal to the original value - @test_approx_eq_eps invlink(dist, y) x 1e-9 + @test invlink(dist, y) ≈ x atol=1e-9 end -# julia> logpdf_with_trans(Dirichlet([1., 1., 1.]), exp([-1000., -1000., -1000.]), true) +# julia> logpdf_with_trans(Dirichlet([1., 1., 1.]), exp.([-1000., -1000., -1000.]), true) # NaN # julia> logpdf_with_trans(Dirichlet([1., 1., 1.]), [-1000., -1000., -1000.], true, true) # -1999.30685281944 # -# julia> logpdf_with_trans(Dirichlet([1., 1., 1.]), exp([-1., -2., -3.]), true) +# julia> logpdf_with_trans(Dirichlet([1., 1., 1.]), exp.([-1., -2., -3.]), true) # -3.006450206744678 # julia> logpdf_with_trans(Dirichlet([1., 1., 1.]), [-1., -2., -3.], true, true) # -3.006450206744678 @@ -25,6 +25,6 @@ d = Dirichlet([1., 1., 1.]) r = [-1000., -1000., -1000.] r2 = [-1., -2., -3.] -@test_approx_eq_eps invlink(d, r) [0., 0., 1.] 1e-9 -#@test_approx_eq_eps logpdf_with_trans(d, invlink(d, r), true) -1999.30685281944 1e-9 # NaN -@test_approx_eq_eps logpdf_with_trans(d, invlink(d, r2), true) -3.760398892580863 1e-9 +@test invlink(d, r) ≈ [0., 0., 1.] 
atol=1e-9
+#@test logpdf_with_trans(d, invlink(d, r), true) ≈ -1999.30685281944 atol=1e-9 # NaN
+@test logpdf_with_trans(d, invlink(d, r2), true) ≈ -3.760398892580863 atol=1e-9
diff --git a/test/util.jl/util.jl b/test/util.jl/util.jl
index 40038a29e..fac705285 100644
--- a/test/util.jl/util.jl
+++ b/test/util.jl/util.jl
@@ -6,8 +6,8 @@ using Base.Test
 i = 1
 @test @VarName(s)[1:end-1] == (:s,)
 @test @VarName(x[1,2][1+5][45][3][i])[1:end-1] == (:x,([1,2],[6],[45],[3],[1]))
-@test invlogit(1.1) == 1.0 / (exp(-1.1) + 1.0)
-@test_approx_eq_eps logit(0.3) -0.8472978603872036 1e-9
+@test invlogit(1.1) == 1.0 / (exp.(-1.1) + 1.0)
+@test logit(0.3) ≈ -0.8472978603872036 atol=1e-9
 @test isnan.(logit(1.0)) == false
 @test isinf.(logit(1.0)) == true
 @test isnan.(logit(0.)) == false
diff --git a/test/varinfo.jl/test_varname.jl b/test/varinfo.jl/test_varname.jl
index 36b6e5dab..793552b9a 100644
--- a/test/varinfo.jl/test_varname.jl
+++ b/test/varinfo.jl/test_varname.jl
@@ -34,7 +34,7 @@ v_mat = eval(varname(:((x[1,2][1+5][45][3][i])))[1])
 end
 chain = sample(mat_name_test(), HMC(1000, 0.2, 4))
-@test_approx_eq_eps mean(chain[Symbol("p[1, 1]")]) 0 0.25
+@test mean(chain[Symbol("p[1, 1]")]) ≈ 0 atol=0.25
 
 # Multi array
 i, j = 1, 2
@@ -51,4 +51,4 @@ v_arrarr = eval(varname(:(x[i][j]))[1])
   p
 end
 chain = sample(marr_name_test(), HMC(1000, 0.2, 4))
-@test_approx_eq_eps mean(chain[Symbol("p[1][1]")]) 0 0.25
+@test mean(chain[Symbol("p[1][1]")]) ≈ 0 atol=0.25

From aecd4e78234e59ada60b5c0bf1e1b55cb20be247 Mon Sep 17 00:00:00 2001
From: Kai Xu
Date: Tue, 10 Oct 2017 21:51:45 +0100
Subject: [PATCH 16/27] implement callbacks for inner function
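This adds three thin entry points on top of the compiled inner function, so
the model can be invoked with a VarInfo, with a Sampler, or with no arguments
at all. For a model named `gdemo`, the generated methods look roughly as
follows (an illustrative expansion only — the actual inner name is
additionally wrapped in gensym() when the outer function runs):

    gdemo_model(vi::Turing.VarInfo, sampler::Union{Void,Turing.Sampler}) = ... # model body
    gdemo_model(vi::Turing.VarInfo) = gdemo_model(vi, nothing)
    gdemo_model(sampler::Turing.Sampler) = gdemo_model(Turing.VarInfo(), sampler)
    gdemo_model() = gdemo_model(Turing.VarInfo(), nothing)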
---
 src/core/compiler.jl | 38 +++++++++++++++++++++++++++++++++-----
 1 file changed, 33 insertions(+), 5 deletions(-)

diff --git a/src/core/compiler.jl b/src/core/compiler.jl
index 4a9016a2e..4f1ed51d6 100644
--- a/src/core/compiler.jl
+++ b/src/core/compiler.jl
@@ -236,7 +236,8 @@ macro model(fexpr)
 
   dprintln(1, fbody_inner)
 
-  fname_inner = Symbol("$(fname)_model")
+  fname_inner_str = "$(fname)_model"
+  fname_inner = Symbol(fname_inner_str)
   fdefn_inner = Expr(:(=), fname_inner, Expr(:function, Expr(:call, fname_inner))) # fdefn = :( $fname() )
   # push!(fdefn_inner.args[2].args[1].args, fargs_inner...) # set parameters (x,y;data..)
@@ -246,13 +247,20 @@ macro model(fexpr)
   push!(fdefn_inner.args[2].args, deepcopy(fbody_inner)) # set function definition
   dprintln(1, fdefn_inner)
+
+  fdefn_inner_callback_1 = parse("$fname_inner_str(vi::Turing.VarInfo)=$fname_inner_str(vi,nothing)")
+  fdefn_inner_callback_2 = parse("$fname_inner_str(sampler::Turing.Sampler)=$fname_inner_str(Turing.VarInfo(),sampler)")
+  fdefn_inner_callback_3 = parse("$fname_inner_str()=$fname_inner_str(Turing.VarInfo(),nothing)")
 
   compiler = Dict(:fname => fname,
                   :fargs => fargs,
                   :fbody => fbody,
                   :dvars => Set{Symbol}(), # data
                   :pvars => Set{Symbol}(), # parameter
-                  :fdefn_inner => fdefn_inner)
+                  :fdefn_inner => fdefn_inner,
+                  :fdefn_inner_callback_1 => fdefn_inner_callback_1,
+                  :fdefn_inner_callback_2 => fdefn_inner_callback_2,
+                  :fdefn_inner_callback_3 => fdefn_inner_callback_3)
 
   # Outer function definition 1: f(x,y) ==> f(x,y;data=Dict())
   fargs_outer = deepcopy(fargs)
@@ -270,13 +278,33 @@ macro model(fexpr)
   fdefn_outer = Expr(:function, Expr(:call, fname, fargs_outer...),
                      Expr(:block, Expr(:return, fname_inner)))
 
+  unshift!(fdefn_outer.args[2].args, :(Main.eval(fdefn_inner_callback_3)))
+  unshift!(fdefn_outer.args[2].args, :(Main.eval(fdefn_inner_callback_2)))
+  unshift!(fdefn_outer.args[2].args, :(Main.eval(fdefn_inner_callback_1)))
   unshift!(fdefn_outer.args[2].args, :(Main.eval(fdefn_inner)))
   unshift!(fdefn_outer.args[2].args, quote
       # Check fargs, data
      eval(Turing, :(_compiler_ = deepcopy($compiler)))
-      fargs = Turing._compiler_[:fargs];
-      fdefn_inner = Turing._compiler_[:fdefn_inner];
-      fdefn_inner.args[2].args[1].args[1] = gensym((fdefn_inner.args[2].args[1].args[1]))
+      fargs = Turing._compiler_[:fargs];
+
+      # Copy the expr of function definition and callbacks
+      fdefn_inner = Turing._compiler_[:fdefn_inner];
+      fdefn_inner_callback_1 = Turing._compiler_[:fdefn_inner_callback_1];
+      fdefn_inner_callback_2 = Turing._compiler_[:fdefn_inner_callback_2];
+      fdefn_inner_callback_3 = Turing._compiler_[:fdefn_inner_callback_3];
+
+      # Add gensym to function name
+      fname_inner_with_gensym = gensym((fdefn_inner.args[2].args[1].args[1]));
+
+      # Change the name of inner function definition to the one with gensym()
+      fdefn_inner.args[2].args[1].args[1] = fname_inner_with_gensym
+      fdefn_inner_callback_1.args[1].args[1] = fname_inner_with_gensym
+      fdefn_inner_callback_1.args[2].args[2].args[1] = fname_inner_with_gensym
+      fdefn_inner_callback_2.args[1].args[1] = fname_inner_with_gensym
+      fdefn_inner_callback_2.args[2].args[2].args[1] = fname_inner_with_gensym
+      fdefn_inner_callback_3.args[1].args[1] = fname_inner_with_gensym
+      fdefn_inner_callback_3.args[2].args[2].args[1] = fname_inner_with_gensym
+
       # Copy data dictionary
       for k in keys(data)
         if fdefn_inner.args[2].args[2].args[1].head == :line

From f0fa67a4dfb27b3639177bc1c116a9f4c2e953a5 Mon Sep 17 00:00:00 2001
From: Kai Xu
Date: Thu, 19 Oct 2017 20:58:58 +0100
Subject: [PATCH 17/27] fix model type bug

---
 example-models/stan-models/normal-mixture.model.jl | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/example-models/stan-models/normal-mixture.model.jl b/example-models/stan-models/normal-mixture.model.jl
index 1bf7b2419..fce8d4e62 100644
--- a/example-models/stan-models/normal-mixture.model.jl
+++ b/example-models/stan-models/normal-mixture.model.jl
@@ -8,7 +8,7 @@ using ForwardDiff: Dual
 
   theta ~ Uniform(0, 1)
 
-  mu = tzeros(Dual, 2) # mu is sampled by HMC
+  mu = Vector{Dual}(2) # mu is sampled by HMC
   for i = 1:2
     mu[i] ~ Normal(0, 10)
   end

From ced0eb8b62060f3f4e5d65bd64c10d4b41e10c10 Mon Sep 17 00:00:00 2001
From: Kai Xu
Date: Sun, 22 Oct 2017 23:45:00 +0100
Subject: [PATCH 18/27] Fix type
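`Vector{Dual}(2)` ties the container to ForwardDiff, but the same model also
has to hold plain floating-point values when it is not being differentiated.
The abstract `Real` element type lets one container accept both; a minimal
sketch of why this works (hypothetical values, assuming ForwardDiff is
loaded):

    mu = Vector{Real}(2)                                 # uninitialized, abstract eltype
    mu[1] = 0.5                                          # a plain Float64 fits
    mu[2] = ForwardDiff.Dual{Void,Float64,1}(0.5, 1.0)   # and so does a dual number from HMC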
---
 example-models/stan-models/normal-mixture.model.jl | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/example-models/stan-models/normal-mixture.model.jl b/example-models/stan-models/normal-mixture.model.jl
index fce8d4e62..ba8a2f6b4 100644
--- a/example-models/stan-models/normal-mixture.model.jl
+++ b/example-models/stan-models/normal-mixture.model.jl
@@ -8,7 +8,7 @@ using ForwardDiff: Dual
 
   theta ~ Uniform(0, 1)
 
-  mu = Vector{Dual}(2) # mu is sampled by HMC
+  mu = Vector{Real}(2) # mu is sampled by HMC
   for i = 1:2
     mu[i] ~ Normal(0, 10)
   end

From 1776c1f9dd42d2ef1c8fc3f22ac43ffaf8ca9a12 Mon Sep 17 00:00:00 2001
From: Kai Xu
Date: Mon, 23 Oct 2017 16:03:24 +0100
Subject: [PATCH 19/27] update Dual in benchmark

---
 benchmarks/optimization.jl | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/benchmarks/optimization.jl b/benchmarks/optimization.jl
index 13a389789..ecc3f251c 100644
--- a/benchmarks/optimization.jl
+++ b/benchmarks/optimization.jl
@@ -290,7 +290,7 @@ optRes *= "realpart(): \n"
 
 using ForwardDiff: Dual
 
-ds = [Dual{10,Float64}(rand()) for i = 1:1000]
+ds = [Dual{Void,Float64,10}(rand()) for i = 1:1000]
 
 t_map = @elapsed for i = 1:1000 map(d -> d.value, ds) end
 t_list = @elapsed for i = 1:1000 Float64[ds[i].value for i = 1:length(ds)] end
@@ -304,7 +304,7 @@ optRes *= "Constructing Dual numbers: \n"
 
 dps = zeros(44); dps[11] = 1;
 
-t_dualnumbers = @elapsed for _ = 1:(44*2000*5) ForwardDiff.Dual(1.1, dps...) end
+t_dualnumbers = @elapsed for _ = 1:(44*2000*5) ForwardDiff.Dual{Void, Float64, 44}(1.1, dps...) end
 
 optRes *= "44*2000*5 times: $t_dualnumbers\n"

From 7401353766ee5e1aa4188b945424cd625bfbaace Mon Sep 17 00:00:00 2001
From: Kai Xu
Date: Mon, 23 Oct 2017 22:32:49 +0100
Subject: [PATCH 20/27] update Dual constructor

---
 benchmarks/optimization.jl | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/benchmarks/optimization.jl b/benchmarks/optimization.jl
index ecc3f251c..087188267 100644
--- a/benchmarks/optimization.jl
+++ b/benchmarks/optimization.jl
@@ -304,7 +304,7 @@ optRes *= "Constructing Dual numbers: \n"
 
 dps = zeros(44); dps[11] = 1;
 
-t_dualnumbers = @elapsed for _ = 1:(44*2000*5) ForwardDiff.Dual{Void, Float64, 44}(1.1, dps...) end
+t_dualnumbers = @elapsed for _ = 1:(44*2000*5) ForwardDiff.Dual{Void, Float64, 44}(1.1, dps) end
 
 optRes *= "44*2000*5 times: $t_dualnumbers\n"

From 4f461c0965c6ad2ff98c6ce1e8b9cba09806e84b Mon Sep 17 00:00:00 2001
From: Hong Ge
Date: Tue, 24 Oct 2017 17:54:16 +0100
Subject: [PATCH 21/27] Bump up required Julia version to 0.6

---
 REQUIRE | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/REQUIRE b/REQUIRE
index 814d7a751..0a804be92 100644
--- a/REQUIRE
+++ b/REQUIRE
@@ -1,4 +1,4 @@
-julia 0.5
+julia 0.6
 Stan
 Distributions 0.11.0

From 36eeb26b74e9bc4b473b06bd711787e79b4ffc6e Mon Sep 17 00:00:00 2001
From: Hong Ge
Date: Thu, 9 Nov 2017 17:03:07 +0000
Subject: [PATCH 22/27] Disable deprecated warning messages for `consume/produce`.
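`produce`/`consume` are deprecated in Julia 0.6 in favour of channels, but
Turing's copyable tasks still depend on their semantics, so both methods are
kept here and wrapped in `@suppress_err` (assumed to come from Suppressor.jl)
to silence the deprecation messages. A minimal sketch of the producer/consumer
pattern these methods implement — illustrative only, not part of the patch:

    t = Task(() -> begin
        produce(1)
        produce(2)
    end)
    consume(t) # -> 1
    consume(t) # -> 2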
--- src/trace/taskcopy.jl | 110 +++++++++++++++++++++++++----------------- 1 file changed, 67 insertions(+), 43 deletions(-) diff --git a/src/trace/taskcopy.jl b/src/trace/taskcopy.jl index 468e3e86d..22823c042 100644 --- a/src/trace/taskcopy.jl +++ b/src/trace/taskcopy.jl @@ -39,52 +39,76 @@ function Base.copy(t::Task) end @suppress_err function Base.produce(v) - #### un-optimized version - #q = current_task().consumers - #t = shift!(q.waitq) - #empty = isempty(q.waitq) - ct = current_task() - local empty, t, q - while true - q = ct.consumers - if isa(q,Task) - t = q - ct.consumers = nothing - empty = true - break - elseif isa(q,Condition) && !isempty(q.waitq) - t = shift!(q.waitq) - empty = isempty(q.waitq) - break + + ct = current_task() + local empty, t, q + while true + q = ct.consumers + if isa(q,Task) + t = q + ct.consumers = nothing + empty = true + break + elseif isa(q,Condition) && !isempty(q.waitq) + t = shift!(q.waitq) + empty = isempty(q.waitq) + break + end + wait() end - wait() - end - t.state = :runnable - if empty - if isempty(Base.Workqueue) - yieldto(t, v) + t.state == :runnable || throw(AssertionError("producer.consumer.state == :runnable")) + if empty + Base.schedule_and_wait(t, v) + ct = current_task() # When a task is copied, ct should be updated to new task ID. + while true + # wait until there are more consumers + q = ct.consumers + if isa(q,Task) + return q.result + elseif isa(q,Condition) && !isempty(q.waitq) + return q.waitq[1].result + end + wait() + end else - Base.schedule_and_wait(t, v) - end - ct = current_task() # When a task is copied, ct should be updated to new task ID. - while true - # wait until there are more consumers - q = ct.consumers - if isa(q,Task) - return q.result - elseif isa(q,Condition) && !isempty(q.waitq) + schedule(t, v) + # make sure `t` runs before us. otherwise, the producer might + # finish before `t` runs again, causing it to see the producer + # as done, causing done(::Task, _) to miss the value `v`. + # see issue #7727 + yield() return q.waitq[1].result - end - wait() end - else - schedule(t, v) - # make sure `t` runs before us. otherwise, the producer might - # finish before `t` runs again, causing it to see the producer - # as done, causing done(::Task, _) to miss the value `v`. - # see issue #7727 - yield() - return q.waitq[1].result - end +end + +produce(v...) = produce(v) + +@suppress_err function Base.consume(P::Task, values...) + + if istaskdone(P) + return wait(P) + end + + ct = current_task() + ct.result = length(values)==1 ? values[1] : values + + #### un-optimized version + #if P.consumers === nothing + # P.consumers = Condition() + #end + #push!(P.consumers.waitq, ct) + # optimized version that avoids the queue for 1 consumer + if P.consumers === nothing || (isa(P.consumers,Condition)&&isempty(P.consumers.waitq)) + P.consumers = ct + else + if isa(P.consumers, Task) + t = P.consumers + P.consumers = Condition() + push!(P.consumers.waitq, t) + end + push!(P.consumers.waitq, ct) + end + + P.state == :runnable ? Base.schedule_and_wait(P) : wait() # don't attempt to queue it twice end From d8b7ed0da951c9b17fb185430f124572844ac9ab Mon Sep 17 00:00:00 2001 From: Hong Ge Date: Thu, 9 Nov 2017 17:15:37 +0000 Subject: [PATCH 23/27] Remove duplicate definition of produce. --- src/trace/taskcopy.jl | 2 -- 1 file changed, 2 deletions(-) diff --git a/src/trace/taskcopy.jl b/src/trace/taskcopy.jl index 22823c042..3aed31995 100644 --- a/src/trace/taskcopy.jl +++ b/src/trace/taskcopy.jl @@ -82,8 +82,6 @@ end end end -produce(v...) 
= produce(v) - @suppress_err function Base.consume(P::Task, values...) if istaskdone(P) From 841df5cce5f9a50c11140012379afdd3eab004eb Mon Sep 17 00:00:00 2001 From: Kai Xu Date: Wed, 15 Nov 2017 00:30:31 +0000 Subject: [PATCH 24/27] fix floor, tanh, abs, log --- src/samplers/support/resample.jl | 4 ++-- src/samplers/support/transform.jl | 4 ++-- test/hmc.jl/multivariate_support.jl | 2 +- test/utility.jl | 6 +++--- 4 files changed, 8 insertions(+), 8 deletions(-) diff --git a/src/samplers/support/resample.jl b/src/samplers/support/resample.jl index 92cdfe8c8..f69caf038 100644 --- a/src/samplers/support/resample.jl +++ b/src/samplers/support/resample.jl @@ -20,7 +20,7 @@ function resampleResidual( w::Vector{Float64}, num_particles::Int ) M = length( w ) # "Repetition counts" (plus the random part, later on): - Ns = floor(length(w) .* w) + Ns = floor.(length(w) .* w) # The "remainder" or "residual" count: R = Int(sum( Ns )) @@ -29,7 +29,7 @@ function resampleResidual( w::Vector{Float64}, num_particles::Int ) M_rdn = num_particles-R; # The modified weights: - Ws = (M .* w - floor(M .* w))/M_rdn; + Ws = (M .* w - floor.(M .* w))/M_rdn; # Draw the deterministic part: indx1 = Array{Int}(R) diff --git a/src/samplers/support/transform.jl b/src/samplers/support/transform.jl index 9cbafff96..56b7e6007 100644 --- a/src/samplers/support/transform.jl +++ b/src/samplers/support/transform.jl @@ -104,7 +104,7 @@ invlink{T<:Real}(d::PositiveDistribution, x::Union{T,Vector{T}}) = exp.(x) logpdf_with_trans{T<:Real}(d::PositiveDistribution, x::Union{T,Vector{T}}, transform::Bool) = begin lp = logpdf(d, x) - transform ? lp + log(x) : lp + transform ? lp + log.(x) : lp end @@ -212,7 +212,7 @@ logpdf_with_trans{T}(d::SimplexDistribution, X::Matrix{T}, transform::Bool) = be @inbounds Z[k,:] = X[k,:] ./ (one(T) - sum(X[1:k-1,:],1))' end for k = 1:K-1 - lp += log(Z[k,:]) + log(one(T) - Z[k,:]) + log(one(T) - sum(X[1:k-1,:], 1))' + lp += log.(Z[k,:]) + log.(one(T) - Z[k,:]) + log.(one(T) - sum(X[1:k-1,:], 1))' end end lp diff --git a/test/hmc.jl/multivariate_support.jl b/test/hmc.jl/multivariate_support.jl index 5e31839ae..bf38b087e 100644 --- a/test/hmc.jl/multivariate_support.jl +++ b/test/hmc.jl/multivariate_support.jl @@ -9,7 +9,7 @@ end # Define NN flow function nn(x, b1, w11, w12, w13, bo, wo) - h = tanh([w11' * x + b1[1]; w12' * x + b1[2]; w13' * x + b1[3]]) + h = tanh.([w11' * x + b1[1]; w12' * x + b1[2]; w13' * x + b1[3]]) return sigmoid((wo' * h)[1] + bo) end diff --git a/test/utility.jl b/test/utility.jl index dab907dfb..003fee08d 100644 --- a/test/utility.jl +++ b/test/utility.jl @@ -4,13 +4,13 @@ check_numerical(chain, symbols::Vector{Symbol}, exact_vals::Vector; for (sym, val) in zip(symbols, exact_vals) E = mean(chain[sym]) print(" $sym = $E ≈ $val (eps = $eps) ?") - cmp = abs(sum(mean(chain[sym]) - val)) <= eps + cmp = abs.(sum(mean(chain[sym]) - val)) <= eps if cmp print_with_color(:green, "./\n") - print_with_color(:green, " $sym = $E, diff = $(abs(E - val))\n") + print_with_color(:green, " $sym = $E, diff = $(abs.(E - val))\n") else print_with_color(:red, " X\n") - print_with_color(:red, " $sym = $E, diff = $(abs(E - val))\n") + print_with_color(:red, " $sym = $E, diff = $(abs.(E - val))\n") end end end From b895169942d90ff5b0e80221f446a1c7ec99217c Mon Sep 17 00:00:00 2001 From: Kai Xu Date: Wed, 15 Nov 2017 01:17:22 +0000 Subject: [PATCH 25/27] fix logpdf warning and bug --- src/samplers/sampler.jl | 8 ++++++-- src/samplers/support/transform.jl | 4 ++-- 2 files changed, 8 insertions(+), 4 
deletions(-) diff --git a/src/samplers/sampler.jl b/src/samplers/sampler.jl index 7de389fcc..5be4f89dc 100644 --- a/src/samplers/sampler.jl +++ b/src/samplers/sampler.jl @@ -83,7 +83,7 @@ assume{T<:Distribution}(spl::Void, dists::Vector{T}, vn::VarName, var::Any, vi:: end end - acclogp!(vi, sum(logpdf(dist, rs, istrans(vi, vns[1])))) + acclogp!(vi, sum(logpdf_with_trans(dist, rs, istrans(vi, vns[1])))) var end @@ -95,5 +95,9 @@ observe{T<:Distribution}(spl::Void, dists::Vector{T}, value::Any, vi::VarInfo) = @assert length(dists) == 1 "[observe] Turing only support vectorizing i.i.d distribution" dist = dists[1] @assert isa(dist, UnivariateDistribution) || isa(dist, MultivariateDistribution) "[observe] vectorizing matrix distribution is not supported" - acclogp!(vi, sum(logpdf(dist, value))) + if isa(dist, UnivariateDistribution) # only univariate distributions support broadcast operation (logpdf.) by Distributions.jl + acclogp!(vi, sum(logpdf.(dist, value))) + else + acclogp!(vi, sum(logpdf(dist, value))) + end end diff --git a/src/samplers/support/transform.jl b/src/samplers/support/transform.jl index 56b7e6007..85613ee59 100644 --- a/src/samplers/support/transform.jl +++ b/src/samplers/support/transform.jl @@ -87,7 +87,7 @@ link{T<:Real}(d::RealDistribution, x::Union{T,Vector{T}}) = x invlink{T<:Real}(d::RealDistribution, x::Union{T,Vector{T}}) = x -logpdf_with_trans{T<:Real}(d::RealDistribution, x::Union{T,Vector{T}}, transform::Bool) = logpdf(d, x) +logpdf_with_trans{T<:Real}(d::RealDistribution, x::Union{T,Vector{T}}, transform::Bool) = logpdf.(d, x) ######### @@ -103,7 +103,7 @@ link{T<:Real}(d::PositiveDistribution, x::Union{T,Vector{T}}) = log(x) invlink{T<:Real}(d::PositiveDistribution, x::Union{T,Vector{T}}) = exp.(x) logpdf_with_trans{T<:Real}(d::PositiveDistribution, x::Union{T,Vector{T}}, transform::Bool) = begin - lp = logpdf(d, x) + lp = logpdf.(d, x) transform ? 
lp + log.(x) : lp
 end

From a7bf48a015c8657459de92cadd2616729b6e7011 Mon Sep 17 00:00:00 2001
From: Kai Xu
Date: Wed, 15 Nov 2017 02:37:05 +0000
Subject: [PATCH 26/27] fix vec assume init

---
 src/samplers/sampler.jl      |  2 +-
 src/samplers/support/init.jl | 54 ++++++++++++++++++++++++++++--------
 2 files changed, 43 insertions(+), 13 deletions(-)

diff --git a/src/samplers/sampler.jl b/src/samplers/sampler.jl
index 5be4f89dc..d56ccc2d5 100644
--- a/src/samplers/sampler.jl
+++ b/src/samplers/sampler.jl
@@ -57,7 +57,7 @@ assume{T<:Distribution}(spl::Void, dists::Vector{T}, vn::VarName, var::Any, vi::
   if haskey(vi, vns[1])
     rs = vi[vns]
   else
-    rs = rand(dist, n)
+    rs = init(dist, n)
 
     if isa(dist, UnivariateDistribution) || isa(dist, MatrixDistribution)
       for i = 1:n
diff --git a/src/samplers/support/init.jl b/src/samplers/support/init.jl
index b380b1b66..bc1ac1961 100644
--- a/src/samplers/support/init.jl
+++ b/src/samplers/support/init.jl
@@ -1,14 +1,15 @@
+# Uniform rand on [0, e)
+randrealuni() = Real(e * rand()) # may Euler's number give us good luck
+randrealuni(args...) = map(x -> Real(x), e * rand(args...))
+
 # Only use customized initialization for transformable distributions
 init(dist::TransformDistribution) = inittrans(dist)
 
 # Callbacks for un-transformable distributions
 init(dist::Distribution) = rand(dist)
 
-# Uniform rand with range
-randuni() = e * rand() # may Euler's number give us good luck
-
 inittrans(dist::UnivariateDistribution) = begin
-  r = Real(randuni())
+  r = randrealuni()
 
   r = invlink(dist, r)
 
@@ -18,10 +19,7 @@ end
 inittrans(dist::MultivariateDistribution) = begin
   D = size(dist)[1]
 
-  r = Vector{Real}(D)
-  for d = 1:D
-    r[d] = randuni()
-  end
+  r = randrealuni(D)
 
   r = invlink(dist, r)
 
@@ -31,12 +29,44 @@ end
 inittrans(dist::MatrixDistribution) = begin
   D = size(dist)
 
-  r = Matrix{Real}(D...)
-  for d1 = 1:D, d2 = 1:D
-    r[d1,d2] = randuni()
-  end
+  r = randrealuni(D...)
 
   r = invlink(dist, r)
 
   r
 end
+
+
+# Only use customized initialization for transformable distributions
+init(dist::TransformDistribution, n::Int) = inittrans(dist, n)
+
+# Callbacks for un-transformable distributions
+init(dist::Distribution, n::Int) = rand(dist, n)
+
+inittrans(dist::UnivariateDistribution, n::Int) = begin
+  rs = randrealuni(n)
+
+  rs = invlink(dist, rs)
+
+  rs
+end
+
+inittrans(dist::MultivariateDistribution, n::Int) = begin
+  D = size(dist)[1]
+
+  rs = randrealuni(D, n)
+
+  rs = invlink(dist, rs)
+
+  rs
+end
+
+inittrans(dist::MatrixDistribution, n::Int) = begin
+  D = size(dist)
+
+  rs = [randrealuni(D...) for _ = 1:n]
+
+  rs = invlink(dist, rs)
+
+  rs
+end

From 3fd77aff8c7ad10483fb132008689e0a2ad45095 Mon Sep 17 00:00:00 2001
From: Hong Ge
Date: Thu, 16 Nov 2017 14:28:10 +0000
Subject: [PATCH 27/27] Travis: Allow `Benchmarking` test to fail.

---
 .travis.yml | 1 +
 1 file changed, 1 insertion(+)

diff --git a/.travis.yml b/.travis.yml
index a36441a3f..aae5970b5 100644
--- a/.travis.yml
+++ b/.travis.yml
@@ -36,6 +36,7 @@ matrix:
   allow_failures:
     - env: GROUP=Test
       os: osx
+    - env: GROUP=Bench
     - env: GROUP=LDA
     - env: GROUP=MOC
     - env: GROUP=SV
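Postscript on [PATCH 26/27]: `inittrans` now draws uniformly on [0, e) and
maps the draw through `invlink`. For a positive-support distribution, whose
`invlink` is `exp.`, initial values therefore land in [1, e^e) ≈ [1, 15.15).
A sketch under that assumption:

    randrealuni() = Real(e * rand())  # uniform draw on [0, e), as in the patch
    r0 = exp.(randrealuni())          # invlink for a PositiveDistribution; r0 ∈ [1, e^e)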