diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 74de4761..99b2d253 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -16,22 +16,16 @@ jobs: matrix: include: - - version: '1' + - version: '1.6' os: ubuntu-latest arch: x64 - - version: '1.1' - os: ubuntu-latest - arch: x64 - - version: '1.2' - os: ubuntu-latest - arch: x64 - - version: '1.3' - os: ubuntu-latest + - version: '1.6' + os: windows-latest arch: x64 - - version: '1.4' + - version: '1.7' os: ubuntu-latest arch: x64 - - version: '1' + - version: '1.7' os: windows-latest arch: x64 steps: diff --git a/.gitignore b/.gitignore index 82211e26..9f0ea459 100644 --- a/.gitignore +++ b/.gitignore @@ -1,5 +1,6 @@ *.jl.cov *.jl.*.cov *.jl.mem +*.mem *.cov docs/build/ diff --git a/.vscode/launch.json b/.vscode/launch.json new file mode 100644 index 00000000..c912406a --- /dev/null +++ b/.vscode/launch.json @@ -0,0 +1,17 @@ +{ + // Use IntelliSense to learn about possible attributes. + // Hover to view descriptions of existing attributes. 
+ // For more information, visit: https://go.microsoft.com/fwlink/?linkid=830387 + "version": "0.2.0", + "configurations": [ + { + "type": "julia", + "request": "launch", + "name": "Run active Julia file", + "program": "${file}", + "stopOnEntry": false, + "cwd": "${workspaceFolder}", + "juliaEnv": "${command:activeJuliaEnvironment}" + } + ] +} \ No newline at end of file diff --git a/Project.toml b/Project.toml index af6e775d..4618f9ce 100644 --- a/Project.toml +++ b/Project.toml @@ -1,48 +1,50 @@ name = "EAGO" uuid = "bb8be931-2a91-5aca-9f87-79e1cb69959a" - authors = ["Matthew Wilhelm "] -version = "0.6.1" +version = "0.7.0" [deps] Cassette = "7057c7e9-c182-5462-911a-8362d720325c" +Cbc = "9961bab8-2fa3-5c5a-9d89-47fab24efd76" DataStructures = "864edb3b-99cc-5e75-8d2d-829cb0a9cfe8" DocStringExtensions = "ffbed154-4ef7-542d-bbb7-c09d3a79fcae" FastRounding = "fa42c844-2597-5d31-933b-ebd51ab2693f" ForwardDiff = "f6369f11-7733-5829-9624-2563aa707210" -GLPK = "60bf3e95-4087-53dc-ae20-288a0d20c6a6" IntervalArithmetic = "d1acc4aa-44c8-5952-acd4-ba5d80a2a253" IntervalContractors = "15111844-de3b-5229-b4ba-526f2f385dc9" Ipopt = "b6b21f68-93f8-5de0-b562-5493be1d77c9" JuMP = "4076af6c-e467-56ae-b986-b466b2749572" LinearAlgebra = "37e2e46d-f89d-539d-b4ee-838fcccc9c8e" +MINLPTests = "ee0a3090-8ee9-5cdb-b8cb-8eeba3165522" MathOptInterface = "b8f27783-ece8-5eb3-8dc8-9495eed66fee" McCormick = "53c679d3-6890-5091-8386-c291e8c8aaa1" NaNMath = "77ba4419-2d1f-58cd-9bb1-8ffee604a2e3" -NumericIO = "6c575b1c-77cb-5640-a5dc-a54116c90507" +PrettyTables = "08abe8d2-0d0c-5749-adfa-8a2ac140af0d" +Printf = "de0858da-6303-5e67-8744-51eddeeeb8d7" Reexport = "189a3867-3050-52da-a836-e630ba90ab69" +Requires = "ae029012-a4dd-5104-9daa-d747884805df" SparseArrays = "2f01184e-e22b-5df5-ae63-d93ebab69eaf" SpecialFunctions = "276daf66-3868-5448-9aa4-cd146d93841b" [compat] Cassette = "0.3.1" +Cbc = "~0.9" DataStructures = "~0.17, ~0.18" DocStringExtensions = "~0.8" FastRounding = "~0.2, ~0.3" 
-ForwardDiff = "~0.5.0, ~0.6, ~0.7, ~0.8, ~0.9, ~0.10" -GLPK = "~0.9, ~0.10, ~0.11, ~0.12, ~0.13" -IntervalArithmetic = "~0.14, ~0.15, ~0.16, ~0.17" -IntervalContractors = "~0.3, ~0.4" -Ipopt = "~0.6" -JuMP = "0.21.2, 0.21.3, 0.21.4, 0.21.5, 0.21.6" -MathOptInterface = "0.9.13, 0.9.14, 0.9.15, 0.9.16, 0.9.17, 0.9.18, 0.9.19" -McCormick = "0.11.0" +ForwardDiff = "~0.10" +IntervalContractors = "~0.4" +Ipopt = "~0.8" +JuMP = "~0.23" +MINLPTests = "0.5.2" +MathOptInterface = "~1" +McCormick = "~0.13" NaNMath = "~0.3" -NumericIO = "= 0.3.1" -Reexport = "~0.2" -SpecialFunctions = "~0.8, ~0.9, ~0.10" -julia = "~1" - +PrettyTables = "~1" +Reexport = "~0.2, ~1" +Requires = "~1" +SpecialFunctions = "~1, ~2" +julia = "~1.6, ~1.7" [extras] Test = "8dfed614-e22c-5e08-85e1-65c5234f0b40" diff --git a/code_analysis/check_invalidations.jl b/code_analysis/check_invalidations.jl new file mode 100644 index 00000000..287dc48a --- /dev/null +++ b/code_analysis/check_invalidations.jl @@ -0,0 +1,8 @@ +using SnoopCompile +invalidations = @snoopr using EAGO + +@show length(invalidations) +trees = invalidation_trees(invalidations) +#methinvs = trees[end] +#root = methinvs.backedges[end] +#ascend(root) \ No newline at end of file diff --git a/code_analysis/create_precompile_statements.jl b/code_analysis/create_precompile_statements.jl new file mode 100644 index 00000000..679c56a3 --- /dev/null +++ b/code_analysis/create_precompile_statements.jl @@ -0,0 +1,41 @@ +# Added from Oscar Dowson code for JuMP at https://github.com/jump-dev/JuMP.jl/pull/2484 +generate_precompile = true +package_path = "C:\\Users\\wilhe\\Desktop\\Package Development" +example_path = "" + +module Foo + +using JuMP +using EAGO + +function stress_precompile() + for file in readdir("precompiles") + if !endswith(file, ".jl") + continue + end + include(file) + end + return +end + +if generate_precompile + using SnoopCompile + tinf = @snoopi_deep Foo.stress_precompile() + ttot, pcs = SnoopCompile.parcel(tinf) + 
SnoopCompile.write("precompiles", pcs) + for file in readdir("precompiles") + if !endswith(file, ".jl") + continue + end + src = joinpath("precompiles", file) + m = match(r"precompile\_(.+)\.jl", file) + modules = split(m[1], ".") + modules = vcat(modules[1], "src", modules[2:end]) + if !(modules[1] in ["EAGO"]) + continue + end + dest = joinpath(package_path, modules..., "precompile.jl") + @show dest + cp(src, dest; force = true) + end +end \ No newline at end of file diff --git a/code_analysis/cthulhu.jl b/code_analysis/cthulhu.jl new file mode 100644 index 00000000..0be55069 --- /dev/null +++ b/code_analysis/cthulhu.jl @@ -0,0 +1,35 @@ +using EAGO, JuMP, Cthulhu + + +####### Example of interest +m = Model(EAGO.Optimizer) +set_optimizer_attribute(m, "output_iterations", 1) +set_optimizer_attribute(m, "iteration_limit", 5) +set_optimizer_attribute(m, "verbosity", 0) + +# Define bounded variables +xL = [10.0; 0.0; 0.0; 0.0; 0.0; 85.0; 90.0; 3.0; 1.2; 145.0] +xU = [2000.0; 16000.0; 120.0; 5000.0; 2000.0; 93.0; 95.0; 12.0; 4.0; 162.0] +@variable(m, xL[1] <= x[1] <= xU[1]) +@NLobjective(m, Max, -2.0 * x[1]) + +JuMP.optimize!(m) + +####### Define code to reduce need to transverse tree to get to functions of interest +b = backend(m).optimizer.model.optimizer +nlr = b._working_problem._nonlinear_constr[1] +function g() + EAGO.relax!(b, b._working_problem._nonlinear_constr[1], 1, true) +end +function q(b, nlr) + EAGO.relax!(b, nlr, 1, true) +end +k(m) = JuMP.optimize!(m) + +f() = k(m) + +####### Define descend +@descend f() + + + diff --git a/code_analysis/memory_allocation_analysis.jl b/code_analysis/memory_allocation_analysis.jl new file mode 100644 index 00000000..0389c80d --- /dev/null +++ b/code_analysis/memory_allocation_analysis.jl @@ -0,0 +1,20 @@ +# C:\Users\wilhe\AppData\Local\Programs\Julia-1.6.0\bin\julia.exe --track-allocation=user +# include("C:\\Users\\wilhe\\Desktop\\Package Development\\EAGO.jl\\benchmark\\memory_allocation_analysis.jl"); exit() +#using 
Coverage; m = analyze_malloc("C:\\Users\\wilhe\\Desktop\\Package Development\\EAGO.jl\\src") +#filter!(x -> x.bytes > 0, m) + +using JuMP, EAGO + +m = Model(EAGO.Optimizer) +set_optimizer_attribute(m, "output_iterations", 1) +set_optimizer_attribute(m, "iteration_limit", 5) +set_optimizer_attribute(m, "verbosity", 0) + +# Define bounded variables +xL = [10.0; 0.0; 0.0; 0.0; 0.0; 85.0; 90.0; 3.0; 1.2; 145.0] +xU = [2000.0; 16000.0; 120.0; 5000.0; 2000.0; 93.0; 95.0; 12.0; 4.0; 162.0] +@variable(m, xL[1] <= x[1] <= xU[1]) +@NLobjective(m, Max, -2.0 * x[1]) + + +JuMP.optimize!(m) \ No newline at end of file diff --git a/code_analysis/nl_issue.jl b/code_analysis/nl_issue.jl new file mode 100644 index 00000000..f7195d2b --- /dev/null +++ b/code_analysis/nl_issue.jl @@ -0,0 +1,104 @@ + +using JuMP + +println(" ") +println("EAGO using ") +@time (using Ipopt) +@time (using Ipopt) + + +function make_model() + m = Model(Ipopt.Optimizer) + #set_optimizer_attribute(m, "output_iterations", 1) + #set_optimizer_attribute(m, "iteration_limit", 5) + #set_optimizer_attribute(m, "verbosity", 0) + + # Define bounded variables + xL = [10.0; 0.0; 0.0; 0.0; 0.0; 85.0; 90.0; 3.0; 1.2; 145.0] + xU = [2000.0; 16000.0; 120.0; 5000.0; 2000.0; 93.0; 95.0; 12.0; 4.0; 162.0] + @variable(m, xL[1] <= x[1] <= xU[1]) + @NLobjective(m, Max, -2.0 * x[1]) + return m +end + +function f(m) + JuMP.optimize!(m) +end + +m = make_model() +m1 = make_model() + +println("EAGO first call ") +@time f(m) +@time f(m1) + +#= +m1 = Model(EAGO.Optimizer) +set_optimizer_attribute(m1, "output_iterations", 1) +set_optimizer_attribute(m1, "iteration_limit", 5) +set_optimizer_attribute(m1, "verbosity", 0) + +# Define bounded variables +xL = [10.0; 0.0; 0.0; 0.0; 0.0; 85.0; 90.0; 3.0; 1.2; 145.0] +xU = [2000.0; 16000.0; 120.0; 5000.0; 2000.0; 93.0; 95.0; 12.0; 4.0; 162.0] +@variable(m1, xL[1] <= x[1] <= xU[1]) + +@constraint(m, e2, -x[1]+1.22*x[4]-x[5] == 0.0) +@constraint(m, e6, x[9]+0.222*x[10] == 35.82) +@constraint(m, 
e7, -3*x[7]+x[10] == -133.0) + +# Define nonlinear constraints +@NLconstraint(m, e1, -x[1]*(1.12+0.13167*x[8]-0.00667* (x[8])^2)+x[4] == 0.0) +@NLconstraint(m, e3, -0.001*x[4]*x[9]*x[6]/(98-x[6])+x[3] == 0.0) + +###Original constraints +@NLconstraint(m, e4, -(1.098*x[8]-0.038* (x[8])^2)-0.325*x[6]+x[7] == 57.425) +@NLconstraint(m, e5, -(x[2]+x[5])/x[1]+x[8] == 0.0) + +@NLobjective(m1, Max, -2.0 * x[1]) +JuMP.optimize!(m1) +=# +#= +=# +# Define nonlinear constraints +#@NLconstraint(m, e1, -x[1]*(1.12+0.13167*x[8]-0.00667* (x[8])^2)+x[4] == 0.0) +#@NLconstraint(m, e3, -0.001*x[4]*x[9]*x[6]/(98-x[6])+x[3] == 0.0) + +###Original constraints +#@NLconstraint(m, e4, -(1.098*x[8]-0.038* (x[8])^2)-0.325*x[6]+x[7] == 57.425) +#@NLconstraint(m, e5, -(x[2]+x[5])/x[1]+x[8] == 0.0) + +###Rewritten constraints +#= +@NLexpressions(m, begin + ex1, -(x[2]+x[5])/x[1]+x[8] + ex2, -(1.098*x[8]-0.038* (x[8])^2)-0.325*x[6]+x[7] +end) +@NLconstraint(m, e4, ex2 == 57.425) +@NLconstraint(m, e5, ex1 == 0.0) +=# + +# Define linear constraints +#@constraint(m, e2, -x[1]+1.22*x[4]-x[5] == 0.0) +#@constraint(m, e6, x[9]+0.222*x[10] == 35.82) +#@constraint(m, e7, -3*x[7]+x[10] == -133.0) + +# Define nonlinear objective +#@NLobjective(m, Max, 0.063*x[4]*x[7] - 5.04*x[1] - 0.035*x[2] - 10*x[3] - 3.36*x[5]) +#@NLobjective(m, Min, -(x[4]*x[7])) +#@NLobjective(m, Min, (0.063*x[4]*x[7]^2 - 5.04*x[1]^2 - 0.035*x[2] - 10*x[3] - 3.36*x[5])) + +#= +@NLobjective(m, Max, -2.0*x[1]) + + +JuMP.optimize!(m) +# Solve the optimization problem +#using ProfileView +#@profview f(m) +#@profview f(m) + +@show objective_value(m) +@show objective_bound(m) +@show value(x[1]) +=# diff --git a/code_analysis/preallocation_tests/nl_issue.jl b/code_analysis/preallocation_tests/nl_issue.jl new file mode 100644 index 00000000..279a4a01 --- /dev/null +++ b/code_analysis/preallocation_tests/nl_issue.jl @@ -0,0 +1,99 @@ + +using JuMP, EAGO, Cbc + +function make_model() + sb = SubSolvers(;relaxed_optimizer= Cbc.Optimizer()) + m = 
Model(() -> EAGO.Optimizer(subsolver_block = sb)) + set_optimizer_attribute(m, "output_iterations", 1) + set_optimizer_attribute(m, "iteration_limit", 5) + set_optimizer_attribute(m, "verbosity", 0) + + # Define bounded variables + xL = [10.0; 0.0; 0.0; 0.0; 0.0; 85.0; 90.0; 3.0; 1.2; 145.0] + xU = [2000.0; 16000.0; 120.0; 5000.0; 2000.0; 93.0; 95.0; 12.0; 4.0; 162.0] + @variable(m, xL[1] <= x[1] <= xU[1]) + @NLobjective(m, Max, -2.0 * x[1]) + return m +end + +function f(m) + JuMP.optimize!(m) +end + +m = make_model() +m1 = make_model() + +println("EAGO first call ") +@time f(m) +@time f(m1) + +#= +m1 = Model(EAGO.Optimizer) +set_optimizer_attribute(m1, "output_iterations", 1) +set_optimizer_attribute(m1, "iteration_limit", 5) +set_optimizer_attribute(m1, "verbosity", 0) + +# Define bounded variables +xL = [10.0; 0.0; 0.0; 0.0; 0.0; 85.0; 90.0; 3.0; 1.2; 145.0] +xU = [2000.0; 16000.0; 120.0; 5000.0; 2000.0; 93.0; 95.0; 12.0; 4.0; 162.0] +@variable(m1, xL[1] <= x[1] <= xU[1]) + +@constraint(m, e2, -x[1]+1.22*x[4]-x[5] == 0.0) +@constraint(m, e6, x[9]+0.222*x[10] == 35.82) +@constraint(m, e7, -3*x[7]+x[10] == -133.0) + +# Define nonlinear constraints +@NLconstraint(m, e1, -x[1]*(1.12+0.13167*x[8]-0.00667* (x[8])^2)+x[4] == 0.0) +@NLconstraint(m, e3, -0.001*x[4]*x[9]*x[6]/(98-x[6])+x[3] == 0.0) + +###Original constraints +@NLconstraint(m, e4, -(1.098*x[8]-0.038* (x[8])^2)-0.325*x[6]+x[7] == 57.425) +@NLconstraint(m, e5, -(x[2]+x[5])/x[1]+x[8] == 0.0) + +@NLobjective(m1, Max, -2.0 * x[1]) +JuMP.optimize!(m1) +=# +#= +=# +# Define nonlinear constraints +#@NLconstraint(m, e1, -x[1]*(1.12+0.13167*x[8]-0.00667* (x[8])^2)+x[4] == 0.0) +#@NLconstraint(m, e3, -0.001*x[4]*x[9]*x[6]/(98-x[6])+x[3] == 0.0) + +###Original constraints +#@NLconstraint(m, e4, -(1.098*x[8]-0.038* (x[8])^2)-0.325*x[6]+x[7] == 57.425) +#@NLconstraint(m, e5, -(x[2]+x[5])/x[1]+x[8] == 0.0) + +###Rewritten constraints +#= +@NLexpressions(m, begin + ex1, -(x[2]+x[5])/x[1]+x[8] + ex2, -(1.098*x[8]-0.038* 
(x[8])^2)-0.325*x[6]+x[7] +end) +@NLconstraint(m, e4, ex2 == 57.425) +@NLconstraint(m, e5, ex1 == 0.0) +=# + +# Define linear constraints +#@constraint(m, e2, -x[1]+1.22*x[4]-x[5] == 0.0) +#@constraint(m, e6, x[9]+0.222*x[10] == 35.82) +#@constraint(m, e7, -3*x[7]+x[10] == -133.0) + +# Define nonlinear objective +#@NLobjective(m, Max, 0.063*x[4]*x[7] - 5.04*x[1] - 0.035*x[2] - 10*x[3] - 3.36*x[5]) +#@NLobjective(m, Min, -(x[4]*x[7])) +#@NLobjective(m, Min, (0.063*x[4]*x[7]^2 - 5.04*x[1]^2 - 0.035*x[2] - 10*x[3] - 3.36*x[5])) + +#= +@NLobjective(m, Max, -2.0*x[1]) + + +JuMP.optimize!(m) +# Solve the optimization problem +#using ProfileView +#@profview f(m) +#@profview f(m) + +@show objective_value(m) +@show objective_bound(m) +@show value(x[1]) +=# diff --git a/code_analysis/single_thread_benchmark.jl b/code_analysis/single_thread_benchmark.jl new file mode 100644 index 00000000..ca4a0f98 --- /dev/null +++ b/code_analysis/single_thread_benchmark.jl @@ -0,0 +1,58 @@ +using Distributed, DataFrames, CSV +using MINLPLib, JuMP, SCIP + +new_lib = "ImprovedCompMult" + +# build library +build_lib = false +instance_names = ["bearing", "ex6_2_10", "ex6_2_10", "ex6_2_11", "ex6_2_12", "ex6_2_13", "ex7_2_4", "ex7_3_1", "ex7_3_2", "ex14_1_8", "ex14_1_9"] +if build_lib + source_lib = "global" + for n in instance_names + MINLPLib.add_to_lib(new_lib, source_lib, n) + end +end + +# solution handler +struct TrialSummary + was_error::Bool + solver_name::String + instance_name::String + term_status::MOI.TerminationStatusCode + primal_status::MOI.ResultStatusCode + dual_status::MOI.ResultStatusCode + obj_bound::Float64 + obj_value::Float64 + solve_time::Float64 +end +TrialSummary() = TrialSummary(true, "", "", MOI.OTHER_ERROR, MOI.OTHER_RESULT_STATUS, MOI.OTHER_RESULT_STATUS, -Inf, -Inf, 0.0) + +function benchmark_problem(d, lib, sname::String) + m = fetch_model(lib, sname) + set_optimizer(m, d) + + set_time_limit_sec(m, 1000.0) + set_silent(m) + + JuMP.optimize!(m) + p = 
primal_status(m) + ds = dual_status(m) + t = termination_status(m) + v = objective_value(m) + b = objective_bound(m) + n = solve_name(m) + s = solve_time(m) + return TrialSummary(false,n,sname,t,p,ds,v,b,s) +end + + +trial_summaries = TrialSummary[] + +# Need to assign tolerance here (since tolerances aren't standardized among solvers) +function build_scip() + m = SCIP.Optimizer(limits_gap=1E-3, # absolute tolerance + limits_absgap=1E-3 # relative tolerance + ) + return m +end +append!(trial_summaries, pmap(x->benchmark_problem(build_scip, new_lib, x), instance_names; on_error=ex->TrialSummary())) \ No newline at end of file diff --git a/docs/src/Optimizer/high_performance.md b/docs/src/Optimizer/high_performance.md index dbdc1dfc..c0817311 100644 --- a/docs/src/Optimizer/high_performance.md +++ b/docs/src/Optimizer/high_performance.md @@ -8,15 +8,14 @@ is highly recommended. Both Gurobi and CPLEX are free for academics and installation information can be found through http://www.gurobi.com/academia/academia-center and https://www.ibm.com/developerworks/community/blogs/jfp/entry/CPLEX_Is_Free_For_Students?lang=en, respectively. -!!! warning - EAGO assumes that the MOI wrapper for any sub-solver exhibits the expected behavior. Any deviation for the expected may lead to an error. We currently recommend using either the default GLPK solver or Gurobi rather than CPLEX. Our experience has been that the GLPK and Gurobi MathOptInterface wrappers are better maintained and less prone to unexpected behavior than CPLEX currently is (though this is continuously improving) and in particular GLPK is quite stable. - A non-default LP solver can then be selected by the user via a series of keyword argument inputs as illustrated in the code snippet below. The `relaxed_optimizer` contains an instance optimizer with valid relaxations that are made at the root node and is updated with affine relaxations in place. 
```julia # Create opt EAGO Optimizer with Gurobi as a lower subsolver -m = Model(optimizer_with_attributes(EAGO.Optimizer, "relaxed_optimizer" => Gurobi.Optimizer(OutputFlag=0)) +subsolver_config = SubSolvers(relaxed_optimizer = Gurobi.Optimizer(OutputFlag=0)) +eago_factory = () -> EAGO.Optimizer(subsolvers = subsolver_config) +m = Model(eago_factory) ``` ## Rounding Mode diff --git a/examples/BeckerLago.jl b/examples/BeckerLago.jl new file mode 100644 index 00000000..ff162e32 --- /dev/null +++ b/examples/BeckerLago.jl @@ -0,0 +1,22 @@ +using JuMP + +m = Model() + +# ----- Variables ----- # +@variable(m, objvar) +x_Idx = Any[1, 2] +@variable(m, x[x_Idx]) +set_lower_bound(x[1], -10.0) +set_upper_bound(x[1], 10.0) +set_lower_bound(x[2], -10.0) +set_upper_bound(x[2], 10.0) + + +# ----- Constraints ----- # +@NLconstraint(m, e1, -((-5+sqrt( (x[1])^2))*(-5+sqrt( (x[1])^2))+(-5+sqrt( (x[2])^2))*(-5+sqrt( (x[2])^2)))+objvar == 0.0) + + +# ----- Objective ----- # +@objective(m, Min, objvar) + +m = m # model get returned when including this script. diff --git a/examples/Camel3.jl b/examples/Camel3.jl new file mode 100644 index 00000000..25200ba1 --- /dev/null +++ b/examples/Camel3.jl @@ -0,0 +1,32 @@ +using JuMP, EAGO + +m = Model(EAGO.Optimizer) +set_optimizer_attribute(m, "mul_relax_style", 1) +set_optimizer_attribute(m, "verbosity", 4) +set_optimizer_attribute(m, "output_iterations", 1) +set_optimizer_attribute(m, "iteration_limit", 1000) +set_optimizer_attribute(m, "cut_max_iterations", 2) +set_optimizer_attribute(m, "subgrad_tighten", false) + +# OBBT depth 0 -> 20... increases number of iterations... 
+set_optimizer_attribute(m, "obbt_depth", 8) +set_optimizer_attribute(m, "obbt_repetitions", 2) + +# ----- Variables ----- # +x_Idx = Any[1,2] +@variable(m, x[x_Idx]) +set_lower_bound(x[1], 0.1) +set_upper_bound(x[1], 0.9) + +set_lower_bound(x[2], 0.1) +set_upper_bound(x[2], 0.9) + +# ----- Constraints ----- # + +@NLobjective(m, Min, (x[1]^2 - x[1])*(x[2]^2 - x[2])) +s = time() +optimize!(m) +@show termination_status(m) +sout = s - time() +@show solve_time(m) +@show sout diff --git a/examples/ex6_2_11.jl b/examples/ex6_2_11.jl new file mode 100644 index 00000000..4cb2b4b5 --- /dev/null +++ b/examples/ex6_2_11.jl @@ -0,0 +1,49 @@ + +println("start run") + +using JuMP, EAGO + +m = Model(EAGO.Optimizer) +set_optimizer_attribute(m, "mul_relax_style", 1) +set_optimizer_attribute(m, "verbosity", 1) +set_optimizer_attribute(m, "output_iterations", 1000) +set_optimizer_attribute(m, "iteration_limit", 10000000) +set_optimizer_attribute(m, "cut_max_iterations", 2) + +# OBBT depth 0 -> 20... increases number of iterations... 
+set_optimizer_attribute(m, "obbt_depth", 8) +set_optimizer_attribute(m, "obbt_repetitions", 2) + +# ----- Variables ----- # +x_Idx = Any[2, 3, 4] +@variable(m, x[x_Idx]) +set_lower_bound(x[2], 1.0e-6) +set_upper_bound(x[2], 1.0) +set_lower_bound(x[3], 1.0e-6) +set_upper_bound(x[3], 1.0) +set_lower_bound(x[4], 1.0e-6) +set_upper_bound(x[4], 1.0) + + +# ----- Constraints ----- # +s = time() +#= +@NLobjective(m, Min, (15.3261663216011*x[2]+23.2043471859416*x[3]+6.69678129464404*x[4])*log(2.1055*x[2]+3.1878*x[3] + 0.92*x[4]) + + 1.04055250396734*x[2]-2.24199441248417*x[3]+3.1618173099828*x[4]+6.4661663216011*x[2]*log(x[2]/(2.1055*x[2]+3.1878*x[3]+0.92*x[4])) + + 12.2043471859416*x[3]*log(x[3]/(2.1055*x[2]+3.1878*x[3]+0.92*x[4]))+0.696781294644034*x[4]*log(x[4]/(2.1055*x[2]+3.1878*x[3]+0.92*x[4])) + + 9.86*x[2]*log(x[2]/(1.972*x[2]+2.4*x[3]+1.4*x[4]))+12*x[3]*log(x[3]/(1.972*x[2]+2.4*x[3]+1.4*x[4]))+7*x[4]*log(x[4]/(1.972*x[2]+2.4*x[3] + + 1.4*x[4]))+(1.972*x[2]+2.4*x[3]+1.4*x[4])*log(1.972*x[2]+2.4*x[3]+1.4*x[4])+1.972*x[2]*log(x[2]/(1.972*x[2]+0.283910843616504*x[3] + + 3.02002220174195*x[4]))+2.4*x[3]*log(x[3]/(1.45991339466884*x[2]+2.4*x[3]+0.415073537580851*x[4]))+1.4*x[4]*log(x[4]/(0.602183324335333*x[2] + + 0.115623371371275*x[3]+1.4*x[4]))-17.2981663216011*x[2]*log(x[2])-25.6043471859416*x[3]*log(x[3])-8.09678129464404*x[4]*log(x[4])) +=# +#@NLobjective(m, Min, (x[3]^3 - x[3]^3)*(x[3]^3 - x[3]^3)) +@NLobjective(m, Min, x[2]*x[3]*x[4] + x[2]*x[4]) +@constraint(m, e2, x[2]+x[3]+x[4] == 1.0) + +m = m # model get returned when including this script. 
+ +optimize!(m) +@show termination_status(m) +sout = s - time() +@show solve_time(m) +@show sout \ No newline at end of file diff --git a/src/EAGO.jl b/src/EAGO.jl index 3a80fc78..89407025 100644 --- a/src/EAGO.jl +++ b/src/EAGO.jl @@ -15,20 +15,23 @@ module EAGO import MathOptInterface - using Reexport, Cassette, IntervalArithmetic, NumericIO, DocStringExtensions - using FastRounding, SpecialFunctions + using Reexport, Requires, Cassette, IntervalArithmetic, DocStringExtensions, + FastRounding, SpecialFunctions, Ipopt, Cbc, Printf, PrettyTables using JuMP + import JuMP import JuMP._Derivatives: operators, NodeData using JuMP._Derivatives: univariate_operators, univariate_operator_to_id - using Ipopt, GLPK + import JuMP: _SubexpressionStorage + import JuMP._Derivatives: NodeType, UserOperatorRegistry + const JuMPOpReg = JuMP._Derivatives.UserOperatorRegistry using DataStructures: OrderedDict, BinaryMinMaxHeap, popmin!, popmax!, top using SparseArrays: SparseMatrixCSC, spzeros, rowvals, nzrange, nonzeros, sparse, findnz using LinearAlgebra: eigmin, norm - - import IntervalArithmetic: mid + using Base: @propagate_inbounds + import Base: setindex!, + , *, -, ^, /, zero, one, inv, log, log10, exp, exp10, isempty, min, max @reexport using McCormick @reexport using SpecialFunctions @@ -50,8 +53,9 @@ module EAGO =# const MOI = MathOptInterface + const MOIU = MOI.Utilities + const MOIB = MOI.Bridges - const SV = MOI.SingleVariable const SAF = MOI.ScalarAffineFunction{Float64} const SQF = MOI.ScalarQuadraticFunction{Float64} const VECOFVAR = MOI.VectorOfVariables @@ -71,64 +75,57 @@ module EAGO const SCoefC = MOI.ScalarCoefficientChange const SConsC = MOI.ScalarConstantChange - const MOIU = MOI.Utilities const LT_ZERO = LT(0.0) # Add storage types for EAGO optimizers export NodeBB, get_history, get_lower_bound, get_upper_bound, get_lower_time, get_upper_time, get_preprocess_time, get_postprocess_time, get_lower_bound, get_solution_time, - get_iteration_number, 
get_node_count, get_absolute_gap, get_relative_gap + get_iteration_number, get_node_count, get_absolute_gap, get_relative_gap, SubSolvers, + EAGOModel, AuxiliaryVariableRef + export auxillary_variable, @auxillary_variable, add_mimo_expression, @add_mimo_expression export register_eago_operators! # map/reduce nonallocating no bounds checking map-reduce like utilities - include("eago_optimizer/unsafe_utilities.jl") + include(joinpath(@__DIR__, "eago_optimizer", "utilities.jl")) # creates a context that removes domain violations when constructing bounds - include("eago_optimizer/guarded_context.jl") + #include("eago_optimizer/guarded_context.jl") - # defines structure used to store node in stack - include("eago_optimizer/node_bb.jl") + include(joinpath(@__DIR__, "eago_optimizer", "types", "log.jl")) + include(joinpath(@__DIR__, "eago_optimizer", "types", "variable_info.jl")) + include(joinpath(@__DIR__, "eago_optimizer", "types", "node_bb.jl")) + include(joinpath(@__DIR__, "eago_optimizer", "types", "extension.jl")) + include(joinpath(@__DIR__, "eago_optimizer", "types", "incremental.jl")) + include(joinpath(@__DIR__, "eago_optimizer", "types", "subsolver_block.jl")) # load internal storage functions - include("eago_optimizer/functions/functions.jl") - - #include("eago_optimizer/evaluator/evaluator.jl") + include(joinpath(@__DIR__, "eago_optimizer", "functions", "functions.jl")) - # defines structure used to store information at each iteration of global optimize - include("eago_optimizer/logging/log.jl") + include(joinpath(@__DIR__, "eago_optimizer", "types", "global_optimizer.jl")) # defines the optimizer structures - include("eago_optimizer/optimizer.jl") + include(joinpath(@__DIR__, "eago_optimizer", "optimizer.jl")) - # defines routines to add variables and single variable constraints - include("eago_optimizer/variables.jl") - - # defines routines to add saf, sqf, and nlp block constraints - include("eago_optimizer/moi_constraints.jl") - - # functions which print 
information to console - include("eago_optimizer/display.jl") - - # - include("eago_optimizer/relax.jl") - include("eago_optimizer/bound.jl") + # defines routines to add variable, saf, sqf, and nlp block constraints + include(joinpath(@__DIR__, "eago_optimizer", "moi_wrapper.jl")) # - include("eago_optimizer/domain_reduction.jl") + include(joinpath(@__DIR__, "eago_optimizer", "optimize", "nonconvex", "relax.jl")) + include(joinpath(@__DIR__, "eago_optimizer", "optimize", "nonconvex", "bound.jl")) # - include("eago_optimizer/parse.jl") + include(joinpath(@__DIR__, "eago_optimizer", "domain_reduction.jl")) # - include("eago_optimizer/logging/log_iteration.jl") + include(joinpath(@__DIR__, "eago_optimizer", "parse.jl")) # - include("eago_optimizer/optimize/optimize.jl") + include(joinpath(@__DIR__, "eago_optimizer", "optimize", "optimize.jl")) # import the script solving utilities - include("eago_script/script.jl") + include(joinpath(@__DIR__, "eago_script", "script.jl")) # routines for solving SIPs export SIPResult, SIPProblem, SIPCallback, SIPSubResult, @@ -137,5 +134,22 @@ module EAGO sip_llp!, sip_bnd!, sip_res!, get_sip_optimizer, check_convergence, LowerLevel1, LowerLevel2, LowerLevel3, LowerProblem, UpperProblem, ResProblem, AbstractSIPAlgo, AbstractSubproblemType - include("eago_semiinfinite/semiinfinite.jl") + include(joinpath(@__DIR__, "eago_semiinfinite", "semiinfinite.jl")) + + include(joinpath(@__DIR__, "subsolvers", "cbc.jl")) + include(joinpath(@__DIR__, "subsolvers", "ipopt.jl")) + function __init__() + #@require Cbc="9961bab8-2fa3-5c5a-9d89-47fab24efd76" include(joinpath(@__DIR__, "subsolvers", "cbc.jl")) + @require Clp="e2554f3b-3117-50c0-817c-e040a3ddf72d" include(joinpath(@__DIR__, "subsolvers", "clp.jl")) + @require CPLEX="a076750e-1247-5638-91d2-ce28b192dca0" include(joinpath(@__DIR__, "subsolvers", "cplex.jl")) + @require GLPK="60bf3e95-4087-53dc-ae20-288a0d20c6a6" include(joinpath(@__DIR__, "subsolvers", "glpk.jl")) + @require 
Gurobi="2e9cd046-0924-5485-92f1-d5272153d98b" include(joinpath(@__DIR__, "subsolvers", "gurobi.jl")) + @require Hypatia="b99e6be6-89ff-11e8-14f8-45c827f4f8f2" include(joinpath(@__DIR__, "subsolvers", "hypatia.jl")) + @require KNITRO="67920dd8-b58e-52a8-8622-53c4cffbe346" include(joinpath(@__DIR__, "subsolvers", "knitro.jl")) + @require MosekTools="1ec41992-ff65-5c91-ac43-2df89e9693a4" include(joinpath(@__DIR__, "subsolvers", "mosek.jl")) + @require Xpress="9e70acf3-d6c9-5be6-b5bd-4e2c73e3e054" include(joinpath(@__DIR__, "subsolvers", "xpress.jl")) + end + + #include("precompile.jl") + #_precompile_() end diff --git a/src/eago_optimizer/bound.jl b/src/eago_optimizer/bound.jl deleted file mode 100644 index f9b1dc66..00000000 --- a/src/eago_optimizer/bound.jl +++ /dev/null @@ -1,244 +0,0 @@ -# Copyright (c) 2018: Matthew Wilhelm & Matthew Stuber. -# This code is licensed under MIT license (see LICENSE.md for full details) -############################################################################# -# EAGO -# A development environment for robust and global optimization -# See https://github.com/PSORLab/EAGO.jl -############################################################################# -# src/eago_optimizer/bound.jl -# Computes interval bounds of various functions. 
-############################################################################# - -### -### AFFINE FUNCTIONS -### - -function lower_interval_bound(m::Optimizer, f::AffineFunctionIneq, y::NodeBB) - - terms = f.terms - lo_bnds = y.lower_variable_bounds - up_bnds = y.upper_variable_bounds - - sol_branch_map = m._sol_to_branch_map - lower_interval_bound = f.constant - for i = 1:f.len - coeff, indx = @inbounds terms[i] - - if m._branch_variables[indx] - mapped_vi = @inbounds sol_branch_map[indx] - xL = @inbounds lo_bnds[mapped_vi] - xU = @inbounds up_bnds[mapped_vi] - else - xL = @inbounds m._working_problem._variable_info[indx].lower_bound - xU = @inbounds m._working_problem._variable_info[indx].upper_bound - end - - lower_interval_bound += (coeff > 0.0) ? coeff*xL : coeff*xU - end - - return lower_interval_bound -end - -function interval_bound(m::Optimizer, f::AffineFunctionEq, y::NodeBB) - terms = f.terms - lo_bnds = y.lower_variable_bounds - up_bnds = y.upper_variable_bounds - - sol_branch_map = m._sol_to_branch_map - lower_interval_bound = f.constant - upper_interval_bound = f.constant - for i = 1:f.len - coeff, indx = @inbounds terms[i] - - if m._branch_variables[indx] - mapped_vi = @inbounds sol_branch_map[indx] - xL = @inbounds lo_bnds[mapped_vi] - xU = @inbounds up_bnds[mapped_vi] - else - xL = @inbounds m._working_problem._variable_info[indx].lower_bound - xU = @inbounds m._working_problem._variable_info[indx].upper_bound - end - - if coeff > 0.0 - lower_interval_bound += coeff*xL - upper_interval_bound += coeff*xU - else - lower_interval_bound += coeff*xU - upper_interval_bound += coeff*xL - end - end - - return lower_interval_bound, upper_interval_bound -end - -### -### QUADRATIC FUNCTIONS -### - -function lower_interval_bound(m::Optimizer, f::BufferedQuadraticIneq, n::NodeBB) - - sol_branch_map = m._sol_to_branch_map - lo_bnds = n.lower_variable_bounds - up_bnds = n.upper_variable_bounds - lower_interval_bound = Interval{Float64}(f.func.constant) - - for 
aff_term in f.func.affine_terms - - coeff = aff_term.coefficient - - vi = aff_term.variable_index.value - if m._branch_variables[vi] - mapped_vi = @inbounds sol_branch_map[vi] - xL = @inbounds lo_bnds[mapped_vi] - xU = @inbounds up_bnds[mapped_vi] - else - xL = @inbounds m._working_problem._variable_info[vi].lower_bound - xU = @inbounds m._working_problem._variable_info[vi].upper_bound - end - - lower_interval_bound += coeff > 0.0 ? coeff*xL : coeff*xU - end - - # assumes branching on all quadratic terms, otherwise we'd need to distinguish - # between look ups to the node bounds and lookups to the variable info in the - # working problem. - for quad_term in f.func.quadratic_terms - - coeff = quad_term.coefficient - - vi1 = quad_term.variable_index_1.value - vi2 = quad_term.variable_index_2.value - mapped_vi1 = sol_branch_map[vi1] - - xL = @inbounds lo_bnds[mapped_vi1] - xU = @inbounds up_bnds[mapped_vi1] - - if vi1 === vi2 - if coeff > 0.0 - lower_interval_bound += (0.0 < xL) ? 0.5*coeff*xL*xL : ((xU <= 0.0) ? 0.5*coeff*xU*xU : 0.0) - else - lower_interval_bound += (xL < xU) ? 
0.5*coeff*xU*xU : 0.5*coeff*xL*xL - end - else - mapped_vi2 = sol_branch_map[vi2] - - il2b = @inbounds lo_bnds[mapped_vi2] - iu2b = @inbounds up_bnds[mapped_vi2] - lower_interval_bound += coeff*Interval{Float64}(xL, xU)*Interval{Float64}(il2b, iu2b) - - end - end - - return lower_interval_bound.lo -end - -function interval_bound(m::Optimizer, f::BufferedQuadraticEq, n::NodeBB) - - sol_branch_map = m._sol_to_branch_map - lo_bnds = n.lower_variable_bounds - up_bnds = n.upper_variable_bounds - val_intv = Interval(f.func.constant) - - for aff_term in f.func.affine_terms - - coeff = aff_term.coefficient - - vi = aff_term.variable_index.value - if m._branch_variables[vi] - mapped_vi = @inbounds sol_branch_map[vi] - xL = @inbounds lo_bnds[mapped_vi] - xU = @inbounds up_bnds[mapped_vi] - else - xL = @inbounds m._working_problem._variable_info[vi].lower_bound - xU = @inbounds m._working_problem._variable_info[vi].upper_bound - end - - val_intv += coeff*Interval(xL, xU) - end - - # assumes branching on all quadratic terms, otherwise we'd need to distinguish - # between look ups to the node bounds and lookups to the variable info in the - # working problem. 
- for quad_term in f.func.quadratic_terms - - coeff = quad_term.coefficient - - vi1 = quad_term.variable_index_1.value - vi2 = quad_term.variable_index_2.value - - mapped_vi1 = @inbounds sol_branch_map[vi1] - - xL = @inbounds lo_bnds[mapped_vi1] - xU = @inbounds up_bnds[mapped_vi1] - - if vi1 === vi2 - val_intv += 0.5*coeff*pow(Interval(xL, xU), 2) - else - mapped_vi2 = @inbounds sol_branch_map[vi2] - @inbounds il2b = lo_bnds[mapped_vi2] - @inbounds iu2b = up_bnds[mapped_vi2] - val_intv += coeff*Interval(xL, xU)*Interval(il2b, iu2b) - end - end - - return val_intv.lo, val_intv.hi -end - -### -### SECOND-ORDER CONE -### -function lower_interval_bound(m::Optimizer, d::BufferedSOC, n::NodeBB) - - sol_branch_map = m._sol_to_branch_map - lo_bnds = n.lower_variable_bounds - up_bnds = n.upper_variable_bounds - vec_of_vi = d.variables.variables - - norm_bound = Interval(0.0) - for i = 2:length(vec_of_vi) - mapped_vi = @inbounds sol_branch_map[vec_of_vi[i].value] - x = Interval{Float64}(lo_bnds[mapped_vi], up_bnds[mapped_vi]) - norm_bound += pow(x, 2) - end - norm_bound = sqrt(norm_bound) - - mapped_vi = @inbounds sol_branch_map[vec_of_vi[1].value] - lower_bound = norm_bound.lo -(@inbounds up_bnds[mapped_vi]) - - return lower_bound -end - - -### -### NONLINEAR FUNCTIONS -### - -function lower_interval_bound(m::Optimizer, d::BufferedNonlinearFunction{V}, n::NodeBB) where V - if !d.has_value - forward_pass!(m._working_problem._relaxed_evaluator, d) - end - - expr = d.expr - if expr.isnumber[1] - lower_value = expr.numberstorage[1] - else - lower_value = expr.setstorage[1].Intv.lo - end - - return lower_value -end - -function interval_bound(m::Optimizer, d::BufferedNonlinearFunction{V}, n::NodeBB) where V - if !d.has_value - forward_pass!(d.evaluator, d) - end - - expr = d.expr - if expr.isnumber[1] - interval_value = Interval(expr.numberstorage[1]) - else - interval_value = expr.setstorage[1].Intv - end - - return interval_value.lo, interval_value.hi -end diff --git 
a/src/eago_optimizer/domain_reduction.jl b/src/eago_optimizer/domain_reduction.jl index 006caae4..cddb50d1 100644 --- a/src/eago_optimizer/domain_reduction.jl +++ b/src/eago_optimizer/domain_reduction.jl @@ -44,198 +44,195 @@ function variable_dbbt!(n::NodeBB, mult_lo::Vector{Float64}, mult_hi::Vector{Flo return nothing end +function set_preprocess_status(m::GlobalOptimizer{R,S,Q}, d) where {R,S,Q<:ExtensionType} + tstatus = MOI.get(d, MOI.TerminationStatus()) + pstatus = MOI.get(d, MOI.PrimalStatus()) + dstatus = MOI.get(d, MOI.DualStatus()) + m._preprocess_termination_status = tstatus + m._preprocess_primal_status = pstatus + m._preprocess_dual_status = dstatus + return relaxed_problem_status(tstatus, pstatus, dstatus) +end + """ $(FUNCTIONNAME) Excludes OBBT on variable indices that are tight for the solution of the relaxation. """ -function trivial_filtering!(m::Optimizer, n::NodeBB) - - obbt_tolerance = m._parameters.obbt_tolerance - m._preprocess_termination_status = MOI.get(m.relaxed_optimizer, MOI.TerminationStatus()) - m._preprocess_result_status = MOI.get(m.relaxed_optimizer, MOI.PrimalStatus()) - valid_flag, feasible_flag = is_globally_optimal(m._preprocess_termination_status, - m._preprocess_result_status) - - if valid_flag && feasible_flag - for j = 1:length(m._obbt_working_lower_index) - if @inbounds m._obbt_working_lower_index[j] - vi = @inbounds m._relaxed_variable_index[j] - diff = MOI.get(m.relaxed_optimizer, MOI.VariablePrimal(), vi) - diff -= @inbounds n.lower_variable_bounds[j] - if abs(diff) <= obbt_tolerance - @inbounds m._obbt_working_lower_index[j] = false +function trivial_filtering!(m::GlobalOptimizer{R,S,Q}, n::NodeBB) where {R,S,Q<:ExtensionType} + + d = _relaxed_optimizer(m) + obbt_tolerance = _obbt_tolerance(m) + if set_preprocess_status(m,d) == RRS_OPTIMAL + for j = 1:m._obbt_variable_count + vi = VI(_bvi(m, m._relaxed_variable_index[j].value)) + if m._obbt_working_lower_index[j] + z = MOI.get(d, MOI.VariablePrimal(),vi) - 
n.lower_variable_bounds[j] + if abs(z) <= obbt_tolerance + m._obbt_working_lower_index[j] = false end end - end - for j = 1:length(m._obbt_working_upper_index) - if @inbounds m._obbt_working_upper_index[j] - vi = @inbounds m._relaxed_variable_index[j] - diff = -MOI.get(m.relaxed_optimizer, MOI.VariablePrimal(), vi) - diff += @inbounds n.upper_variable_bounds[j] - if abs(diff) <= obbt_tolerance - @inbounds m._obbt_working_upper_index[j] = false + if m._obbt_working_upper_index[j] + z = n.upper_variable_bounds[j] - MOI.get(d, MOI.VariablePrimal(), vi) + if abs(z) <= obbt_tolerance + m._obbt_working_upper_index[j] = false end end end end - return end +and_not(x,y) = x & ~y """ $(FUNCTIONNAME) Utility function used to set vector of booleans z to x & ~y. Avoids the generation of conversion of the BitArray created by broadcasting logical operators. """ -function bool_indx_diff(z::Vector{Bool},x::Vector{Bool}, y::Vector{Bool}) - for i = 1:length(z) - @inbounds z[i] = (x[i] & ~y[i]) - end - return -end +bool_indx_diff!(z::Vector{Bool},x::Vector{Bool}, y::Vector{Bool}) = map!(and_not, z, x, y) """ $(FUNCTIONNAME) Excludes OBBT on variable indices after a search in a filtering direction. 
""" -function aggressive_filtering!(m::Optimizer, n::NodeBB) - - # Initial filtering vector (negative one direction per remark in Gleixner2017) - variable_number = m._working_problem._variable_count - v = -ones(variable_number) - - # Copy prior index set (ignores linear and binary terms) - obbt_variable_count = m._obbt_variable_count - copyto!(m._old_low_index, m._obbt_working_lower_index) - copyto!(m._old_upp_index, m._obbt_working_upper_index) - copyto!(m._new_low_index, m._obbt_working_lower_index) - copyto!(m._new_upp_index, m._obbt_working_upper_index) - - # Exclude unbounded directions - for i = 1:obbt_variable_count - if @inbounds m._new_low_index[i] && @inbounds n.lower_variable_bounds[i] === -Inf - @inbounds m._new_low_index[i] = false - end - if @inbounds m._new_low_index[i] && @inbounds n.upper_variable_bounds[i] === Inf - @inbounds m._new_low_index[i] = false - end - end +function aggressive_filtering!(m::GlobalOptimizer{R,S,Q}, n::NodeBB) where {R,S,Q<:ExtensionType} + if _obbt_aggressive_on(m) + # Initial filtering vector (negative one direction per remark in Gleixner2017) + d = _relaxed_optimizer(m) + variable_number = _variable_num(FullVar(), m) + v = -ones(variable_number) + + # Copy prior index set (ignores linear and binary terms) + obbt_variable_count = m._obbt_variable_count + copyto!(m._old_low_index, m._obbt_working_lower_index) + copyto!(m._old_upp_index, m._obbt_working_upper_index) + copyto!(m._new_low_index, m._obbt_working_lower_index) + copyto!(m._new_upp_index, m._obbt_working_upper_index) + + # Exclude unbounded directions + @__dot__ m._new_low_index = m._new_low_index & !isinf(n.lower_variable_bounds) + + # Begin the main algorithm + for k = 1:_obbt_aggressive_max_iteration(m) + + # Set index differences and vector for filtering direction + @__dot__ m._lower_indx_diff = m._old_low_index & ~m._new_low_index + @__dot__ m._upper_indx_diff = m._old_upp_index & ~m._new_upp_index - # Begin the main algorithm - for k = 
1:m._parameters.obbt_aggressive_max_iteration - - # Set index differences and vector for filtering direction - bool_indx_diff(m._lower_indx_diff, m._old_low_index, m._new_low_index) - bool_indx_diff(m._upper_indx_diff, m._old_upp_index, m._new_upp_index) - - for i = 1:obbt_variable_count - vi = @inbounds v[i] - if @inbounds m._lower_indx_diff[i] && vi < 0.0 - @inbounds v[i] = 0.0 - end - if @inbounds m._upper_indx_diff[i] && vi > 0.0 - @inbounds v[i] = 0.0 + for i = 1:obbt_variable_count + vi = v[i] + if m._lower_indx_diff[i] && vi < 0.0 + v[i] = 0.0 + end + if m._upper_indx_diff[i] && vi > 0.0 + v[i] = 0.0 + end end - end - # Termination Condition - ((~any(m._new_low_index) & ~any(m._new_upp_index)) || (iszero(v))) && break - if k >= 2 - if (count(m._lower_indx_diff) + count(m._upper_indx_diff)) < m._parameters.obbt_aggressive_min_dimension - break + # Termination Condition + ((~any(m._new_low_index) & ~any(m._new_upp_index)) || (iszero(v))) && break + if k >= 2 + if (count(m._lower_indx_diff) + count(m._upper_indx_diff)) < m._parameters.obbt_aggressive_min_dimension + break + end end - end - - # Set objective in OBBT problem to filtering vector - MOI.set(m.relaxed_optimizer, MOI.ObjectiveSense(), MOI.MAX_SENSE) - saf = SAF(SAT.(v, m._relaxed_variable_index), 0.0) - MOI.set(m.relaxed_optimizer, MOI.ObjectiveFunction{SAF}(), saf) - - # Optimizes the problem and if successful filter additional bounds - MOI.optimize!(m.relaxed_optimizer) - m._preprocess_termination_status = MOI.get(m.relaxed_optimizer, MOI.TerminationStatus()) - m._preprocess_result_status = MOI.get(m.relaxed_optimizer, MOI.PrimalStatus()) - valid_flag, feasible_flag = is_globally_optimal(m._preprocess_termination_status, - m._preprocess_result_status) - - if valid_flag && feasible_flag - variable_primal = MOI.get(m.relaxed_optimizer, MOI.VariablePrimal(), m._relaxed_variable_index) - copyto!(m._new_low_index, m._old_low_index) - copyto!(m._new_upp_index, m._old_upp_index) - for i = 
1:obbt_variable_count - vp_value = @inbounds variable_primal[i] - if @inbounds m._old_low_index[i] && vp_value == @inbounds n.lower_variable_bounds[i] - @inbounds m._new_low_index[i] = false - end - if @inbounds m._old_upp_index[i] && vp_value == @inbounds n.upper_variable_bounds[i] - @inbounds m._new_upp_index[i] = false + # Set objective in OBBT problem to filtering vector + MOI.set(d, MOI.ObjectiveSense(), MOI.MAX_SENSE) + MOI.set(d, MOI.ObjectiveFunction{SAF}(), SAF(SAT.(v, m._relaxed_variable_index), 0.0)) + + # Optimizes the problem and if successful filter additional bounds + MOI.optimize!(d) + + if set_preprocess_status(m,d) == RRS_OPTIMAL + variable_primal = MOI.get(d, MOI.VariablePrimal(), m._relaxed_variable_index) + copyto!(m._new_low_index, m._old_low_index) + copyto!(m._new_upp_index, m._old_upp_index) + for i = 1:obbt_variable_count + vp_value = variable_primal[i] + if m._old_low_index[i] && vp_value == n.lower_variable_bounds[i] + m._new_low_index[i] = false + end + if m._old_upp_index[i] && vp_value == n.upper_variable_bounds[i] + m._new_upp_index[i] = false + end end + else + return false end - else - return false end + copyto!(m._obbt_working_lower_index, m._new_low_index) + copyto!(m._obbt_working_upper_index, m._new_upp_index) end - copyto!(m._obbt_working_lower_index, m._new_low_index) - copyto!(m._obbt_working_upper_index, m._new_upp_index) return true end """ $(FUNCTIONNAME) """ -function set_node_flag!(m::Optimizer) - for constr in m._working_problem._nonlinear_constr - set_node_flag!(constr) - end - - if m._working_problem._objective_type === NONLINEAR - set_node_flag!(m._working_problem._objective_nl) - end - - return nothing -end +function set_reference_point!(m::GlobalOptimizer) -""" -$(FUNCTIONNAME) -""" -function set_reference_point!(m::Optimizer) - - evaluator = m._working_problem._relaxed_evaluator - evaluator_x = evaluator.x - current_xref = m._current_xref + wp = m._working_problem + evaluator = wp._relaxed_evaluator + evaluator_x 
= evaluator.variable_values.x + current_xref = m._lower_solution new_reference_point = false - for node_i = 1:m._branch_variable_count - solution_i = m._branch_to_sol_map[node_i] - - node_x = current_xref[node_i] - solution_x = evaluator_x[solution_i] - + for i = 1:length(current_xref) + node_x = current_xref[i] + solution_x = evaluator_x[i] if node_x !== solution_x - evaluator_x[solution_i] = node_x + l = _lower_bound(FullVar(), m, i) + u = _upper_bound(FullVar(), m, i) + if isfinite(l) && isfinite(u) + x = node_x + elseif isfinite(l) + x = min(0.0, u) + elseif isfinite(u) + x = max(0.0, l) + else + x = 0.0 + end + evaluator_x[i] = x new_reference_point = true end end if new_reference_point - - for constr in m._working_problem._nonlinear_constr - constr.has_value = false - end - - if m._working_problem._objective_type === NONLINEAR - m._working_problem._objective_nl.has_value = false - end + foreach(c -> _set_has_value!(c, false), wp._nonlinear_constr) + _set_has_value!(wp._objective, false) end fill!(evaluator.subexpressions_eval, false) return nothing end +# TODO replace with argmin statement post Julia 1.7 LTS version +function active_argmin(f, b, n) + vi = -1; v = Inf + if any(b) + for i = 1:n + if b[i] && (f(i) <= v) + vi = i + v = f(i) + end + end + end + vi, v +end +Δxl(m, i) = _lower_solution(BranchVar(),m,i) - _lower_bound(BranchVar(),m,i) +Δux(m, i) = _upper_bound(BranchVar(),m,i) - _lower_solution(BranchVar(),m,i) + +function reset_objective!(m::GlobalOptimizer{R,S,Q}, d) where {R,S,Q<:ExtensionType} + MOI.set(d, MOI.ObjectiveSense(), MOI.MIN_SENSE) + MOI.set(d, MOI.ObjectiveFunction{SAF}(), m._working_problem._objective_saf) + MOI.optimize!(d) + nothing +end + + """ $(FUNCTIONNAME) @@ -243,229 +240,134 @@ Performs OBBT with filtering and greedy ordering as detailed in: Gleixner, A.M., Berthold, T., Müller, B. et al. J Glob Optim (2017) 67: 731. 
https://doi.org/10.1007/s10898-016-0450-4 """ -function obbt!(m::Optimizer) +function obbt!(m::GlobalOptimizer{R,S,Q}) where {R,S,Q<:ExtensionType} feasibility = true - n = m._current_node - branch_to_sol_map = m._branch_to_sol_map - relaxed_optimizer = m.relaxed_optimizer - - # set node and reference point if necessary then solve initial problem to - # feasibility. This is repeated `obbt_repetitions` number of times in the - # following fashion. Relax the problem, populate affine constraints, run - # obbt which contracts variable bounds, delete affine constraints... - # update variable bounds and repeat. TODO: Keep track of which variables - # participate in which functions and only delete a constraint if a - # variable participating in a nonlinear term changes it bounds. - if m._obbt_performed_flag - reset_relaxation!(m) - set_first_relax_point!(m) - end + d = _relaxed_optimizer(m) - update_relaxed_problem_box!(m) - if m._nonlinear_evaluator_created - set_node!(m._working_problem._relaxed_evaluator, n) - set_node_flag!(m) - set_reference_point!(m) - end - relax_constraints!(m, 1) - relax_objective!(m, 1) - MOI.set(relaxed_optimizer, MOI.ObjectiveSense(), MOI.MIN_SENSE) - MOI.optimize!(relaxed_optimizer) - - # Sets indices to attempt OBBT on - obbt_variable_count = m._obbt_variable_count - fill!(m._obbt_working_lower_index, true) - fill!(m._obbt_working_upper_index, true) - - # Filters out any indicies with active bounds on variables - # determined by solving the feasibility problem - trivial_filtering!(m, n) - - # Applies an aggressive filter to eliminate indices that - # cannot be tightened by obbt - if m._parameters.obbt_aggressive_on - feasibility = aggressive_filtering!(m, n) - end - - # extracts info from relaxed problem (redundant if aggressive obbt is called) - m._preprocess_termination_status = MOI.get(relaxed_optimizer, MOI.TerminationStatus()) - m._preprocess_result_status = MOI.get(relaxed_optimizer, MOI.PrimalStatus()) - valid_flag, feasible_flag = 
is_globally_optimal(m._preprocess_termination_status, - m._preprocess_result_status) - - if valid_flag && feasible_flag - xLP = MOI.get(relaxed_optimizer, MOI.VariablePrimal(), m._relaxed_variable_index) - else - return false - end - - # continue tightening bounds by optimization until all indices have been checked - # or the node is empty and the problem is thus proven infeasible - while (any(m._obbt_working_lower_index) || any(m._obbt_working_upper_index)) && !isempty(n) + if relax_problem!(m) + MOI.set(d, MOI.ObjectiveSense(), MOI.MIN_SENSE) + MOI.optimize!(d) - # Get lower value - lower_indx = -1; upper_indx = -1 - lower_value = Inf; upper_value = Inf + # Sets indices to attempt OBBT on + obbt_variable_count = m._obbt_variable_count + fill!(m._obbt_working_lower_index, true) + fill!(m._obbt_working_upper_index, true) - # min of xLP - yL on active - if any(m._obbt_working_lower_index) - for i = 1:obbt_variable_count - if @inbounds m._obbt_working_lower_index[i] - sol_indx = branch_to_sol_map[i] - temp_value = @inbounds xLP[sol_indx] - n.lower_variable_bounds[i] - # Need less than or equal to handle unbounded cases - if temp_value <= lower_value - lower_value = temp_value - lower_indx = i - end - end - end + # Filters out any indicies with active bounds on variables + # determined by solving the feasibility problem + trivial_filtering!(m, n) + feasibility = aggressive_filtering!(m, n) + if set_preprocess_status(m,d) == RRS_OPTIMAL + xLP = MOI.get(d, MOI.VariablePrimal(), m._relaxed_variable_index) + else + return false end - # min of yU - xLP on active - if any(m._obbt_working_upper_index) - for i = 1:obbt_variable_count - if @inbounds m._obbt_working_upper_index[i] - sol_indx = branch_to_sol_map[i] - temp_value = @inbounds n.upper_variable_bounds[i] - xLP[sol_indx] - if temp_value <= upper_value - upper_value = temp_value - upper_indx = i + # continue tightening bounds by optimization until all indices have been checked + # or the node is empty and the problem 
is thus proven infeasible + while (any(m._obbt_working_lower_index) || any(m._obbt_working_upper_index)) && !isempty(n) + + # min of xLP - yL and xU - xLP for potential directions + lower_indx, lower_value = active_argmin(i -> Δxl(m, i), m._obbt_working_lower_index, obbt_variable_count) + upper_indx, upper_value = active_argmin(i -> Δux(m, i), m._obbt_working_lower_index, obbt_variable_count) + + # default to upper bound if no lower bound is found, use maximum distance otherwise + if lower_value <= upper_value && lower_indx > 0 + m._obbt_working_lower_index[lower_indx] = false + MOI.set(d, MOI.ObjectiveSense(), MOI.MIN_SENSE) + MOI.set(d, MOI.ObjectiveFunction{VI}(), m._relaxed_variable_index[_bvi(m, lower_indx)]) + MOI.optimize!(d) + status = set_preprocess_status(m,d) + if status == RRS_OPTIMAL + xLP .= MOI.get(d, MOI.VariablePrimal(), m._relaxed_variable_index) + updated_value = xLP[_bvi(m, lower_indx)] + previous_value = n.lower_variable_bounds[lower_indx] + + # if bound is improved update node and corresponding constraint update + # the node bounds and the single variable bound in the relaxation + # we assume branching does not occur on fixed variables and interval + # constraints are internally bridged by EAGO. So the only L <= x + # constraint in the model is a GreaterThan. 
+ if updated_value > previous_value && (updated_value - previous_value) > 1E-6 + sv_geq_ci = m._node_to_sv_geq_ci[lower_indx] + MOI.set(d, MOI.ConstraintSet(), sv_geq_ci, GT(updated_value)) + n.lower_variable_bounds[lower_indx] = updated_value end - end - end - end - # default to upper bound if no lower bound is found, use maximum distance otherwise - if lower_value <= upper_value && lower_indx > 0 - - @inbounds m._obbt_working_lower_index[lower_indx] = false - var = SV(m._relaxed_variable_index[lower_indx]) - - MOI.set(relaxed_optimizer, MOI.ObjectiveSense(), MOI.MIN_SENSE) - MOI.set(relaxed_optimizer, MOI.ObjectiveFunction{SV}(), var) - - MOI.optimize!(m.relaxed_optimizer) - m._preprocess_termination_status = MOI.get(relaxed_optimizer, MOI.TerminationStatus()) - m._preprocess_result_status = MOI.get(relaxed_optimizer, MOI.PrimalStatus()) - valid_flag, feasible_flag = is_globally_optimal(m._preprocess_termination_status, - m._preprocess_result_status) - - if valid_flag && feasible_flag - xLP .= MOI.get(relaxed_optimizer, MOI.VariablePrimal(), m._relaxed_variable_index) - - node_index = branch_to_sol_map[lower_indx] - updated_value = xLP[node_index] - previous_value = n.lower_variable_bounds[lower_indx] - - # if bound is improved update node and corresponding constraint update - # the node bounds and the single variable bound in the relaxation - # we assume branching does not occur on fixed variables and interval - # constraints are internally bridged by EAGO. So the only L <= x - # constraint in the model is a GreaterThan. 
- if updated_value > previous_value && (updated_value - previous_value) > 1E-6 - ci_list = MOI.get(relaxed_optimizer, MOI.ListOfConstraintIndices{SAF,LT}()) - sv_geq_ci = m._node_to_sv_geq_ci[lower_indx] - MOI.set(relaxed_optimizer, MOI.ConstraintSet(), sv_geq_ci, GT(updated_value)) - @inbounds n.lower_variable_bounds[lower_indx] = updated_value - end + if isempty(n) + feasibility = false + break + end - if isempty(n) + elseif status == RRS_INFEASIBLE feasibility = false break + else + break end - elseif valid_flag && !feasible_flag - feasibility = false - break - - else - break - end + elseif upper_indx > 0 + + m._obbt_working_upper_index[upper_indx] = false + MOI.set(d, MOI.ObjectiveSense(), MOI.MAX_SENSE) + MOI.set(d, MOI.ObjectiveFunction{VI}(), m._relaxed_variable_index[_bvi(m, upper_indx)]) + MOI.optimize!(d) + + status = set_preprocess_status(m,d) + if status == RRS_OPTIMAL + xLP .= MOI.get(d, MOI.VariablePrimal(), m._relaxed_variable_index) + updated_value = xLP[_bvi(m, upper_indx)] + previous_value = n.upper_variable_bounds[upper_indx] + + # if bound is improved update node and corresponding constraint update + # the node bounds and the single variable bound in the relaxation + # we assume branching does not occur on fixed variables and interval + # constraints are internally bridged by EAGO. So the only U => x + # constraint in the model is a LessThan. 
+ if updated_value < previous_value && (previous_value - updated_value) > 1E-6 + sv_leq_ci = m._node_to_sv_leq_ci[upper_indx] + MOI.set(d, MOI.ConstraintSet(), sv_leq_ci, LT(updated_value)) + n.upper_variable_bounds[upper_indx] = updated_value + end - elseif upper_indx > 0 - - m._obbt_working_upper_index[upper_indx] = false - var = SV(m._relaxed_variable_index[upper_indx]) - MOI.set(relaxed_optimizer, MOI.ObjectiveSense(), MOI.MAX_SENSE) - MOI.set(relaxed_optimizer, MOI.ObjectiveFunction{SV}(), var) - MOI.optimize!(relaxed_optimizer) - m._preprocess_termination_status = MOI.get(m.relaxed_optimizer, MOI.TerminationStatus()) - m._preprocess_result_status = MOI.get(m.relaxed_optimizer, MOI.PrimalStatus()) - valid_flag, feasible_flag = is_globally_optimal(m._preprocess_termination_status, - m._preprocess_result_status) - - if valid_flag && feasible_flag - xLP .= MOI.get(relaxed_optimizer, MOI.VariablePrimal(), m._relaxed_variable_index) - node_index = branch_to_sol_map[upper_indx] - updated_value = xLP[node_index] - previous_value = n.upper_variable_bounds[upper_indx] - - # if bound is improved update node and corresponding constraint update - # the node bounds and the single variable bound in the relaxation - # we assume branching does not occur on fixed variables and interval - # constraints are internally bridged by EAGO. So the only U => x - # constraint in the model is a LessThan. 
- if updated_value < previous_value && (previous_value - updated_value) > 1E-6 - sv_leq_ci = m._node_to_sv_leq_ci[upper_indx] - MOI.set(relaxed_optimizer, MOI.ConstraintSet(), sv_leq_ci, LT(updated_value)) - @inbounds n.upper_variable_bounds[upper_indx] = updated_value - end + if isempty(n) + feasibility = false + break + end - if isempty(n) + elseif status == RRS_INFEASIBLE feasibility = false break + else + break end - - elseif valid_flag && !feasible_flag - feasibility = false - break - else break end - - else - break + trivial_filtering!(m, n) end - trivial_filtering!(m, n) end - + reset_objective!(m, d) return feasibility end """ $(FUNCTIONNAME) """ -function load_fbbt_buffer!(m::Optimizer) - - n = m._current_node - sol_to_branch = m._sol_to_branch_map - lower_variable_bounds = n.lower_variable_bounds - upper_variable_bounds = n.upper_variable_bounds - +function load_fbbt_buffer!(m::GlobalOptimizer) for i = 1:m._working_problem._variable_count - if @inbounds m._branch_variables[i] - indx = @inbounds sol_to_branch[i] - @inbounds m._lower_fbbt_buffer[i] = lower_variable_bounds[indx] - @inbounds m._upper_fbbt_buffer[i] = upper_variable_bounds[indx] - - else - @inbounds m._lower_fbbt_buffer[i] = m._working_problem._variable_info[i].lower_bound - @inbounds m._upper_fbbt_buffer[i] = m._working_problem._variable_info[i].upper_bound - - end + m._lower_fbbt_buffer[i] = _lower_bound(FullVar(), m, i) + m._upper_fbbt_buffer[i] = _upper_bound(FullVar(), m, i) end - - return nothing + return end """ $(FUNCTIONNAME) """ -function unpack_fbbt_buffer!(m::Optimizer) +function unpack_fbbt_buffer!(m::GlobalOptimizer) n = m._current_node sol_to_branch = m._sol_to_branch_map @@ -495,201 +397,150 @@ Performs feasibility-based bound tightening on a back-end constraint and returns """ function fbbt! 
end -function fbbt!(m::Optimizer, f::AffineFunctionIneq) +function fbbt!(m::GlobalOptimizer, f::AffineFunctionIneq) # compute full sum lower_bounds = m._lower_fbbt_buffer upper_bounds = m._upper_fbbt_buffer - terms = f.terms temp_sum = -f.constant - for k = 1:f.len - - aik, indx_k = @inbounds terms[k] - if aik !== 0.0 - aik_xL = aik*(@inbounds lower_bounds[indx_k]) - aik_xU = aik*(@inbounds upper_bounds[indx_k]) + aik, i = @inbounds terms[k] + if !iszero(aik) + aik_xL = aik*(@inbounds lower_bounds[i]) + aik_xU = aik*(@inbounds upper_bounds[i]) temp_sum -= min(aik_xL, aik_xU) end end # subtract extra term, check to see if implied bound is better, if so update the node and # the working sum if the node is now empty then break - for k = 1:f.len - - aik, indx_k = @inbounds terms[k] - if aik !== 0.0 - - xL = @inbounds lower_bounds[indx_k] - xU = @inbounds upper_bounds[indx_k] - + aik, i = @inbounds terms[k] + if !iszero(aik) + xL = @inbounds lower_bounds[i] + xU = @inbounds upper_bounds[i] aik_xL = aik*xL aik_xU = aik*xU - temp_sum += min(aik_xL, aik_xU) xh = temp_sum/aik - if aik > 0.0 (xh < xL) && return false - if xh > xL - @inbounds upper_bounds[indx_k] = xh - end - + (xh > xL) && (@inbounds upper_bounds[i] = min(xh, xU)) elseif aik < 0.0 (xh > xU) && return false - if xh < xU - @inbounds lower_bounds[indx_k] = xh - end - + (xh < xU) && (@inbounds lower_bounds[i] = max(xh, xL)) else temp_sum -= min(aik_xL, aik_xU) continue - end - - aik_xL = aik*(@inbounds lower_bounds[indx_k]) - aik_xU = aik*(@inbounds upper_bounds[indx_k]) + aik_xL = aik*(@inbounds lower_bounds[i]) + aik_xU = aik*(@inbounds upper_bounds[i]) temp_sum -= min(aik_xL, aik_xU) - end end - return true end -function fbbt!(m::Optimizer, f::AffineFunctionEq) - +function fbbt!(m::GlobalOptimizer, f::AffineFunctionEq) # compute full sum lower_bounds = m._lower_fbbt_buffer upper_bounds = m._upper_fbbt_buffer - terms = f.terms temp_sum_leq = -f.constant temp_sum_geq = -f.constant for k = 1:f.len - aik, indx_k 
= @inbounds terms[k] - - if aik !== 0.0 - aik_xL = aik*(@inbounds lower_bounds[indx_k]) - aik_xU = aik*(@inbounds upper_bounds[indx_k]) + aik, i = @inbounds terms[k] + if !iszero(aik) + aik_xL = aik*(@inbounds lower_bounds[i]) + aik_xU = aik*(@inbounds upper_bounds[i]) temp_sum_leq -= min(aik_xL, aik_xU) temp_sum_geq -= max(aik_xL, aik_xU) - end end # subtract extra term, check to see if implied bound is better, if so update the node and # the working sum if the node is now empty then break for k = 1:f.len - - aik, indx_k = @inbounds terms[k] - if aik !== 0.0 - - xL = @inbounds lower_bounds[indx_k] - xU = @inbounds upper_bounds[indx_k] - + aik, i = @inbounds terms[k] + if !iszero(aik) + xL = @inbounds lower_bounds[i] + xU = @inbounds upper_bounds[i] aik_xL = aik*xL aik_xU = aik*xU - temp_sum_leq += min(aik_xL, aik_xU) temp_sum_geq += max(aik_xL, aik_xU) - xh_leq = temp_sum_leq/aik xh_geq = temp_sum_geq/aik - if aik > 0.0 (xh_leq < xL) && return false - if xh_leq > xL - @inbounds upper_bounds[indx_k] = xh_leq - end + (xh_leq > xL) && (@inbounds upper_bounds[i] = min(xh_leq, xU)) (xh_geq > xU) && return false - if (xh_geq < xU) - @inbounds lower_bounds[indx_k] = xh_geq - end - + (xh_geq < xU) && (@inbounds lower_bounds[i] = max(xh_geq, xL)) elseif aik < 0.0 (xh_leq > xU) && return false - if xh_leq < xU - @inbounds lower_bounds[indx_k] = xh_leq - end + (xh_leq < xU) && (@inbounds lower_bounds[i] = max(xh_leq, xL)) (xh_geq < xL) && return false - if (xh_geq > xL) - @inbounds upper_bounds[indx_k] = xh_geq - end - + (xh_geq > xL) && (@inbounds upper_bounds[i] = min(xh_geq, xU)) else temp_sum_leq -= min(aik_xL, aik_xU) temp_sum_geq -= max(aik_xL, aik_xU) continue - end - aik_xL = aik*(@inbounds lower_bounds[indx_k]) - aik_xU = aik*(@inbounds upper_bounds[indx_k]) - + aik_xL = aik*(@inbounds lower_bounds[i]) + aik_xU = aik*(@inbounds upper_bounds[i]) temp_sum_leq -= min(aik_xL, aik_xU) temp_sum_geq -= max(aik_xL, aik_xU) end end - return true end 
-cp_condition(m::Optimizer) = false - +_propagate_constraint!(d, f) = true +function _propagate_constraint!(d, f::BufferedNonlinearFunction) + forward_pass!(d, f) + is_feasible = rprop!(Relax(), d, f) + d.interval_intersect = true + is_feasible && forward_pass!(d, f) +end """ Performs bound tightening based on forward/reverse interval and/or McCormick passes. This routine resets the current node with new interval bounds. """ -function set_constraint_propagation_fbbt!(m::Optimizer) +function set_constraint_propagation_fbbt!(m::GlobalOptimizer{R,S,Q}) where {R,S,Q<:ExtensionType} feasible_flag = true + wp = m._working_problem if m._nonlinear_evaluator_created - evaluator = m._working_problem._relaxed_evaluator - set_node!(m._working_problem._relaxed_evaluator, m._current_node) - set_node_flag!(m) + evaluator = wp._relaxed_evaluator + set_node!(wp._relaxed_evaluator, m._current_node) set_reference_point!(m) - m._working_problem._relaxed_evaluator.is_first_eval = m._new_eval_constraint - for constr in m._working_problem._nonlinear_constr + wp._relaxed_evaluator.is_first_eval = m._new_eval_constraint + for constr in wp._nonlinear_constr if feasible_flag forward_pass!(evaluator, constr) - feasible_flag &= reverse_pass!(evaluator, constr) + feasible_flag &= rprop!(Relax(), evaluator, constr) evaluator.interval_intersect = true end end - m._working_problem._relaxed_evaluator.is_first_eval = m._new_eval_constraint - for constr in m._working_problem._nonlinear_constr - if feasible_flag - forward_pass!(evaluator, constr) - end + wp._relaxed_evaluator.is_first_eval = m._new_eval_constraint + for constr in wp._nonlinear_constr + feasible_flag && forward_pass!(evaluator, constr) end evaluator.is_post = m._parameters.subgrad_tighten - - m._working_problem._relaxed_evaluator.is_first_eval = m._new_eval_objective - if feasible_flag && (m._working_problem._objective_type === NONLINEAR) - obj_nonlinear = m._working_problem._objective_nl - set_node_flag!(obj_nonlinear) - 
forward_pass!(evaluator, obj_nonlinear) - feasible_flag &= reverse_pass!(evaluator, obj_nonlinear) - evaluator.interval_intersect = true - end - - if feasible_flag && (m._working_problem._objective_type === NONLINEAR) - obj_nonlinear = m._working_problem._objective_nl - set_node_flag!(obj_nonlinear) - forward_pass!(evaluator, obj_nonlinear) - end + wp._relaxed_evaluator.is_first_eval = m._new_eval_objective + feasible_flag && _propagate_constraint!(evaluator, wp._objective) m._new_eval_constraint = false m._new_eval_objective = false - - retrieve_x!(m._current_xref, evaluator) + _get_x!(BranchVar, m._current_xref, evaluator) m._current_node = retrieve_node(evaluator) + + interval_objective_bound!(m) end return feasible_flag diff --git a/src/eago_optimizer/functions/affine.jl b/src/eago_optimizer/functions/affine.jl deleted file mode 100644 index f15dce6f..00000000 --- a/src/eago_optimizer/functions/affine.jl +++ /dev/null @@ -1,85 +0,0 @@ -# Copyright (c) 2018: Matthew Wilhelm & Matthew Stuber. -# This code is licensed under MIT license (see LICENSE.md for full details) -############################################################################# -# EAGO -# A development environment for robust and global optimization -# See https://github.com/PSORLab/EAGO.jl -############################################################################# -# src/eago_optimizer/functions/nonlinear/affine.jl -# Defines buffered structures to store quadratic functions: -# AffineFunctionIneq, AffineFunctionEq, as well as the -# lower_interval_bound, interval_bound, and eliminate_fixed_variables! -# functions associated with each structure. -############################################################################# - -### -### Structure definitions -### - -""" -$(TYPEDEF) - -Current only used for bound tightening. Stores a representation -of an affine inequality. 
-""" -mutable struct AffineFunctionIneq <: AbstractEAGOConstraint - terms::Vector{Tuple{Float64,Int}} - constant::Float64 - len::Int -end - -""" -$(TYPEDEF) - -Current only used for bound tightening. Stores a representation -of an affine equality. -""" -mutable struct AffineFunctionEq <: AbstractEAGOConstraint - terms::Vector{Tuple{Float64,Int}} - constant::Float64 - len::Int -end - -### -### Constructor definitions -### -AffineFunctionIneq() = AffineFunctionIneq(Tuple{Float64,Int}[], 0.0, 0) -function AffineFunctionIneq(func::SAF, set::LT) - terms = map(x -> (x.coefficient, x.variable_index.value), func.terms) - return AffineFunctionIneq(terms, func.constant - set.upper, length(func.terms)) -end - -function AffineFunctionIneq(func::SAF, set::GT) - terms = map(x -> (-x.coefficient, x.variable_index.value), func.terms) - return AffineFunctionIneq(terms, set.lower - func.constant, length(func.terms)) -end - - -AffineFunctionEq() = AffineFunctionEq(Tuple{Float64,Int}[], 0.0, 0) -function AffineFunctionEq(func::SAF, set::ET) - terms = map(x -> (x.coefficient, x.variable_index.value), func.terms) - return AffineFunctionEq(terms, func.constant - set.value, length(func.terms)) -end - -### -### Parsing definitions -### - -function eliminate_fixed_variables!(f::T, v::Vector{VariableInfo}) where T <: Union{AffineFunctionIneq, - AffineFunctionEq} - deleted_count = 0 - index = 1 - while i + deleted_count <= f.len - coeff, indx = @inbounds f.terms[i] - variable_info = @inbounds v[indx] - if variable_info.is_fixed - f.constant += coeff*variable_info.lower_bound - deleteat!(f.terms, i) - deleted_count += 1 - else - i += 1 - end - end - f.len -= deleted_count - return nothing -end diff --git a/src/eago_optimizer/functions/cone.jl b/src/eago_optimizer/functions/cone.jl deleted file mode 100644 index 5078f1f6..00000000 --- a/src/eago_optimizer/functions/cone.jl +++ /dev/null @@ -1,41 +0,0 @@ -# Copyright (c) 2018: Matthew Wilhelm & Matthew Stuber. 
-# This code is licensed under MIT license (see LICENSE.md for full details) -############################################################################# -# EAGO -# A development environment for robust and global optimization -# See https://github.com/PSORLab/EAGO.jl -############################################################################# -# src/eago_optimizer/functions/nonlinear/cone.jl -# Defines buffered structures to store quadratic functions: -# BufferedSOC as well as the lower_interval_bound, interval_bound, and -# eliminate_fixed_variables! functions associated with each structure. -############################################################################# - -### -### Structure definitions -### -""" -$(TYPEDEF) - -Stores a second-order cone with a buffer. -""" -mutable struct BufferedSOC <: AbstractEAGOConstraint - variables::VECOFVAR - buffer::Dict{Int, Float64} - saf::SAF - len::Int -end - -### -### Constructor definitions -### -function BufferedSOC(func::VECOFVAR, set::SOC) - len = length(func.variables) - buffer = Dict{Int, Float64}([(variable.value, 0.0) for variable in func.variables]) - saf = SAF(fill(SAT(0.0, VI(1)), len), 0.0) - return BufferedSOC(copy(func), buffer, saf, len) -end - -### -### Parsing definitions -### diff --git a/src/eago_optimizer/functions/functions.jl b/src/eago_optimizer/functions/functions.jl index 7c77e2fb..331dc0b5 100644 --- a/src/eago_optimizer/functions/functions.jl +++ b/src/eago_optimizer/functions/functions.jl @@ -9,6 +9,8 @@ # Defines variable info and function types. ############################################################################# +include(joinpath(@__DIR__, "nonlinear\\auxiliary_variables.jl")) + """ $(TYPEDEF) @@ -36,38 +38,234 @@ Eliminate fixed variables by rearrangment or restructuring of `AbstractEAGOConst """ function eliminate_fixed_variables! 
end -@enum(BRANCH_VARIABLE, UNSPEC_BRANCH, NO_BRANCH, BRANCH) + +### +### +### Affine function +### +### + +""" +$(TYPEDEF) + +Current only used for bound tightening. Stores a representation +of an affine inequality. +""" +mutable struct AffineFunctionIneq <: AbstractEAGOConstraint + terms::Vector{Tuple{Float64,Int}} + constant::Float64 + len::Int +end +const AFI = AffineFunctionIneq + +AffineFunctionIneq() = AffineFunctionIneq(Tuple{Float64,Int}[], 0.0, 0) +function AffineFunctionIneq(f::SAF, s::LT) + terms = map(x -> (x.coefficient, x.variable.value), f.terms) + AffineFunctionIneq(terms, f.constant - s.upper, length(f.terms)) +end +function AffineFunctionIneq(f::SAF, s::GT) + terms = map(x -> (-x.coefficient, x.variable.value), f.terms) + AffineFunctionIneq(terms, s.lower - f.constant, length(f.terms)) +end +function AffineFunctionIneq(f::VI; is_max = false) + if is_max + return AffineFunctionIneq(Tuple{Float64,Int}[(-1.0, f.value)], 0.0, 1) + end + AffineFunctionIneq(Tuple{Float64,Int}[(1.0, f.value)], 0.0, 1) +end + + +""" +$(TYPEDEF) + +Current only used for bound tightening. Stores a representation +of an affine equality. 
+""" +mutable struct AffineFunctionEq <: AbstractEAGOConstraint + terms::Vector{Tuple{Float64,Int}} + constant::Float64 + len::Int +end +const AFE = AffineFunctionEq + +AffineFunctionEq() = AffineFunctionEq(Tuple{Float64,Int}[], 0.0, 0) +function AffineFunctionEq(func::SAF, set::ET) + terms = map(x -> (x.coefficient, x.variable_index.value), func.terms) + return AffineFunctionEq(terms, func.constant - set.value, length(func.terms)) +end + +### +### Parsing definitions +### + +function eliminate_fixed_variables!(f::T, v::Vector{VariableInfo}) where T <: Union{AffineFunctionIneq, + AffineFunctionEq} + deleted_count = 0 + i = 1 + while i + deleted_count <= f.len + coeff, indx = @inbounds f.terms[i] + variable_info = @inbounds v[indx] + if variable_info.is_fixed + f.constant += coeff*variable_info.lower_bound + deleteat!(f.terms, i) + deleted_count += 1 + else + i += 1 + end + end + f.len -= deleted_count + return nothing +end + + +### +### +### Quadratic function +### +### + +""" +$(TYPEDEF) + +Stores a general quadratic inequality constraint with a buffer. +""" +mutable struct BufferedQuadraticIneq <: AbstractEAGOConstraint + func::SQF + buffer::Dict{Int, Float64} + saf::SAF + len::Int +end +const BQI = BufferedQuadraticIneq + +""" +$(TYPEDEF) + +Stores a general quadratic equality constraint with a buffer. 
+""" +mutable struct BufferedQuadraticEq <: AbstractEAGOConstraint + func::SQF + minus_func::SQF + buffer::Dict{Int, Float64} + saf::SAF + len::Int +end +const BQE = BufferedQuadraticEq + +#= +mutable struct BufferedConvexQuadratic <: AbstractEAGOConstraint + func::SQF + buffer::Dict{Int, Float64} + saf::SAF + len::Int +end +=# + +function create_buffer_dict(func::SQF) + + buffer = Dict{Int, Float64}() + + for term in func.quadratic_terms + buffer[term.variable_1.value] = 0.0 + buffer[term.variable_2.value] = 0.0 + end + + for term in func.affine_terms + buffer[term.variable.value] = 0.0 + end + + return buffer +end + +BufferedQuadraticIneq() = BufferedQuadraticIneq(SQF(SQT[], SAT[], 0.0), Dict{Int, Float64}(), SAF(SAT[], 0.0), 0) + +function BufferedQuadraticIneq(func::SQF, set::LT) + + buffer = create_buffer_dict(func) + saf = SAF([SAT(0.0, VI(k)) for k in keys(buffer)], 0.0) + len = length(buffer) + cfunc = copy(func) + cfunc.constant -= set.upper + + return BufferedQuadraticIneq(cfunc, buffer, saf, len) +end + +function BufferedQuadraticIneq(func::SQF, set::GT) + + buffer = create_buffer_dict(func) + saf = SAF([SAT(0.0, VI(k)) for k in keys(buffer)], 0.0) + len = length(buffer) + cfunc = MOIU.operate(-, Float64, func) + cfunc.constant += set.lower + + return BufferedQuadraticIneq(cfunc, buffer, saf, len) +end + +BufferedQuadraticEq() = BufferedQuadraticEq(SQF(SQT[], SAT[], 0.0), SQF(SQT[], SAT[], 0.0), Dict{Int, Float64}(), SAF(SAT[], 0.0), 0) + +function BufferedQuadraticEq(func::SQF, set::ET) + + buffer = create_buffer_dict(func) + saf = SAF([SAT(0.0, VI(k)) for k in keys(buffer)], 0.0) + len = length(buffer) + cfunc1 = copy(func) + cfunc1.constant -= set.value + cfunc2 = MOIU.operate(-, Float64, func) + cfunc2.constant += set.value + + return BufferedQuadraticEq(cfunc1, cfunc2, buffer, saf, len) +end + +#= +function BufferedConvexQuadratic(f::BufferedQuadraticIneq) + BufferedConvexQuadratic(copy(f.func), copy(f.buffer), copy(f.saf), f.len) +end +=# + 
+function eliminate_fixed_variables!(f::T, v::Vector{VariableInfo}) where T <: Union{BufferedQuadraticIneq, + BufferedQuadraticIneq} + deleted_count = 0 + i = 1 + while i + deleted_count <= f.len + term = f.sqf.terms[i] + variable_info_1 = v[term.variable_1.value] + variable_info_2 = v[term.variable_2.value] + if variable_info_1.is_fixed && variable_info_2.is_fixed + f.sqf.constant += coeff*variable_info_1.lower_bound*variable_info_2.lower_bound + deleteat!(f.sqf.terms, i) + deleted_count += 1 + else + i += 1 + end + end + f.len -= deleted_count + + return nothing +end + + +### +### +### Cone +### +### """ $(TYPEDEF) -A structure used to store information related to the bounds assigned to each -variable. - -$(TYPEDFIELDS) -""" -mutable struct VariableInfo - "Is the variable integer valued?" - is_integer::Bool - "Lower bounds. May be -Inf." - lower_bound::Float64 - "Boolean indicating whether finite lower bound exists." - has_lower_bound::Bool - "Upper bounds. May be Inf." - upper_bound::Float64 - "Boolean indicating whether finite upper bound exists." - has_upper_bound::Bool - "Boolean indicating variable is fixed to a finite value." - is_fixed::Bool - "Is variable used for branching (unset in input model)" - branch_on::BRANCH_VARIABLE -end -VariableInfo() = VariableInfo(false,-Inf, false, Inf, false, false, UNSPEC_BRANCH) -lower_bound(x::VariableInfo) = x.lower_bound -upper_bound(x::VariableInfo) = x.upper_bound -is_integer(x::VariableInfo) = x.is_integer - -include("affine.jl") -include("quadratic.jl") -include("cone.jl") +Stores a second-order cone with a buffer. 
+""" +mutable struct BufferedSOC <: AbstractEAGOConstraint + variables::VECOFVAR + buffer::Dict{Int, Float64} + saf::SAF + len::Int +end + +function BufferedSOC(func::VECOFVAR, set::SOC) + len = length(func.variables) + buffer = Dict{Int, Float64}([(variable.value, 0.0) for variable in func.variables]) + saf = SAF(fill(SAT(0.0, VI(1)), len), 0.0) + return BufferedSOC(copy(func), buffer, saf, len) +end + include("nonlinear/nonlinear.jl") diff --git a/src/eago_optimizer/functions/nonlinear/apriori_relax/affine_arithmetic.jl b/src/eago_optimizer/functions/nonlinear/apriori_relax/affine_arithmetic.jl new file mode 100644 index 00000000..93882618 --- /dev/null +++ b/src/eago_optimizer/functions/nonlinear/apriori_relax/affine_arithmetic.jl @@ -0,0 +1,808 @@ + +const USE_MIN_RANGE = true + +struct AffineEAGO{N} + c::Float64 # mid-point + γ::SVector{N,Float64} # affine terms + Δ::Float64 # error term +end + +function AffineEAGO(x::AffineEAGO{N}, p::Float64, q::Float64, δ::Float64) where N + c = p * x.c + q + γ = p .* x.γ + Δ = p * x.Δ + δ + AffineEAGO{N}(c, γ, δ) +end +mid(x::Interval{Float64}) = 0.5*(x.lo + x.hi) +AffineEAGO{N}(x::Float64, X::Interval{Float64}, i::Int) where N = AffineEAGO{N}(mid(X), radius(X)*seed_gradient(i, Val(N)), 0.0) + +const UNIT_INTERVAL = Interval{Float64}(-1,1) +Interval(x::AffineEAGO{N}) where N = x.c + x.Δ*UNIT_INTERVAL + sum(y -> abs(y)*UNIT_INTERVAL, x.γ) +function bounds(x::AffineEAGO{N}) where N + z = Interval(x) + z.lo, z.hi +end + +zero(::Type{AffineEAGO{N}}) where N = AffineEAGO{N}(0.0, zero(SVector{N,Float64}), 0.0) +zero(x::AffineEAGO{N}) where N = AffineEAGO{N}(0.0, zero(SVector{N,Float64}), 0.0) + +one(::Type{AffineEAGO{N}}) where N = AffineEAGO{N}(1.0, zero(SVector{N,Float64}), 0.0) +one(x::AffineEAGO{N}) where N = AffineEAGO{N}(1.0, zero(SVector{N,Float64}), 0.0) + ++(x::AffineEAGO{N}, y::AffineEAGO{N}) where N = AffineEAGO{N}(x.c + y.c, x.γ .+ y.γ, x.Δ + y.Δ) ++(x::AffineEAGO{N}, α::Float64) where N = AffineEAGO{N}(α+x.c, x.γ, 
x.Δ) ++(α::Float64, x::AffineEAGO{N}) where N = x + α + +function *(x::AffineEAGO{N}, y::AffineEAGO{N}) where N + x0 = x.c + y0 = y.c + γ = SVector{N,Float64}(ntuple(i -> x0*y.γ[i] + y0*x.γ[i], Val(N))) + Δ = abs(x0)*y.Δ + abs(y0)*x.Δ + sx = abs(x.Δ) + sy = abs(y.Δ) + for i = 1:N + sx += abs(x.γ[i]) + sy += abs(y.γ[i]) + end + Δ += sx + sy + return AffineEAGO{N}(x0*y0, γ, Δ) +end +*(x::AffineEAGO{N}, α::Float64) where N = AffineEAGO{N}(α*x.c, α.*x.γ, abs(α)*x.Δ) +*(α::Float64, x::AffineEAGO{N}) where N = x*α + +function -(x::AffineEAGO{N}, y::AffineEAGO{N}) where N + AffineEAGO{N}(x.c - y.c, x.γ .- y.γ, x.Δ + y.Δ) +end +-(x::AffineEAGO{N}) where N = AffineEAGO{N}(-x.c, .-(x.γ), x.Δ) +-(x::AffineEAGO{N}, α::Float64) where N = AffineEAGO{N}(x.c - α, x.γ, x.Δ) +-(α::Float64, x::AffineEAGO{N}) where N = α + (-x) + +/(x::AffineEAGO{N}, α::Float64) where N = AffineEAGO{N}(x.c/α, x.γ/α, x.Δ/abs(α)) +/(α::Float64, x::AffineEAGO{N}) where N = α*inv(x) + + +function exp(x::AffineEAGO{N}) where N + a, b = bounds(x) + fa = exp(a) + fb = exp(b) + if USE_MIN_RANGE + p = exp(a) + q = 0.5*(fa + fb - p*(a + b)) + Δ = abs(0.5*(fb - fa - p*(b - a))) + return AffineEAGO(x, p, q, Δ) + end + p = (fb - fa)/(b - a) + ξ = log(p) + fξ = p + q = 0.5*(fa + fξ - p*(a + ξ)) + Δ = abs(0.5*(fξ - fa - p*(ξ - a))) + return AffineEAGO(x, p, q, Δ) +end + +function exp10(x::AffineEAGO{N}) where N + a, b = bounds(x) + fa = exp10(a) + fb = exp10(b) + if USE_MIN_RANGE + p = log10(a) + q = 0.5*(fa + fb - p*(a + b)) + Δ = abs(0.5*(fb - fa - p*(b - a))) + return AffineEAGO(x, p, q, Δ) + end + p = (fb - fa)/(b - a) + ξ = log10(p) + fξ = p + q = 0.5*(fa + fξ - p*(a + ξ)) + Δ = abs(0.5*(fξ - fa - p*(ξ - a))) + return AffineEAGO(x, p, q, Δ) +end + +function log(x::AffineEAGO{N}) where N + a, b = bounds(x) + fa = log(a) + fb = log(b) + if USE_MIN_RANGE + p = 1/b + q = 0.5*(fa + fb - p*(a + b)) + Δ = abs(0.5*(fb - fa - p*(b - a))) + return AffineEAGO(x, p, q, Δ) + end + p = (fb - fa)/(b - a) + ξ = 1/p + fξ = 
log(ξ) + q = 0.5*(fa + fξ - p*(a + ξ)) + Δ = abs(0.5*(fξ - fa - p*(ξ - a))) + return AffineEAGO(x, p, q, Δ) +end + +function log10(x::AffineEAGO{N}) where N + a, b = bounds(x) + fa = log10(a) + fb = log10(b) + if USE_MIN_RANGE + p = 1/(b*log(10)) + q = 0.5*(fa + fb - p*(a + b)) + Δ = abs(0.5*(fb - fa - p*(b - a))) + return AffineEAGO(x, p, q, Δ) + end + p = (fb - fa)/(b - a) + ξ = 1/p + fξ = log10(ξ) + q = 0.5*(fa + fξ - p*(a + ξ)) + Δ = abs(0.5*(fξ - fa - p*(ξ - a))) + return AffineEAGO(x, p, q, Δ) +end + +function pow_1d(x::AffineEAGO{N}, n::Number, p) where N + a, b = bounds(x) + fa = a^n + fb = b^n + if USE_MIN_RANGE + q = 0.5*(fa + fb - p*(a + b)) + Δ = abs(0.5*(fb - fa - p*(b - a))) + return AffineEAGO(x, p, q, Δ) + end + p = (fb - fa)/(b - a) + ξ = (p/n)^(1/(n - 1)) + fξ = ξ^n + q = 0.5*(fa + fξ - p*(a + ξ)) + Δ = abs(0.5*(fξ - fa - p*(ξ - a))) + return AffineEAGO(x, p, q, Δ) +end + +function pow_even(x::AffineEAGO{N}, n::Int) where N + a, b = bounds(x) + fa = a^n + fb = b^n + if USE_MIN_RANGE + m = min(0.0, fa, fb) + M = max(0.0, fa, fb) + p = 0.0 + q = 0.5*(m + M) + Δ = 0.5*(M - m) + return AffineEAGO(x, p, q, Δ) + end + p = (fb - fa)/(b - a) + ξ = (p/n)^(1/(n - 1)) + fξ = ξ^n + q = 0.5*(fa + fξ - p*(a + ξ)) + Δ = abs(0.5*(fξ - fa - p*(ξ - a))) + return AffineEAGO(x, p, q, Δ) +end + +function pow_odd(x::AffineEAGO{N}, n::Int) where N + #println("ran power odd") + # TODO: DOES THIS HANDLE a <= 0.0 <= b? + a, b = bounds(x) + fa = a^n + fb = b^n + p = (fb - fa)/(b - a) + q = 0.0 + ξ = (p/n)^(1/(n-1)) + fξ = ξ^n + Δ = abs(fξ - p*ξ) + return AffineEAGO(x, p, q, Δ) + #y = Base.power_by_squaring(x,n) + # return y +end + +function ^(x::AffineEAGO{N}, n::Int) where N + + iszero(n) && zero(x) + isone(n) && one(x) + + xL, xU = bounds(x) + if (xL > 0.0) || (xU < 0.0) + return pow_1d(x, n, (n >= 0) ? 
n*xL^(n-1) : n*xU^(n-1)) + elseif iseven(n) + return pow_even(x, n) + end + return pow_odd(x, n) +end + +function ^(x::AffineEAGO{N}, n::Float64) where N + if isinteger(n) + return x^Int(n) + end + xL, xU = bounds(x) + (xL < 0.0) && error("Invalid domain...") + if (n > 1.0) || (n < 0.0) + return pow_1d(x, n, n*xU^(n-1)) + end + return pow_1d(x, n, n*xL^(n-1)) +end + + +^(x::AffineEAGO{N}, y::AffineEAGO{N}) where N = exp(y*log(x)) +function ^(x::AffineEAGO{N}, n) where N + if iszero(n) + return zero(x) + elseif isone(n) + return one(x) + end + return x^n +end + +function inv(x::AffineEAGO{N}) where N + a, b = bounds(x) + (a < 0.0 < b) && error("Invalid domain...") + if b < 0.0 + return -inv(-x) + end + if USE_MIN_RANGE + p = -1/b^2 + q = -(p*(a + b)^2)/(2*a) + Δ = -(p*(a - b)^2)/(2*a) + return AffineEAGO(x, p, q, Δ) + end + p = -1/(a*b) + q = -0.5*p*(sqrt(a) + sqrt(b))^2 + Δ = -0.5*p*(sqrt(a) - sqrt(b))^2 + return AffineEAGO(x, p, q, Δ) +end + +struct MCAffPnt{N,T} + v::MC{N,T} + box::AffineEAGO{N} +end +MC(x::MCAffPnt{N,T}) where {N,T<:RelaxTag} = x.v +MC(x::MC{N,T}) where {N, T<:RelaxTag} = x + +relax_info(s::RelaxAA, n::Int, t::T) where {N,T} = MCAffPnt{n,T} + +zero(::Type{MCAffPnt{N,T}}) where {N,T} = MCAffPnt{N,T}(zero(MC{N,T}), zero(AffineEAGO{N})) +zero(x::MCAffPnt{N,T}) where {N,T} = MCAffPnt{N,T}(zero(x.v), zero(x.box)) + +one(::Type{MCAffPnt{N,T}}) where {N,T} = MCAffPnt{N,T}(one(MC{N,T}), one(AffineEAGO{N})) +one(x::MCAffPnt{N,T}) where {N,T} = MCAffPnt{N,T}(one(MC{N,T}), one(AffineEAGO{N})) + ++(x::MCAffPnt{N,T}, y::MCAffPnt{N,T}) where {N,T} = MCAffPnt{N,T}(x.v + y.v, x.box + y.box) ++(x::MCAffPnt{N,T}, α::Number) where {N,T} = MCAffPnt{N,T}(x.v + α, x.box + α) ++(α::Number, x::MCAffPnt{N,T}) where {N,T} = x + α + +*(x::MCAffPnt{N,T}, y::MCAffPnt{N,T}) where {N,T} = MCAffPnt{N,T}(x.v*y.v, x.box*y.box) +*(x::MCAffPnt{N,T}, α::Number) where {N,T} = MCAffPnt{N,T}(x.v * α, x.box * α) +*(α::Number, x::MCAffPnt{N,T}) where {N,T} = x*α + +-(x::MCAffPnt{N,T}, 
y::MCAffPnt{N,T}) where {N,T} = MCAffPnt{N,T}(x.v-y.v, x.box-y.box) +-(x::MCAffPnt{N,T}) where {N,T} = MCAffPnt{N,T}(-x.v, -x.box) +-(x::MCAffPnt{N,T}, α::Number) where {N,T} = x + (-α) +-(α::Number, x::MCAffPnt{N,T}) where {N,T} = α + (-x) + +/(x::MCAffPnt{N,T}, y::MCAffPnt{N,T}) where {N,T} = x*inv(y) +/(x::MCAffPnt{N,T}, α::T) where {N,T} = MCAffPnt{N,T}(x.v/α, x.box/α) +/(α::T, x::MCAffPnt{N,T}) where {N,T} = MCAffPnt{N,T}(α*inv(x.v), α*inv(x.box)) + +^(x::MCAffPnt{N,T}, n::Integer) where {N,T} = MCAffPnt{N,T}(x.v^n, x.box^n) +^(x::MCAffPnt{N,T}, n::Number) where {N,T} = MCAffPnt{N,T}(x.v^n, x.box^n) +^(x::MCAffPnt{N,T}, n::MCAffPnt{N,T}) where {N,T} = MCAffPnt{N,T}(x.v^n.v, x.box^n.box) + +for op in (:inv, :log, :log10, :exp, :exp10) + @eval ($op)(x::MCAffPnt{N,T}) where {N,T} = MCAffPnt{N,T}(($op)(x.v), ($op)(x.box)) +end + +Interval(x::MCAffPnt{N,T}) where {N,T<:RelaxTag} = Interval(x.v) ∩ Interval(x.box) + +function cut(x::MCAffPnt{N,T}, z::MCAffPnt{N,T}, v::VariableValues, ϵ::Float64, s::Vector{Int}, cflag::Bool, pflag::Bool) where {N,T<:RelaxTag} + (pflag & cflag) && (return set_value_post(x ∩ Interval(z), v, s, ϵ)) + (pflag & !cflag) && (return set_value_post(x, v, s, ϵ)) + (pflag & cflag) && (return x ∩ Interval(z)) + return x +end + +function cut(x::MC{N,T}, z::MCAffPnt{N,T}, v::VariableValues, ϵ::Float64, s::Vector{Int}, cflag::Bool, pflag::Bool) where {N,T<:RelaxTag} + (pflag & cflag) && (return set_value_post(x ∩ Interval(z), v, s, ϵ)) + (pflag & !cflag) && (return set_value_post(x, v, s, ϵ)) + (pflag & cflag) && (return x ∩ Interval(z)) + return x +end + +function varset(::Type{MCAffPnt{N,T}}, i, x_cv, x_cc, l, u) where {V,N,T<:RelaxTag} + v = seed_gradient(i, Val(N)) + v_Intv = Interval{Float64}(l, u) + v_mc = MC{N,T}(x_cv, x_cc, v_Intv, v, v, false) + v_aff = AffineEAGO{N}(x_cv, v_Intv, i) + return MCAffPnt{N,T}(v_mc, v_aff) +end + +function fprop!(t::RelaxAAInfo, vt::Variable, g::DAT, b::RelaxCache{MCAffPnt{N,T},N,T}, k) where {N,T<:RelaxTag} + 
i = first_index(g, k) + x = val(b, i) + l = lbd(b, i) + u = ubd(b, i) + if l == u + b[k] = x + b._is_num[k] = true + else + z = varset(MCAffPnt{N,T}, rev_sparsity(g, i, k), x, x, l, u) + if !first_eval(t, b) + z = z ∩ interval(b, k) + end + b._info[k] = z + b._is_num[k] = false + end + nothing +end + +function fprop!(t::RelaxAAInfo, ex::Subexpression, g::DAT, b::RelaxCache{MCAffPnt{N,T},N,T}, k) where {N,T<:RelaxTag} + x = first_index(g, k) + if subexpression_is_num(b, x) + b[k] = subexpression_num(b, x) + b._is_num[k] = true + else + b._info[k] = subexpression_info(b, x) + b._is_num[k] = false + end +end + +for (F, f) in ((PLUS, :+), (MIN, :min), (MAX, :max), (DIV, :/), (ARH, :arh)) + @eval function fprop_2!(t::RelaxAAInfo, v::Val{$F}, g::DAT, b::RelaxCache{MCAffPnt{N,T},N,T}, k) where {N,T<:RelaxTag} + x = child(g, 1, k) + y = child(g, 2, k) + if !xy_num(b, x, y) + if xyset(b, x, y) + z = ($f)(info(b, x), info(b, y)) + elseif xset_ynum(b, x, y) + z = ($f)(info(b, x), num(b, y)) + else + z = ($f)(num(b, x), info(b, y)) + end + b._info[k] = cut(z, info(b,k), b.ic.v, b.ϵ_sg, sparsity(g, k), b.cut, false) + b._is_num[k] = false + else + b[k] = ($f)(num(b, x), num(b, y)) + b._is_num[k] = true + end + end +end + +function fprop!(t::RelaxAAInfo, v::Val{MINUS}, g::DAT, b::RelaxCache{MCAffPnt{N,T},N,T}, k) where {N,T<:RelaxTag} + x = child(g, 1, k) + if is_binary(g, k) + y = child(g, 2, k) + if !xy_num(b, x, y) + if xyset(b, x, y) + z = info(b, x) - info(b, y) + elseif xset_ynum(b, x, y) + z = info(b, x) - num(b, y) + else + z = num(b, x) - info(b, y) + end + b._info[k] = cut(z, info(b,k), b.ic.v, b.ϵ_sg, sparsity(g, k), b.cut, false) + b._is_num[k] = false + else + b[k] = num(b, x) - num(b, y) + b._is_num[k] = true + end + else + if is_num(b, x) + b[k] = -num(b, x) + b._is_num[k] = true + else + b._info[k] = cut(-info(b, x), info(b, k), b.ic.v, b.ϵ_sg, sparsity(g, k), b.cut, false) + b._is_num[k] = false + end + end +end + +function fprop_n!(t::RelaxAAInfo, v::Val{PLUS}, 
g::DAT, b::RelaxCache{MCAffPnt{N,T},N,T}, k::Int) where {N,T<:RelaxTag} + z = zero(MCAffPnt{N,T}) + znum = 0.0 + numval = true + for i in children(g, k) + if is_num(b, i) + znum += num(b, i) + else + numval = false + z += info(b, i) + end + end + if numval + b[k] = znum + b._is_num[k] = true + else + z += znum + b._info[k] = cut(z, info(b, k), b.ic.v, b.ϵ_sg, sparsity(g, k), b.cut, false) + b._is_num[k] = false + end +end + +function fprop_n!(t::RelaxAAInfo, v::Val{MIN}, g::DAT, b::RelaxCache{MCAffPnt{N,T},N,T}, k::Int) where {N,T<:RelaxTag} + z = Inf*one(MCAffPnt{N,T}) + znum = Inf + numval = true + for i in children(g, k) + if is_num(b, i) + znum = min(znum, num(b, i)) + else + numval = false + z = min(z, info(b, i)) + end + end + if numval + b[k] = znum + b._is_num[k] = true + else + z = min(z, znum) + b._info[k] = cut(z, info(b, k), b.ic.v, b.ϵ_sg, sparsity(g, k), b.cut, false) + b._is_num[k] = false + end +end + +function fprop_n!(t::RelaxAAInfo, v::Val{MAX}, g::DAT, b::RelaxCache{MCAffPnt{N,T},N,T}, k::Int) where {N,T<:RelaxTag} + z = -Inf*one(MCAffPnt{N,T}) + znum = -Inf + numval = true + for i in children(g, k) + if is_num(b, i) + znum = max(znum, num(b, i)) + else + numval = false + z = max(z, info(b, i)) + end + end + if numval + b[k] = znum + b._is_num[k] = true + else + z = max(z, znum) + b._info[k] = cut(z, info(b, k), b.ic.v, b.ϵ_sg, sparsity(g, k), b.cut, false) + b._is_num[k] = false + end +end + +function fprop_2!(t::RelaxAAInfo, v::Val{MULT}, g::DAT, b::RelaxCache{MCAffPnt{N,T},N,T}, k::Int) where {N,T<:RelaxTag} + + x = child(g, 1, k) + y = child(g, 2, k) + + if !xy_num(b, x, y) + if xyset(b, x, y) + xr = info(b, x) + yr = info(b, y) + xv = xr.v + yv = yr.v + if b.use_apriori_mul + dp = b.dp + dP = b.dP + p_rel = b.p_rel + p_diam = b.p_diam + s = sparsity(g, 1) + u1max, u2max, v1nmax, v2nmax = estimator_extrema(xr, yr, s, dP) + zv = xv*yv + wIntv = zv.Intv + if (u1max < xv.Intv.hi) || (u2max < yv.Intv.hi) + u1cv, u2cv, u1cvg, u2cvg = 
estimator_under(xv, yv, xr, yr, s, dp, dP, p_rel, p_diam) + za_l = McCormick.mult_apriori_kernel(xv, yv, wIntv, u1cv, u2cv, u1max, u2max, u1cvg, u2cvg) + zv = zv ∩ za_l + end + if (v1nmax > -xv.Intv.lo) || (v2nmax > -yv.Intv.lo) + v1ccn, v2ccn, v1ccgn, v2ccgn = estimator_over(xv, yv, xr, yr, s, dp, dP, p_rel, p_diam) + za_u = McCormick.mult_apriori_kernel(-xv, -yv, wIntv, v1ccn, v2ccn, v1nmax, v2nmax, v1ccgn, v2ccgn) + zv = zv ∩ za_u + end + z = MCAffPnt{N,T}(zv, xr.box*yr.box) + else + z = xv*yv + end + elseif xset_ynum(b, x, y) + z = info(b, x)*num(b, y) + else + z = num(b, x)*info(b, y) + end + b._info[k] = cut(z, info(b,k), b.ic.v, b.ϵ_sg, sparsity(g, k), b.cut, false) + b._is_num[k] = true + else + b[k] = num(b, x)*num(b, y) + b._is_num[k] = false + end +end + +function fprop_n!(t::RelaxAAInfo, ::Val{MULT}, g::DAT, b::RelaxCache{MCAffPnt{N,T},N,T}, k::Int) where {N,T<:RelaxTag} + + z = one(MCAffPnt{N,T}) + znum = one(Float64) + numval = true + if b.use_apriori_mul + zr = one(MCAffPnt{N,T}) + dp = b.dp + dP = b.dP + s = sparsity(g, 1) + for (q,i) in enumerate(children(g, k)) + if is_num(b, i) + znum = znum*num(b, i) + else + numval = false + xi = info(b, i) + x = xi.v + xr = info(b, i) + u1max, u2max, v1nmax, v2nmax = estimator_extrema(zr, xr, s, dP) + zv = z*x + wIntv = zv.Intv + if (u1max < z.Intv.hi) || (u2max < x.Intv.hi) + u1cv, u2cv, u1cvg, u2cvg = estimator_under(zr, xr, s, dp, dP) + za_l = McCormick.mult_apriori_kernel(z, x, wIntv, u1cv, u2cv, u1max, u2max, u1cvg, u2cvg) + zv = zv ∩ za_l + end + if (v1nmax > -z.Intv.lo) || (v2nmax > -x.Intv.lo) + v1ccn, v2ccn, v1ccgn, v2ccgn = estimator_under(zr, xr, s, dp, dP) + za_u = McCormick.mult_apriori_kernel(-z, -x, wIntv, v1ccn, v2ccn, v1nmax, v2nmax, v1ccgn, v2ccgn) + zv = zv ∩ za_u + end + zr = zr*xr + zv = cut(zv, zv, b.ic.v, b.ϵ_sg, sparsity(g, i), b.cut, false) + z = zv + end + end + else + for (q,i) in enumerate(children(g, k)) + if is_num(b, i) + znum = znum*num(b, i) + else + numval = false + x = 
info(b, i) + z = z*x + z = cut(z, z, b.ic.v, b.ϵ_sg, sparsity(g, i), b.cut, false) + end + end + end + if numval + b[k] = znum + b._is_num[k] = true + else + z = z*znum + b._info[k] = z + b._is_num[k] = false + end +end + +for F in (PLUS, MULT, MIN, MAX) + @eval function fprop!(t::RelaxAAInfo, v::Val{$F}, g::DAT, b::RelaxCache{MCAffPnt{N,T},N,T}, k) where {N,T<:RelaxTag} + is_binary(g, k) ? fprop_2!(RelaxAAInfo(), Val($F), g, b, k) : fprop_n!(RelaxAAInfo(), Val($F), g, b, k) + end +end +function fprop!(t::RelaxAAInfo, v::Val{DIV}, g::DAT, b::RelaxCache{MCAffPnt{N,T},N,T}, k) where {N,T<:RelaxTag} + fprop_2!(RelaxAAInfo(), v, g, b, k) +end + +function fprop!(t::RelaxAAInfo, v::Val{POW}, g::DAT, b::RelaxCache{MCAffPnt{N,T},N,T}, k::Int) where {N,T<:RelaxTag} + x = child(g, 1, k) + y = child(g, 2, k) + if is_num(b, y) && isone(num(b, y)) + b._info[k] = info(b, x) + elseif is_num(b,y) && iszero(num(b, y)) + b._info[k] = one(MCAffPnt{N,T}) + elseif !xy_num(b, x, y) + if xyset(b, x, y) + z = info(b, x)^info(b, y) + elseif xset_ynum(b, x, y) + z = info(b, x)^num(b, y) + else + z = num(b, x)^info(b, y) + end + b._info[k] = cut(z, info(b,k), b.ic.v, b.ϵ_sg, sparsity(g, k), b.cut, false) + b._is_num[k] = false + else + b[k] = num(b, x)^num(b, y) + b._is_num[k] = true + end +end + +function fprop!(t::RelaxAAInfo, v::Val{USER}, g::DAT, b::RelaxCache{MCAffPnt{N,T},N,T}, k::Int) where {N,T<:RelaxTag} + f = user_univariate_operator(g, first_index(g, k)) + x = child(g, 1, k) + if is_num(b, x) + b[k] = f(num(b, x)) + b._is_num[k] = true + else + z = f(info(b, x)) + b._info[k] = cut(z, info(b, k), b.ic.v, zero(Float64), sparsity(g, k), b.cut, b.post) + b._is_num[k] = false + end +end + +function fprop!(t::RelaxAAInfo, v::Val{USERN}, g::DAT, b::RelaxCache{MCAffPnt{N,T},N,T}, k::Int) where {N,T<:RelaxTag} + mv = user_multivariate_operator(g, first_index(g, k)) + n = arity(g, k) + set_input = _info_input(b, n) + num_input = _num_input(b, n) + anysets = false + i = 1 + for c in 
children(g, k) + if is_num(b, c) + x = num(b, c) + if !isinf(x) + set_input[i] = MCAffPnt{N,T}(x) + num_input[i] = x + end + else + set_input[i] = info(b, c) + anysets = true + end + i += 1 + end + if anysets + z = MOI.eval_objective(mv, set_input)::MCAffPnt{N,T} + b._info[k] = cut(z, info(b, k), b.ic.v, zero(Float64), sparsity(g,k), b.cut, b.post) + b._is_num[k] = false + else + b[k] = MOI.eval_objective(mv, num_input) + b._is_num[k] = true + end +end + +for ft in UNIVARIATE_ATOM_TYPES + f = UNIVARIATE_ATOM_DICT[ft] + (f == :user || f == :+ || f == :-) && continue + @eval function fprop!(t::RelaxAAInfo, v::Val{$ft}, g::DAT, b::RelaxCache{MCAffPnt{N,T},N,T}, k) where {N,T<:RelaxTag} + x = child(g, 1, k) + if is_num(b, x) + b._is_num[k] = true + return b[k] = ($f)(num(b, x)) + else + b._is_num[k] = false + z = ($f)(info(b, x)) + b._info[k] = cut(z, info(b, k), b.ic.v, zero(Float64), sparsity(g,k), b.cut, b.post) + end + end +end + +for (F, f) in ((LOWER_BND, :lower_bnd), (UPPER_BND, :upper_bnd)) + @eval function fprop!(t::RelaxAAInfo, v::Val{$F}, g::DAT, b::RelaxCache{MCAffPnt{N,T},N,T}, k) where {N,T<:RelaxTag} + x = child(g, 1, k) + y = child(g, 2, k) + if is_num(b, y) + z = info(b, x) + z = ($f)(z, num(b, y)) + b._info[k] = cut(z, info(b, k), b.ic.v, zero(Float64), sparsity(g, k), b.cut, b.post) + end + b._is_num[k] = b._is_num[x] + end +end + +function fprop!(t::RelaxAAInfo, v::Val{BND}, g::DAT, b::RelaxCache{MCAffPnt{N,T},N,T}, k) where {N,T<:RelaxTag} + x = child(g, 1, k) + z = info(b, x) + y = child(g, 2, k) + r = child(g, 3, k) + if is_num(b, y) && is_num(b, r) + z = bnd(z, num(b, y),num(b, r)) + end + b._is_num[k] = b._is_num[x] + b._info[k] = cut(z, info(b, k), b.ic.v, zero(Float64), sparsity(g,k), b.cut, b.post) +end + +function f_init!(t::RelaxAA, g::DAT, b::RelaxCache) + tinfo = RelaxAAInfo() + for k = node_count(g):-1:1 + c = node_class(g, k) + (c == EXPRESSION) && fprop!(tinfo, Expression(), g, b, k) + (c == VARIABLE) && fprop!(tinfo, Variable(), g, 
b, k) + (c == SUBEXPRESSION) && fprop!(tinfo, Subexpression(), g, b, k) + k_is_num = b._is_num[k] + b[k] = b._info[k].v + b._is_num[k] = k_is_num + end + fprop!(Relax(), g, b) + nothing +end + +for d in ALL_ATOM_TYPES + @eval function fprop!(t::RelaxAA, v::Val{$d}, g::DAT, b::RelaxCache{V,N,T}, k::Int) where {V,N,T<:RelaxTag} + fprop!(Relax(), v, g, b, k) + end +end + +function estimator_extrema(x::MCAffPnt{N,T}, y::MCAffPnt{N,T}, s, dP) where {N,T} + + xIntv = x.box.c + sum(z -> z*UNIT_INTERVAL, x.box.γ) + xcvU = xIntv.hi - x.box.Δ + xccL = xIntv.lo + x.box.Δ + + yIntv = y.box.c + sum(z -> z*UNIT_INTERVAL, y.box.γ) + ycvU = yIntv.hi - y.box.Δ + yccL = yIntv.lo + y.box.Δ + + return xcvU, ycvU, -xccL, -yccL +end + +function estimator_under(xv, yv, x::MCAffPnt{N,T}, y::MCAffPnt{N,T}, s, dP, dp, p_rel, p_diam) where {N,T} + x_cv = x.box.c - x.box.Δ + y_cv = y.box.c - y.box.Δ + for (i,p_i) ∈ enumerate(s) + rp = 2.0*p_rel[p_i] + x_cv += x.box.γ[i]*rp + y_cv += y.box.γ[i]*rp + end + x_cv_grad = SVector{N,Float64}(ntuple(i -> 2.0*x.box.γ[i].*p_diam[s[i]], Val(N))) + y_cv_grad = SVector{N,Float64}(ntuple(i -> 2.0*y.box.γ[i].*p_diam[s[i]], Val(N))) + x_cv, y_cv, x_cv_grad, y_cv_grad +end + +function estimator_over(xv, yv, x::MCAffPnt{N,T}, y::MCAffPnt{N,T}, s, dp, dP, p_rel, p_diam) where {N,T} + x_cc = x.box.c + x.box.Δ + y_cc = y.box.c + y.box.Δ + for (i,p_i) ∈ enumerate(s) + rp = 2.0*p_rel[p_i] + x_cc += x.box.γ[i]*rp + y_cc += x.box.γ[i]*rp + end + x_ccn_grad = SVector{N,Float64}(ntuple(i -> -2.0*x.box.γ[i].*p_diam[s[i]], Val(N))) + y_ccn_grad = SVector{N,Float64}(ntuple(i -> -2.0*y.box.γ[i].*p_diam[s[i]], Val(N))) + -x_cc, -y_cc, x_ccn_grad, y_ccn_grad +end + + +#= +function extract_apriori_info(t::Union{RelaxAA,RelaxAAInfo}, v::VariableValues{Float64}, x::AffineEAGO{N}) where {N,T} + z = 0.0 + for (k,i) in enumerate(t.v) + l = _bd(v, i) + u = ubd(v, i) + z += x.γ[k]*(_val(v, i) - 0.5*(l + u))/(u - l) + end + Z = x.c + sum(z -> z*UNIT_INTERVAL, x.γ) + xcv = x.c + z - 
x.Δ + xcc = x.c + z + x.Δ + xcvU = Z.hi - x.Δ + xccL = Z.lo + x.Δ + xcvg = x.γ + xccg = x.γ + return xcv, xcvU, xcc, xccL, xcvg, xccg +end + +function _cut_info(v::VariableValues{Float64}, z::MC{N,T}, x::MCAffPnt{N,T}) where {N,T} + xcv, xcvU, xcc, xccL, xcvg, xccg = extract_apriori_info(RelaxAA(), v, x.box) + zaff = MC{N,T}(xcv, xcc, Interval(x.box), xcvg, xccg, false) + return zaff ∩ z +end +function _cut_info(v::VariableValues{Float64}, z::MCAffPnt{N,T}, x::MCAffPnt{N,T}) where {N,T} + xcv, xcvU, xcc, xccL, xcvg, xccg = extract_apriori_info(RelaxAA(), v, x.box) + zaff = MC{N,T}(xcv, xcc, Interval(x.box), xcvg, xccg, false) + return zaff ∩ z.v +end + +function _cut(t::RelaxAAInfo, b, k, x::MCAffPnt{N,T}, z::MCAffPnt{N,T}, v::VariableValues, ϵ::Float64, s::Vector{Int}, c::Bool, p::Bool) where {N,T<:RelaxTag} + xt = p ? MCAffPnt{N,T}(set_value_post(x.v, v, s, ϵ), x.box) : x + xtmc = _cut_info(v, xt, xt) + xtaffp = MCAffPnt{N,T}(xtmc, xt.box) + b._info[k] = xtaffp + return +end + +function _cut(t::RelaxAA, b, k, x::MC{N,T}, z::MCAffPnt{N,T}, v::VariableValues, ϵ::Float64, s::Vector{Int}, c::Bool, p::Bool) where {N,T<:RelaxTag} + xMC = set_value_post(c ? x ∩ x.Intv ∩ Interval(z.box) : x, v, s, ϵ) + xt = p ? 
xMC : x + zt = _cut_info(v, xt, info(b, k)) + _store_set!(b, zt, k) + return +end + +function fprop!(t::RelaxAAInfo, vt::Variable, g::DAT, b::RelaxCache{V,N,T}, k::Int) where {V,N,T<:RelaxTag} + i = first_index(g, k) + x = val(b, i) + l = lbd(b, i) + u = ubd(b, i) + z = varset(MC{N,T}, rev_sparsity(g, i, k), x, x, l, u) + zaff = AffineEAGO{N}(x, Interval(l,u), i) + zinfo = MCAffPnt{N,T}(z, zaff) + b._info[k] = zinfo + return +end +function fprop!(t::RelaxAA, vt::Variable, g::DAT, b::RelaxCache{V,N,T}, k::Int) where {V,N,T<:RelaxTag} + i = first_index(g, k) + x = val(b, i) + l = lbd(b, i) + u = ubd(b, i) + z = varset(MC{N,T}, rev_sparsity(g, i, k), x, x, l, u) + z = z ∩ _interval(b, k) + _store_set!(b, z, k) +end +=# + + + + + + + diff --git a/src/eago_optimizer/functions/nonlinear/apriori_relax/apriori_relax.jl b/src/eago_optimizer/functions/nonlinear/apriori_relax/apriori_relax.jl new file mode 100644 index 00000000..04ab9567 --- /dev/null +++ b/src/eago_optimizer/functions/nonlinear/apriori_relax/apriori_relax.jl @@ -0,0 +1,32 @@ + +function estimator_extrema(x::MC{N,T}, y::MC{N,T}, s, dP) where {N,T} + xcv = x.cv; xcvg = x.cv_grad + ycv = y.cv; ycvg = y.cv_grad + xccn = -x.cc; xccgn = -x.cc_grad + yccn = -y.cc; yccgn = -y.cc_grad + t3 = affine_expand_del(dP, xcv, xcvg, s) + t4 = affine_expand_del(dP, ycv, ycvg, s) + s3 = affine_expand_del(dP, xccn, xccgn, s) + s4 = affine_expand_del(dP, yccn, yccgn, s) + return t3, t4, s3, s4 +end + +function estimator_under(xv, yv, x::MC{N,T}, y::MC{N,T}, s, dp, dP, p_rel, p_diam) where {N,T} + xcv = x.cv; xcvg = x.cv_grad + ycv = y.cv; ycvg = y.cv_grad + u1cv = affine_expand_del(dp, xcv, xcvg, s) + u2cv = affine_expand_del(dp, ycv, ycvg, s) + return u1cv, u2cv, xcvg, ycvg +end + +function estimator_over(xv, yv, x::MC{N,T}, y::MC{N,T}, s, dp, dP, p_rel, p_diam) where {N,T} + xccn = -x.cc; xccgn = -x.cc_grad + yccn = -y.cc; yccgn = -y.cc_grad + v1ccn = affine_expand_del(dp, xccn, xccgn, s) + v2ccn = affine_expand_del(dp, yccn, 
yccgn, s) + return v1ccn, v2ccn, xccgn, yccgn +end + + +include(joinpath(@__DIR__, "enumeration.jl")) +include(joinpath(@__DIR__, "affine_arithmetic.jl")) \ No newline at end of file diff --git a/src/eago_optimizer/functions/nonlinear/apriori_relax/enumeration.jl b/src/eago_optimizer/functions/nonlinear/apriori_relax/enumeration.jl new file mode 100644 index 00000000..0ba5cfc3 --- /dev/null +++ b/src/eago_optimizer/functions/nonlinear/apriori_relax/enumeration.jl @@ -0,0 +1,64 @@ +const ENUM_OUTER_RND = 1E-9 +function f_init!(::RelaxMulEnum, g::DAT, b::RelaxCache{V,N,T}) where {V,N,T} + b.use_apriori_mul = false + fprop!(Relax(), g, b) + xp = copy(val(b)) + vlbd = lbd(b) + vubd = ubd(b) + xl = copy(vlbd) + xu = copy(vubd) + vlbd .-= ENUM_OUTER_RND + vubd .+= ENUM_OUTER_RND + for k = 1:node_count(g) + if !is_num(b, k) + b._info[k].v = set(b, k) + end + end + s = sparsity(g, 1) + for i = 0:2^N-1 + s = last(bitstring(i), N) + for (k,j) in enumerate(sparsity(g, 1)) + b.ic.v.x[j] = s[k] == '1' ? 
xl[j] : xu[j] + end + fprop!(Relax(), g, b) + for k = node_count(g):-1:1 + if !is_num(b, k) + b._info[k][i+1] = set(b, k) + end + end + end + b.ic.v.x .= xp + vlbd .= xl + vubd .= xu + b.use_apriori_mul = true + fprop!(Relax(), g, b) + return +end + +_cut_info(t::RelaxMulEnumInner, v, z, x) = z +_cut_info(t::RelaxMulEnum, v, z, x) = z + +relax_info(s::RelaxMulEnumInner, n::Int, t::T) where T = MCBoxPnt{2^n,n,T} +relax_info(s::RelaxMulEnum, n::Int, t::T) where T = MCBoxPnt{2^n,n,T} + +function estimator_extrema(x::MCBoxPnt{Q,N,T}, y::MCBoxPnt{Q,N,T}, s, dP) where {Q,N,T} + xmax = maximum(cv, x.box) + ymax = maximum(cv, y.box) + xmin = minimum(cc, x.box) + ymin = minimum(cc, y.box) + return xmax, ymax, -xmin, -ymin +end + +function estimator_under(xv, yv, x::MCBoxPnt{Q,N,T}, y::MCBoxPnt{Q,N,T}, s, dp, dP, p_rel, p_diam) where {Q,N,T} + xv.cv, yv.cv, xv.cv_grad, yv.cv_grad +end + +function estimator_over(xv, yv, x::MCBoxPnt{Q,N,T}, y::MCBoxPnt{Q,N,T}, s, dp, dP, p_rel, p_diam) where {Q,N,T} + -xv.cc, -yv.cc, -xv.cc_grad, -yv.cc_grad +end + +for d in ALL_ATOM_TYPES + @eval function fprop!(t::RelaxMulEnum, v::Val{$d}, g::DAT, b::RelaxCache{V,N,T}, k::Int) where {V,N,T<:RelaxTag} + fprop!(Relax(), v, g, b, k) + end +end \ No newline at end of file diff --git a/src/eago_optimizer/functions/nonlinear/auxiliary_variables.jl b/src/eago_optimizer/functions/nonlinear/auxiliary_variables.jl new file mode 100644 index 00000000..62cbd6d4 --- /dev/null +++ b/src/eago_optimizer/functions/nonlinear/auxiliary_variables.jl @@ -0,0 +1,179 @@ + +function _not_EAGO_error!(m::JuMP.Model) + if JuMP.solver_name(m) !== "EAGO: Easy Advanced Global Optimization" + error("Solve attached to model must be EAGO.Optimizer") + end +end + +#= +Reference for auxillary variables +=# +struct AuxiliaryVariableRef <: JuMP.AbstractVariableRef + idx::Int + model::JuMP.Model +end + +Base.@kwdef mutable struct _AuxVarData + aux::Dict{JuMP.VariableRef, AuxiliaryVariableRef} = Dict{JuMP.VariableRef, 
AuxiliaryVariableRef}() + #mimo_expr::Vector{MIMOExpr} = MIMOExpr[] + last_hook::Union{Nothing,Function} = nothing +end +function is_auxilliary_variable(m::_AuxVarData, i::Int) + false +end +is_auxilliary_variable(::Nothing, ::Int) = false + + +#= +function aux_variable_optimizehook(model::JuMP.Model) + initialize_auxillary_variables!(model) + model.optimize_hook = model.ext[:aux_var].last_hook + optimize!(model) + model.optimize_hook = aux_variable_optimizehook + return +end +function _initialize_auxillary_variable_data(model::Model) + model.ext[:aux_var] = _AuxVarData() + model.ext[:aux_var].last_hook = model.optimize_hook + model.optimize_hook = aux_variable_optimizehook + return model +end +function enable_auxillary_variables(model::Model) + haskey(model.ext, :aux_var) && error("Model has auxillary variables parameter enabled") + return _initialize_auxillary_variable_data(model) +end +EAGOModel(m::JuMP.Model) = enable_auxillary_variables(m) +EAGOModel() = EAGOModel(Model(EAGO.Optimizer)) + + +_getmodel(v::AuxiliaryVariableRef) = v.model +function _getauxdata(v::AuxiliaryVariableRef)::_AuxVarData + return _getauxdata(_getmodel(v))::_AuxVarData +end +function _getauxdata(model::Model)::_AuxVarData + auxvar = get(model.ext, :aux_var, nothing) + if auxvar !== nothing + return auxvar + end + return enable_auxillary_variables(model) +end + + +JuMP.index(p::AuxiliaryVariableRef) = p.idx +Base.iszero(::AuxiliaryVariableRef) = false +Base.copy(p::AuxiliaryVariableRef) = ParameterRef(p.idx, p.model) + +struct AuxiliaryVariableNotOwned <: Exception + aux::AuxiliaryVariableRef +end +JuMP.owner_model(p::AuxiliaryVariableRef) = p.model +function JuMP.check_belongs_to_model(p::AuxiliaryVariableRef, model::AbstractModel) + if owner_model(p) !== model + throw(AuxiliaryVariableNotOwned(p)) + end +end +function JuMP.is_valid(model::Model, aux::AuxiliaryVariableRef) + return model === owner_model(aux) +end + +function Base.hash(p::AuxiliaryVariableRef, h::UInt) + return 
hash(objectid(owner_model(p)), hash(p.ind, h)) +end +function Base.isequal(p1::AuxiliaryVariableRef, p2::AuxiliaryVariableRef) + return owner_model(p1) === owner_model(p2) && p1.ind == p2.ind +end + +JuMP.name(p::AuxiliaryVariableRef) = get(_getauxdata(p).names, p, "") +JuMP.set_name(p::AuxiliaryVariableRef, s::String) = _getauxdata(p).names[p] = s + + +struct AuxVar end + +_aux_msg(msg) = "Invalid initialization of auxillary variable. " * msg * " not supported." +function JuMP.build_variable(_error::Function, info::JuMP.VariableInfo, ::AuxVar) + info.has_lb && _error(_aux_msg("Lower bound")) + info.has_ub && _error(_aux_msg("Upper bound")) + info.binary && _error(_aux_msg("Binary")) + info.integer && _error(_aux_msg("Integer")) + info.has_start && _error(_aux_msg("Initial value")) + info.has_fix && _error(_aux_msg("Fixed value")) + return AuxVar() +end +function JuMP.add_variable(m::JuMP.Model, v::AuxVar, name::String="") + vref = _add_auxillary_variable(m) + if !isempty(name) + JuMP.set_name(vref, name) + end + return vref +end + +macro auxiliary_variable(m, args...) + esc(quote + @show ($args) + @show ($args...) + @show $(args...) + #@variable($m, ($args...), AuxVar()) + end + ) +end + +macro mimo_expression(m, name, f!, y, x) end + +macro mimo_expression(m, f!, y, x) + esc(quote + name = Symbol($f!) + @mimo_expression($m, name, $f!, $y, $x) + end + ) +end +=# + +#= +struct MIMOExpr + m::Model + f!::Function + y::Vector{Union{AuxiliaryVariableRef, JuMP.VariableRef}} + x::Vector{Union{AuxiliaryVariableRef, JuMP.VariableRef}} +end + + +function initialize_auxillary_variables!(m::JuMP.Model) + set_optimizer_attribute(m, "_auxillary_variable_info", m.ext[:aux_var]) + return +end + +function aux_variable_optimize!(m::JuMP.Model) + initialize_auxillary_variables!(m) + m.optimize_hook = m.ext[:aux_var].last_hook + optimize!(m) + m.optimize_hook = aux_variable_optimize! 
+ return +end + +function JuMP.add_variable(m::JuMP.Model, v::AuxVar, name::String="") + x = JuMP.add_variable(m, JuMP.ScalarVariable(v.info), name) + m.ext[:aux_var].var_to_aux[x] = v + return x +end + +JuMP.build_variable(_err::Function, info::JuMP.VariableInfo, ::Type{AuxVar}; kwargs...) = AuxVar(info) +function JuMP.add_variable(m::JuMP.Model, v::AuxVar, name::String) + x = JuMP.add_variable(m, JuMP.ScalarVariable(v.info), name) + m.ext[:aux_var].var_to_aux[x] = v + return x +end + +" +m = EAGOModel() +@variable(m, -2 <= x[i=1:2] <= 2) +" +function add_auxillary_variable(m::JuMP.Model, x, l, u) +end + +""" +""" +function add_mimo_expression(m::JuMP.Model, name::String, f!::Function, y, x) + MIMOExpr + m = mimo_expr +end +=# \ No newline at end of file diff --git a/src/eago_optimizer/functions/nonlinear/composite_relax/composite_relax.jl b/src/eago_optimizer/functions/nonlinear/composite_relax/composite_relax.jl new file mode 100644 index 00000000..fb6ab4d9 --- /dev/null +++ b/src/eago_optimizer/functions/nonlinear/composite_relax/composite_relax.jl @@ -0,0 +1,281 @@ + +abstract type RelaxCacheAttribute <: AbstractCacheAttribute end + +""" + Relax + +Used to dispatch relaxations to a standard +""" +struct Relax <: RelaxCacheAttribute end + +""" + RelaxAA +""" +struct RelaxAA <: RelaxCacheAttribute + v::Vector{Int} +end +RelaxAA() = RelaxAA(Int[]) + +struct RelaxAAInfo <: RelaxCacheAttribute + v::Vector{Int} +end +RelaxAAInfo() = RelaxAAInfo(Int[]) + +""" + RelaxMulEnum +""" +struct RelaxMulEnum <: RelaxCacheAttribute + v::Vector{Int} + use_info::Bool +end +RelaxMulEnum() = RelaxMulEnum(Int[], false) +RelaxMulEnum(x::Vector{Int64}) = RelaxMulEnum(x, false) +RelaxMulEnum(x::Bool) = RelaxMulEnum(Int[], x) + +struct RelaxMulEnumInner <: RelaxCacheAttribute + v::Vector{Int} + use_info::Bool +end +RelaxMulEnumInner() = RelaxMulEnumInner(Int[], false) +RelaxMulEnumInner(x::Vector{Int64}) = RelaxMulEnumInner(x, false) +RelaxMulEnumInner(x::Bool) = 
RelaxMulEnumInner(Int[], x) + + +const RELAX_ATTRIBUTE = Union{Relax,RelaxAA,RelaxAAInfo,RelaxMulEnum,RelaxMulEnumInner,RelaxInterval} +const RELAX_ONLY_ATTRIBUTE = Union{Relax,RelaxAA,RelaxAAInfo,RelaxMulEnum,RelaxMulEnumInner} + +mutable struct MCBoxPnt{Q,N,T} + v::MC{N,T} + box::Vector{MC{N,T}} +end + +cv(x::MCBoxPnt{Q,N,T}) where {Q,N,T} = x.v.cv +cc(x::MCBoxPnt{Q,N,T}) where {Q,N,T} = x.v.cc +cv_grad(x::MCBoxPnt{Q,N,T}) where {Q,N,T} = x.v.cv_grad +cc_grad(x::MCBoxPnt{Q,N,T}) where {Q,N,T} = x.v.cc_grad + +cv(x::MC{N,T}) where {N,T} = x.cv +cc(x::MC{N,T}) where {N,T} = x.cc +cv_grad(x::MC{N,T}) where {N,T} = x.cv_grad +cc_grad(x::MC{N,T}) where {N,T} = x.cc_grad + +function zero(::Type{MCBoxPnt{Q,N,T}}) where {Q,N,T} + MCBoxPnt{Q,N,T}(zero(MC{N,T}), zeros(MC{N,T}, Q)) +end +function one(::Type{MCBoxPnt{Q,N,T}}) where {Q,N,T} + MCBoxPnt{Q,N,T}(one(MC{N,T}), ones(MC{N,T}, Q)) +end + +for op in (:+, :-, :/, :^, :*, :max, :min) + @eval function ($op)(x::MCBoxPnt{Q,N,T}, y::Float64) where {Q,N,T} + MCBoxPnt{Q,N,T}(x.v*y, ($op).(x.box, y)) + end + @eval function ($op)(x::Float64, y::MCBoxPnt{Q,N,T}) where {Q,N,T} + MCBoxPnt{Q,N,T}(x*y.v, ($op).(x, y.box)) + end + @eval function ($op)(x::MCBoxPnt{Q,N,T}, y::MCBoxPnt{Q,N,T}) where {Q,N,T} + MCBoxPnt{Q,N,T}(x.v*y.v, ($op).(x.box, y.box)) + end +end +for ft in UNIVARIATE_ATOM_TYPES + op = UNIVARIATE_ATOM_DICT[ft] + if (op == :user) && continue + @eval function ($op)(x::MCBoxPnt{Q,N,T}) where {Q,N,T} + MCBoxPnt{Q,N,T}(($op)(x.v), ($op).(x.box)) + end + end +end + +function setindex!(d::MCBoxPnt{Q,N,T}, x::MC{N,T}, i::Int) where {Q,N,T} + d.box[i] = x +end + +Base.@kwdef mutable struct RelaxCache{V,N,T<:RelaxTag} <: AbstractCache + ic::IntervalCache = IntervalCache{Float64}() + dp::Vector{Float64} = Float64[] + dP::Vector{Interval{Float64}} = Interval{Float64}[] + p_rel::Vector{Float64} = Float64[] + p_diam::Vector{Float64} = Float64[] + _set::Vector{MC{N,T}} = MC{N,T}[] + _num::Vector{Float64} = Float64[] + 
_is_num::Vector{Bool} = Bool[] + _info::Vector{V} = V[] + _subexpression_set::Dict{Int,MC{N,T}} = Dict{Int,MC{N,T}}() + _subexpression_num::Dict{Int,Float64} = Dict{Int,Float64}() + _subexpression_is_num::Dict{Int,Bool} = Dict{Int,Bool}() + _subexpression_info::Dict{Int,MC{N,T}} = Dict{Int,MC{N,T}}() + _cv_grad_buffer::Vector{Float64} = Float64[] + _cc_grad_buffer::Vector{Float64} = Float64[] + _set_mv_buffer::Vector{MC{N,T}} = MC{N,T}[] + _num_mv_buffer::Vector{Float64} = Float64[] + _info_mv_buffer::Vector{V} = V[] + _mult_temp::V = zero(V) + ϵ_sg::Float64 = 1E-6 + post::Bool = false + cut::Bool = false + cut_interval::Bool = false + first_eval::Bool = true + use_apriori_mul::Bool = false +end +function RelaxCache{N,T}(::Relax, n::Int, m::Int, p::Int) where {N,T<:RelaxTag} + RelaxCache{N,T}(_set = zeros(MC{N,T}, n), + _num = zeros(Float64, n), + _is_num = zeros(Bool, n), + _info = zeros(MC{N,T}, n), + _cv_grad_buffer = zeros(Float64, p), + _cc_grad_buffer = zeros(Float64, p), + _set_mv_buffer = zeros(MC{N,T}, p), + _num_mv_buffer = zeros(p), + _info_mv_buffer = zeros(MC{N,T}, p), + ) +end +function initialize!(c::RelaxCache{V,N,T}, g::DirectedTree) where {V,N,T<:RelaxTag} + + n = node_count(g) + m = dep_subexpr_count(g) + p = length(sparsity(g, 1)) + + c._num = zeros(n) + c._is_num = zeros(Bool, n) + c._set = zeros(MC{N,T}, n) + c._cv_grad_buffer = zeros(p) + c._cc_grad_buffer = zeros(p) + c._set_mv_buffer = zeros(MC{N,T}, p) + c._num_mv_buffer = zeros(p) + c._mult_temp = zero(V) + + + for i = 1:n + ni = node(g, i) + if node_class(ni) == CONSTANT + c._is_num[i] = true + c._num[i] = g.constant_values[first_index(ni)] + end + end + + for i = 1:n + push!(c._info, zero(V)) + end + for i = 1:p + push!(c._info_mv_buffer, zero(V)) + end + return +end + +function copy_subexpr!(rc::RelaxCache{V,N,T}, ds, dn, din, di) where {V,N,T<:RelaxTag} + rc._subexpression_set = ds + rc._subexpression_num = dn + rc._subexpression_is_num = din + rc._subexpression_info = di + return 
+end + +mc_type(rc::RelaxCache{V,N,T}) where {V,N,T<:RelaxTag} = MC{N,T} + +### +### +### Access functions for RelaxCache. +### +### +set(b::RelaxCache{V,N,T}, i::Int) where {V,N,T<:RelaxTag} = b._set[i] +num(b::RelaxCache{V,N,T}, i::Int) where {V,N,T<:RelaxTag} = b._num[i] +set_or_num(b::RelaxCache{V,N,T}, i::Int) where {V,N,T<:RelaxTag} = !is_num(b, i) ? set(b,i) : num(b,i) +info(b::RelaxCache{V,N,T}, i::Int) where {V,N,T<:RelaxTag} = b._info[i] +is_num(b::RelaxCache{V,N,T}) where {V,N,T<:RelaxTag} = b._is_num +is_num(b::RelaxCache{V,N,T}, i::Int) where {V,N,T<:RelaxTag} = b._is_num[i] +interval(b::RelaxCache{V,N,T}, i::Int) where {V,N,T<:RelaxTag} = Interval{Float64}(set(b, i)) +subexpression_set(b::RelaxCache{V,N,T}, i::Int) where {V,N,T<:RelaxTag} = b._subexpression_set[i] +subexpression_num(b::RelaxCache{V,N,T}, i::Int) where {V,N,T<:RelaxTag} = b._subexpression_num[i] +subexpression_is_num(b::RelaxCache{V,N,T}, i::Int) where {V,N,T<:RelaxTag} = b._subexpression_is_num[i] +function Base.setindex!(b::RelaxCache{V,N,T}, v::Float64, i::Int) where {V,N,T<:RelaxTag} + b._is_num[i] = true + b._num[i] = v + nothing +end +function Base.setindex!(b::RelaxCache{V,N,T}, v::MC{N,T}, i::Int) where {V,N,T<:RelaxTag} + b._is_num[i] = false + b._set[i] = v + nothing +end +store_info!(b::RelaxCache{V,N,T}, v::V, i::Int) where {V,N,T<:RelaxTag} = (b._info[i] = v; nothing) + +function store_subexpression_set!(b::RelaxCache{V,N,T}, v::MC{N,T}, i::Int) where {V,N,T<:RelaxTag} + b._subexpression_is_num[i] = false + b._subexpression_set[i] = v + nothing +end +function store_subexpression_num!(b::RelaxCache{V,N,T}, v, i::Int) where {V,N,T<:RelaxTag} + b._subexpression_is_num[i] = true + b._subexpression_num[i] = v + nothing +end + +first_eval(t::RELAX_ONLY_ATTRIBUTE, b::RelaxCache) = b.first_eval +first_eval(t::RelaxInterval, b::RelaxCache) = b.first_eval + +val(b::RelaxCache{V,N,T}) where {V,N,T<:RelaxTag} = val(b.ic.v) +lbd(b::RelaxCache{V,N,T}) where {V,N,T<:RelaxTag} = 
lbd(b.ic.v) +ubd(b::RelaxCache{V,N,T}) where {V,N,T<:RelaxTag} = ubd(b.ic.v) + +val(b::RelaxCache{V,N,T}, i::Int) where {V,N,T<:RelaxTag} = val(b.ic.v, i) +lbd(b::RelaxCache{V,N,T}, i::Int) where {V,N,T<:RelaxTag} = lbd(b.ic.v, i) +ubd(b::RelaxCache{V,N,T}, i::Int) where {V,N,T<:RelaxTag} = ubd(b.ic.v, i) + +_set_input(b::RelaxCache{V,N,T}, n::Int) where {V,N,T<:RelaxTag} = view(b._set_mv_buffer, 1:n) +_num_input(b::RelaxCache{V,N,T}, n::Int) where {V,N,T<:RelaxTag} = view(b._num_mv_buffer, 1:n) +_info_input(b::RelaxCache{V,N,T}, n::Int) where {V,N,T<:RelaxTag} = view(b._info_mv_buffer, 1:n) + +include(joinpath(@__DIR__, "utilities.jl")) +include(joinpath(@__DIR__, "forward_propagation.jl")) +include(joinpath(@__DIR__, "reverse_propagation.jl")) + +function fprop!(t::RELAX_ATTRIBUTE, g::DAT, b::RelaxCache{V,N,T}) where {V,N,T<:RelaxTag} + for k = node_count(g):-1:1 + c = node_class(g, k) + (c == EXPRESSION) && (fprop!(t, Expression(), g, b, k); continue) + (c == VARIABLE) && (fprop!(t, Variable(), g, b, k); continue) + (c == SUBEXPRESSION) && (fprop!(t, Subexpression(), g, b, k); continue) + end + nothing +end + +function rprop!(t::RELAX_ATTRIBUTE, g::DAT, b::RelaxCache{V,N,T}) where {V,N,T<:RelaxTag} + flag = r_init!(t, g, b) + for k = 1:node_count(g) + nt = node_class(g, k) + if nt == EXPRESSION + flag = rprop!(t, Expression(), g, b, k) + elseif nt == VARIABLE + flag = rprop!(t, Variable(), g, b, k) + elseif nt == SUBEXPRESSION + flag = rprop!(t, Subexpression(), g, b, k) + end + end + return flag +end + + +function display_expr(g, i) + n = node(g, i) + nc = node_class(n) + if nc == VARIABLE + return "x[$(first_index(n))]" + elseif nc == PARAMETER + return "p[$(first_index(n))]" + elseif nc == EXPRESSION + ex_typ = ex_type(n) + ex_sym = ALL_ATOM_DICT[ex_typ] + ctup = tuple(children(n)...) 
+ return "$(ex_sym)$(ctup)" + elseif nc == CONSTANT + return "c[$(first_index(n))]" + end +end + +function display_table!(g::DAT, b::RelaxCache{V,N,T}) where {V,N,T<:RelaxTag} + nc = node_count(g) + val = [b._is_num[i] ? b._num[i] : b._set[i] for i in 1:nc] + exr = [display_expr(g, i) for i in 1:nc] + data = hcat(exr, b._is_num, val) + show(pretty_table(data, header = ["Expr", "Is Num", "Val"]; show_row_number = true)) +end \ No newline at end of file diff --git a/src/eago_optimizer/functions/nonlinear/composite_relax/forward_propagation.jl b/src/eago_optimizer/functions/nonlinear/composite_relax/forward_propagation.jl new file mode 100644 index 00000000..badebcac --- /dev/null +++ b/src/eago_optimizer/functions/nonlinear/composite_relax/forward_propagation.jl @@ -0,0 +1,367 @@ +# Copyright (c) 2018: Matthew Wilhelm & Matthew Stuber. +# This code is licensed under MIT license (see LICENSE.md for full details) +############################################################################# +# EAGO +# A development environment for robust and global optimization +# See https://github.com/PSORLab/EAGO.jl +############################################################################# +# src/eago_optimizer/evaluator/passes.jl +# Functions used to compute forward pass of nonlinear functions which include: +# set_value_post, overwrite_or_intersect, forward_pass_kernel, associated blocks +############################################################################# + +xnum_yset(b, x, y) = is_num(b, x) && !is_num(b, y) +xset_ynum(b, x, y) = !is_num(b, x) && is_num(b, y) +xy_num(b, x, y) = is_num(b, x) && is_num(b, y) +xyset(b, x, y) = !(is_num(b, x) || is_num(b, y)) + +function varset(::Type{MC{N,T}}, i, x_cv, x_cc, l, u) where {V,N,T<:RelaxTag} + v = seed_gradient(i, Val(N)) + return MC{N,T}(x_cv, x_cc, Interval{Float64}(l, u), v, v, false) +end + +function fprop!(t::RelaxCacheAttribute, vt::Variable, g::DAT, b::RelaxCache{V,N,T}, k) where {V,N,T<:RelaxTag} + i = 
first_index(g, k) + x = val(b, i) + l = lbd(b, i) + u = ubd(b, i) + if l == u + b[k] = x + else + z = varset(MC{N,T}, rev_sparsity(g, i, k), x, x, l, u) + if !first_eval(t, b) + z = z ∩ interval(b, k) + end + b[k] = z + end + nothing +end + +function fprop!(t::Relax, ex::Subexpression, g::DAT, b::RelaxCache{V,N,T}, k) where {V,N,T<:RelaxTag} + y = first_index(g, k) + x = dependent_subexpression_index(g, y) + if subexpression_is_num(b, x) + b[k] = subexpression_num(b, x) + else + b[k] = subexpression_set(b, x) + end +end + +for (F, f) in ((PLUS, :+), (MIN, :min), (MAX, :max), (DIV, :/), (ARH, :arh)) + @eval function fprop_2!(t::Relax, v::Val{$F}, g::DAT, b::RelaxCache{V,N,T}, k) where {V,N,T<:RelaxTag} + x = child(g, 1, k) + y = child(g, 2, k) + if !xy_num(b, x, y) + if xyset(b, x, y) + z = ($f)(set(b, x), set(b, y)) + elseif xset_ynum(b, x, y) + z = ($f)(set(b, x), num(b, y)) + else + z = ($f)(num(b, x), set(b, y)) + end + b[k] = cut(z, set(b,k), b.ic.v, b.ϵ_sg, sparsity(g, k), b.cut, false) + else + b[k] = ($f)(num(b, x), num(b, y)) + end + end +end + +function fprop!(t::Relax, v::Val{MINUS}, g::DAT, b::RelaxCache{V,N,T}, k) where {V,N,T<:RelaxTag} + x = child(g, 1, k) + if is_binary(g, k) + y = child(g, 2, k) + if !xy_num(b, x, y) + if xyset(b, x, y) + z = set(b, x) - set(b, y) + elseif xset_ynum(b, x, y) + z = set(b, x) - num(b, y) + else + z = num(b, x) - set(b, y) + end + b[k] = cut(z, set(b,k), b.ic.v, b.ϵ_sg, sparsity(g, k), b.cut, false) + else + b[k] = num(b, x) - num(b, y) + end + else + if is_num(b, x) + b[k] = -num(b, x) + else + z = -set(b, x) + b[k] = cut(-set(b, x), set(b, k), b.ic.v, b.ϵ_sg, sparsity(g, k), b.cut, false) + end + end +end + +function fprop_n!(t::Relax, v::Val{PLUS}, g::DAT, b::RelaxCache{V,N,T}, k::Int) where {V,N,T<:RelaxTag} + z = zero(MC{N,T}) + znum = 0.0 + numval = true + for i in children(g, k) + if is_num(b, i) + znum += num(b, i) + else + numval = false + z += set(b, i) + end + end + if numval + b[k] = znum + else + z += znum 
+ b[k] = cut(z, set(b, k), b.ic.v, b.ϵ_sg, sparsity(g, k), b.cut, false) + end +end + +function fprop_n!(t::Relax, v::Val{MIN}, g::DAT, b::RelaxCache{V,N,T}, k::Int) where {V,N,T<:RelaxTag} + z = Inf*one(MC{N,T}) + znum = Inf + numval = true + for i in children(g, k) + if is_num(b, i) + znum = min(znum, num(b, i)) + else + numval = false + z = min(z, set(b, i)) + end + end + if numval + b[k] = znum + else + z = min(z, znum) + b[k] = cut(z, set(b, k), b.ic.v, b.ϵ_sg, sparsity(g, k), b.cut, false) + end +end + +function fprop_n!(t::Relax, v::Val{MAX}, g::DAT, b::RelaxCache{V,N,T}, k::Int) where {V,N,T<:RelaxTag} + z = -Inf*one(MC{N,T}) + znum = -Inf + numval = true + for i in children(g, k) + if is_num(b, i) + znum = max(znum, num(b, i)) + else + numval = false + z = max(z, set(b, i)) + end + end + if numval + b[k] = znum + else + z = max(z, znum) + b[k] = cut(z, set(b, k), b.ic.v, b.ϵ_sg, sparsity(g, k), b.cut, false) + end +end + +function fprop_2!(t::Relax, v::Val{MULT}, g::DAT, b::RelaxCache{V,N,T}, k::Int) where {V,N,T<:RelaxTag} + + x = child(g, 1, k) + y = child(g, 2, k) + + if !xy_num(b, x, y) + if xyset(b, x, y) + xv = set(b, x) + yv = set(b, y) + if b.use_apriori_mul + dp = b.dp + dP = b.dP + p_rel = b.p_rel + p_diam = b.p_diam + s = sparsity(g, 1) + xr = info(b, x) + yr = info(b, y) + u1max, u2max, v1nmax, v2nmax = estimator_extrema(xr, yr, s, dP) + z = xv*yv + wIntv = z.Intv + if (u1max < xv.Intv.hi) || (u2max < yv.Intv.hi) + u1cv, u2cv, u1cvg, u2cvg = estimator_under(xv, yv, xr, yr, s, dp, dP, p_rel, p_diam) + za_l = McCormick.mult_apriori_kernel(xv, yv, wIntv, u1cv, u2cv, u1max, u2max, u1cvg, u2cvg) + z = z ∩ za_l + end + if (v1nmax > -xv.Intv.lo) || (v2nmax > -yv.Intv.lo) + v1ccn, v2ccn, v1ccgn, v2ccgn = estimator_over(xv, yv, xr, yr, s, dp, dP, p_rel, p_diam) + za_u = McCormick.mult_apriori_kernel(-xv, -yv, wIntv, v1ccn, v2ccn, v1nmax, v2nmax, v1ccgn, v2ccgn) + z = z ∩ za_u + end + else + z = xv*yv + end + elseif xset_ynum(b, x, y) + z = set(b, 
x)*num(b, y) + else + z = num(b, x)*set(b, y) + end + b[k] = cut(z, set(b,k), b.ic.v, b.ϵ_sg, sparsity(g, k), b.cut, false) + else + b[k] = num(b, x)*num(b, y) + end +end + +function fprop_n!(t::Relax, ::Val{MULT}, g::DAT, b::RelaxCache{V,N,T}, k::Int) where {V,N,T<:RelaxTag} + + z = one(MC{N,T}) + znum = one(Float64) + numval = true + if b.use_apriori_mul + zr = one(V) + dp = b.dp + dP = b.dP + s = sparsity(g, 1) + for (q,i) in enumerate(children(g, k)) + if is_num(b, i) + znum = znum*num(b, i) + else + numval = false + x = set(b, i) + xr = info(b, i) + u1max, u2max, v1nmax, v2nmax = estimator_extrema(zr, xr, s, dP) + zv = z*x + wIntv = zv.Intv + if (u1max < z.Intv.hi) || (u2max < x.Intv.hi) + u1cv, u2cv, u1cvg, u2cvg = estimator_under(zr, xr, s, dp, dP) + za_l = McCormick.mult_apriori_kernel(z, x, wIntv, u1cv, u2cv, u1max, u2max, u1cvg, u2cvg) + zv = zv ∩ za_l + end + if (v1nmax > -z.Intv.lo) || (v2nmax > -x.Intv.lo) + v1ccn, v2ccn, v1ccgn, v2ccgn = estimator_under(zr, xr, s, dp, dP) + za_u = McCormick.mult_apriori_kernel(-z, -x, wIntv, v1ccn, v2ccn, v1nmax, v2nmax, v1ccgn, v2ccgn) + zv = zv ∩ za_u + end + zr = zr*xr + zv = cut(zv, zv, b.ic.v, b.ϵ_sg, sparsity(g, i), b.cut, false) + z = zv + end + end + else + for (q,i) in enumerate(children(g, k)) + if is_num(b, i) + znum = znum*num(b, i) + else + numval = false + x = set(b, i) + z = z*x + z = cut(z, z, b.ic.v, b.ϵ_sg, sparsity(g, i), b.cut, false) + end + end + end + if numval + b[k] = znum + else + z = z*znum + b[k] = z + end +end + +for F in (PLUS, MULT, MIN, MAX) + @eval function fprop!(t::Relax, v::Val{$F}, g::DAT, b::RelaxCache{V,N,T}, k) where {V,N,T<:RelaxTag} + is_binary(g, k) ? 
fprop_2!(Relax(), Val($F), g, b, k) : fprop_n!(Relax(), Val($F), g, b, k) + end +end +function fprop!(t::Relax, v::Val{DIV}, g::DAT, b::RelaxCache{V,N,T}, k) where {V,N,T<:RelaxTag} + fprop_2!(Relax(), v, g, b, k) +end + +function fprop!(t::Relax, v::Val{POW}, g::DAT, b::RelaxCache{V,N,T}, k::Int) where {V,N,T<:RelaxTag} + x = child(g, 1, k) + y = child(g, 2, k) + if is_num(b, y) && isone(num(b, y)) + b[k] = set(b, x) + elseif is_num(b,y) && iszero(num(b, y)) + b[k] = one(MC{N,T}) + elseif !xy_num(b, x, y) + if xyset(b, x, y) + z = set(b, x)^set(b, y) + elseif xset_ynum(b, x, y) + z = set(b, x)^num(b, y) + else + z = num(b, x)^set(b, y) + end + b[k] = cut(z, set(b,k), b.ic.v, b.ϵ_sg, sparsity(g, k), b.cut, false) + else + b[k] = num(b, x)^num(b, y) + end +end + +function fprop!(t::Relax, v::Val{USER}, g::DAT, b::RelaxCache{V,N,T}, k::Int) where {V,N,T<:RelaxTag} + f = user_univariate_operator(g, first_index(g, k)) + x = child(g, 1, k) + if is_num(b, x) + b[k] = f(num(b, x)) + else + z = f(set(b, x)) + b[k] = cut(z, set(b, k), b.ic.v, zero(Float64), sparsity(g, k), b.cut, b.post) + end +end + +function fprop!(t::Relax, v::Val{USERN}, g::DAT, b::RelaxCache{V,N,T}, k::Int) where {V,N,T<:RelaxTag} + mv = user_multivariate_operator(g, first_index(g, k)) + n = arity(g, k) + set_input = _set_input(b, n) + num_input = _num_input(b, n) + anysets = false + i = 1 + for c in children(g, k) + if is_num(b, c) + x = num(b, c) + if !isinf(x) + set_input[i] = MC{N,T}(x) + num_input[i] = x + end + else + set_input[i] = set(b, c) + anysets = true + end + i += 1 + end + if anysets + z = MOI.eval_objective(mv, set_input)::MC{N,T} + b[k] = cut(z, set(b, k), b.ic.v, zero(Float64), sparsity(g,k), b.cut, b.post) + else + b[k] = MOI.eval_objective(mv, num_input) + end +end + +for ft in UNIVARIATE_ATOM_TYPES + f = UNIVARIATE_ATOM_DICT[ft] + (f == :user || f == :+ || f == :-) && continue + @eval function fprop!(t::Relax, v::Val{$ft}, g::DAT, b::RelaxCache{V,N,T}, k) where {V,N,T<:RelaxTag} + 
x = child(g, 1, k) + if is_num(b, x) + return b[k] = ($f)(num(b, x)) + else + z = ($f)(set(b, x)) + b[k] = cut(z, set(b, k), b.ic.v, zero(Float64), sparsity(g,k), b.cut, b.post) + end + end +end + +for (F, f) in ((LOWER_BND, :lower_bnd), (UPPER_BND, :upper_bnd)) + @eval function fprop!(t::Relax, v::Val{$F}, g::DAT, b::RelaxCache{V,N,T}, k) where {V,N,T<:RelaxTag} + y = child(g, 2, k) + if is_num(b, y) + z = set(b, child(g, 1, k)) + z = ($f)(z, num(b, y)) + b[k] = cut(z, set(b, k), b.ic.v, zero(Float64), sparsity(g, k), b.cut, b.post) + end + end +end + +function fprop!(t::Relax, v::Val{BND}, g::DAT, b::RelaxCache{V,N,T}, k) where {V,N,T<:RelaxTag} + z = set(b, child(g, 1, k)) + y = child(g, 2, k) + r = child(g, 3, k) + if is_num(b, y) && is_num(b, r) + z = bnd(z, num(b, y),num(b, r)) + end + b[k] = cut(z, set(b, k), b.ic.v, zero(Float64), sparsity(g,k), b.cut, b.post) +end + +function f_init!(t::Relax, g::DAT, b::RelaxCache) + for k = node_count(g):-1:1 + c = node_class(g, k) + (c == EXPRESSION) && fprop!(t, Expression(), g, b, k) + (c == VARIABLE) && fprop!(t, Variable(), g, b, k) + (c == SUBEXPRESSION) && fprop!(t, Subexpression(), g, b, k) + b._info[k] = set(b, k) + end + nothing +end \ No newline at end of file diff --git a/src/eago_optimizer/functions/nonlinear/composite_relax/forward_user_function.jl b/src/eago_optimizer/functions/nonlinear/composite_relax/forward_user_function.jl new file mode 100644 index 00000000..36b77f4b --- /dev/null +++ b/src/eago_optimizer/functions/nonlinear/composite_relax/forward_user_function.jl @@ -0,0 +1,69 @@ +#= +# TODO: Infer dimensionality of user function from context type +struct RelaxMeta{N,T} + p::SVector{N,T} + pL::SVector{N,T} + pU::SVector{N,T} + v::VariableValues{Float64} + s::Vector{Int} + ϵ::Float64 + subgradient_refinement::Bool +end + +@context RelaxCtx + +function ctx_subgradient_refine(c::RelaxCtx{RelaxMeta{N,T},Nothing}, z::MC{N,T}) where {N,T} + m = ctx.metadata + v = ctx.metadata.v + s = ctx.metadata.s + ϵ = 
ctx.metadata.ϵ + return m.subgradient_refinement ? set_value_post(z, m.v, m.s, m.ϵ), z +end + +for k in UNIVARIATE_ATOM_TYPES + f = UNIVARIATE_ATOM_DICT[k] + @eval function Cassette.overdub(ctx::RelaxCtx{RelaxMeta{N,T},Nothing}, ::typeof($f), x::MC{N,T}) where {N,T} + return ctx_subgradient_refine(ctx, ($f)(x)) + end +end + +for k in BIVARIATE_ATOM_TYPES + f = BIVARIATE_ATOM_DICT[k] + @eval function Cassette.overdub(ctx::RelaxCtx{RelaxMeta{N,T},Nothing}, ::typeof($f), x::MC{N,T}, y::NumberNotRelax) where {N,T} + return ctx_subgradient_refine(ctx, ($f)(x, y)) + end + @eval function Cassette.overdub(ctx::RelaxCtx{RelaxMeta{N,T},Nothing}, ::typeof($f), y::NumberNotRelax, x::MC{N,T}) where {N,T} + return ctx_subgradient_refine(ctx, ($f)(y, x)) + end +end + +#= +for k in NARITY_ATOM_TYPES + f = NARITY_ATOM_DICT[k] + @eval function Cassette.overdub(ctx::RelaxCtx{RelaxMeta{N,T},Nothing}, ::typeof($f), ...) where {N,T} + end +end +=# + +# commonly used storage objects automatically get promoted to use the set-valued storage +Cassette.overdub(::F_RELAX_CTX{T}, ::typeof(zeros), n::Int) where {T<:Number} = zeros(T, n) +Cassette.overdub(::F_RELAX_CTX{T}, ::typeof(zeros), dims...) where {T<:Number} = zeros(T, dims...) +Cassette.overdub(::F_RELAX_CTX{T}, ::typeof(fill), v::Any, dims::Int...) where {T<:Number} = fill(convert(T,v), dims) +function Cassette.overdub(::F_RELAX_CTX{T}, ::typeof(hcat), A::AbstractArray, Bs::AbstractArray...) where {T<:Number} + vA = hcat(A,Bs...) + vR = zeros(T, size(vA)...) + return copyto!(vR,vA) +end +function Cassette.overdub(::F_RELAX_CTX{T}, ::typeof(vcat), A::AbstractArray, Bs::AbstractArray...) where {T<:Number} + vA = vcat(A,Bs...) + vR = zeros(T, size(vA)...) + return copyto!(vR,vA) +end +=# +#= +# solution object +function Cassette.overdub(::F_RELAX_CTX{T}, ::typeof(ODEProblem), f, u0, tspan) where {T<:Number} +end +function Cassette.overdub(::F_RELAX_CTX{T}, ::typeof(solve), prob, alg, kwags...) 
where {T<:Number}
end
=#
diff --git a/src/eago_optimizer/functions/nonlinear/composite_relax/reverse_propagation.jl b/src/eago_optimizer/functions/nonlinear/composite_relax/reverse_propagation.jl
new file mode 100644
index 00000000..06e93069
--- /dev/null
+++ b/src/eago_optimizer/functions/nonlinear/composite_relax/reverse_propagation.jl
@@ -0,0 +1,189 @@
# Copyright (c) 2018: Matthew Wilhelm & Matthew Stuber.
# This code is licensed under MIT license (see LICENSE.md for full details)
#############################################################################
# EAGO
# A development environment for robust and global optimization
# See https://github.com/PSORLab/EAGO.jl
#############################################################################
# src/eago_optimizer/functions/nonlinear/reverse_pass.jl
# Functions used to compute reverse pass of nonlinear functions.
#############################################################################

"""
$(FUNCTIONNAME)

Initializes the reverse pass by intersecting the relaxation stored at the root
node with the sink bound `g.sink_bnd`. Returns `false` if the intersection is
empty (i.e. the node is proven infeasible), `true` otherwise.
"""
function r_init!(t::Relax, g::DAT, b::RelaxCache{V,N,T}) where {V,N,T<:RelaxTag}
    feasible = true
    if !is_num(b, 1)
        # NOTE(review): the original returned `!isempty(z)` with `z` undefined;
        # the intersection is now bound to `z` before the feasibility check.
        z = set(b, 1) ∩ g.sink_bnd
        b[1] = z
        feasible = !isempty(z)
    end
    return feasible
end

# Reverse rule for a variable node: rebuild the variable's relaxation from the
# current point and bounds, intersect with the stored interval on the first
# evaluation, and report infeasibility via the return flag.
function rprop!(t::Relax, v::Variable, g::DAT, c::RelaxCache{V,N,T}, k) where {V,N,T<:RelaxTag}
    i = first_index(g, k)
    x = val(c, i)
    z = var_set(MC{N,T}, rev_sparsity(g, i, k), x, x, lbd(c, i), ubd(c, i))
    if first_eval(c)
        z = z ∩ interval(c, k)
    end
    c[k] = z
    return !isempty(z)
end

# Reverse rule for a subexpression node: store the current set back into the
# subexpression table; no contraction is attempted.
function rprop!(t::Relax, v::Subexpression, g::DAT, c::RelaxCache{V,N,T}, k) where {V,N,T<:RelaxTag}
    store_subexpression!(c, set(c, k), first_index(g, k))
    return true
end

# Maximum number of arguments contracted per n-ary node in one reverse pass.
const MAX_ASSOCIATIVE_REVERSE = 6

"""
$(FUNCTIONNAME)

Updates storage tapes with reverse evaluation of node representing `n = x + y` which updates x & y.
+""" +function rprop_2!(t::Relax, v::Val{PLUS}, g::DAT, c::RelaxCache{V,N,T}, k) where {V,N,T<:RelaxTag} + is_num(c, k) && return true + x = child(g, 1, k) + y = child(g, 2, k) + if xset_ynum(b, x, y) + b, a, q = IntervalContractors.plus_rev(interval(c, k), interval(c, x), num(c, y)) + elseif xnum_yset(b, x, y) + b, a, q = IntervalContractors.plus_rev(interval(c, k), num(c, x), interval(c, y)) + else + b, a, q = IntervalContractors.plus_rev(interval(c, k), interval(c, x), interval(c, y)) + end + if !is_num(c, x) + isempty(a) && return false + b[x] = MC{N,T}(a) + end + if !is_num(c, y) + isempty(q) && return false + b[y] = MC{N,T}(q) + end + return true +end + +""" +$(FUNCTIONNAME) + +Updates storage tapes with reverse evalution of node representing `n = +(x,y,z...)` which updates x, y, z and so on. +""" +function rprop_n!(t::Relax, v::Val{PLUS}, g::DAT, c::RelaxCache{V,N,T}, k::Int) where {V,N,T<:RelaxTag} + # out loops makes a temporary sum (minus one argument) + # a reverse is then compute with respect to this argument + count = 0 + children_idx = children(g, k) + for i in children_idx + is_num(c, i) && continue # don't contract a number valued argument + (count >= MAX_ASSOCIATIVE_REVERSE) && break + tsum = zero(MC{N,T}) + count += 1 + for j in children_idx + if j != i + if is_num(c, j) + tsum += num(c, j) + else + tsum += set(c, j) + end + end + end + _, w, _ = IntervalContractors.plus_rev(interval(c, k), interval(c, i), interval(tsum)) + isempty(w) && (return false) + c[i] = MC{N,T}(w) + end + return true +end + +""" +$(FUNCTIONNAME) + +Updates storage tapes with reverse evalution of node representing `n = x * y` which updates x & y. 
+""" +function rprop_2!(t::Relax, v::Val{MULT}, g::DAT, c::RelaxCache{V,N,T}, k::Int) where {V,N,T<:RelaxTag} + + is_num(c,k) && (return true) + x = child(g, 1, k) + y = child(g, 2, k) + + if xset_ynum(b, x, y) + c, a, q = IntervalContractors.mul_rev(interval(c, k), interval(c, x), num(c, y)) + elseif xnum_yset(b, x, y) + c, a, q = IntervalContractors.mul_rev(interval(c, k), num(c, x), interval(c, y)) + else + c, a, q = IntervalContractors.mul_rev(interval(c, k), interval(c, x), interval(c, y)) + end + + if !is_num(c, x) + isempty(a) && (return false) + c[x] = MC{N,T}(a) + end + if !is_num(c, x) + isempty(q) && (return false) + c[y] = MC{N,T}(q) + end + return true +end + +""" +$(FUNCTIONNAME) + +Updates storage tapes with reverse evalution of node representing `n = *(x,y,z...)` which updates x, y, z and so on. +""" +function rprop_n!(t::Relax, v::Val{MULT}, g::DAT, c::RelaxCache{V,N,T}, k::Int) where {V,N,T<:RelaxTag} + # a reverse is then compute with respect to this argument + count = 0 + children_idx = children(g, k) + for i in children_idx + is_num(b, i) && continue # don't contract a number valued argument + (count >= MAX_ASSOCIATIVE_REVERSE) && break + tmul = one(MC{N,T}) + count += 1 + for j in children_idx + if i != j + if is_num(b, j) + tmul *= num(b, j) + else + tmul *= set(b, j) + end + end + end + _, w, _ = IntervalContractors.mul_rev(interval(b, k), interval(b, c), interval(tmul)) + isempty(w) && (return false) + c[i] = MC{N,T}(w) + end + return true +end + +for (f, fc, F) in ((-, MINUS, IntervalContractors.minus_rev), + (^, POW, IntervalContractors.power_rev), + (/, DIV, IntervalContractors.div_rev)) + @eval function rprop!(t::Relax, v::Val{$fc}, g::DAT, b::RelaxCache{V,N,T}, k) where {V,N,T<:RelaxTag} + is_num(b,k) && (return true) + x = child(g, 1, k) + y = child(g, 2, k) + + if xset_ynum(b, x, y) + z, u, v = ($F)(interval(b, k), interval(b, x), num(b, y)) + elseif xnum_yset(b, x, y) + z, u, v = ($F)(interval(b, k), num(b, x), interval(b, y)) + 
else + z, u, v = ($F)(interval(b, k), interval(b, x), interval(b, y)) + end + if !is_num(c, x) + isempty(a) && (return false) + b[x] = MC{N,T}(u) + end + if !is_num(c, y) + isempty(b) && (return false) + b[y] = MC{N,T}(v) + end + return true + end +end + +rprop!(t::Relax, v::Val{USER}, g::DAT, b::RelaxCache, k::Int) = true +rprop!(t::Relax, v::Val{USERN}, g::DAT, b::RelaxCache, k::Int) = true + +for ft in UNIVARIATE_ATOM_TYPES + f = UNIVARIATE_ATOM_DICT[ft] + (f == :user || f == :+ || f == :-) && continue + @eval rprop!(t::Relax, v::Val{$ft}, g::DAT, b::RelaxCache, k::Int) = true +end diff --git a/src/eago_optimizer/functions/nonlinear/composite_relax/reverse_user_function.jl b/src/eago_optimizer/functions/nonlinear/composite_relax/reverse_user_function.jl new file mode 100644 index 00000000..b6ef84e6 --- /dev/null +++ b/src/eago_optimizer/functions/nonlinear/composite_relax/reverse_user_function.jl @@ -0,0 +1,107 @@ +#= +# TODO: Infer dimensionality of user function from context type +mutable struct RevRelaxMeta{T} end +Cassette.@context RelaxCtx + +macro scalar_relax_rule(f, rev) + esc(quote + function Cassette.overdub(::RelaxCtx{RelaxMeta{T}}, ::typeof($f), x::VRev{T,F}) where {T<:Number,F} + r = y -> x.rev(_val(x) ∩ $rev) + return VRev{T,typeof(r)}(($f)(x), r) + end + end) +end + +@norev_scalar(RelaxCtx, identity) +@norev_scalar(RelaxCtx, one) +@norev_scalar(RelaxCtx, zero) +@norev_scalar(RelaxCtx, transpose) + +@scalar_set_rule(sqrt, y^2) +@scalar_set_rule(real, y) + +@scalar_set_rule(acos, cos(y)) +@scalar_set_rule(acot, cot(y)) +@scalar_set_rule(acsc, csc(y)) +@scalar_set_rule(asec, sec(y)) +@scalar_set_rule(asin, sin(y)) +@scalar_set_rule(atan, tan(y)) + +@scalar_set_rule(acosd, cosd(y)) +@scalar_set_rule(acotd, cotd(y)) +@scalar_set_rule(acscd, cscd(y)) +@scalar_set_rule(asecd, secd(y)) +@scalar_set_rule(asind, sind(y)) +@scalar_set_rule(atand, tand(y)) + +@scalar_set_rule(asinh, sinh(y)) +@scalar_set_rule(acosh, cosh(y)) +@scalar_set_rule(atanh, tanh(y)) 
+@scalar_set_rule(asech, sech(y)) +@scalar_set_rule(acsch, csch(y)) +@scalar_set_rule(acoth, coth(y)) + +@scalar_set_rule(sinh, asinh(y)) +@scalar_set_rule(tanh, atanh(y)) + +# has point discontinuity (& unbounded) +@scalar_set_rule(csch, acsch(y)) +@scalar_set_rule(coth, acoth(y)) + + + +function Cassette.overdub(::RelaxCtx{RelaxMeta{T}}, ::typeof(zeros), n::Int) where {T<:Number} + return zeros(VRev(T), n) +end +function Cassette.overdub(::RelaxCtx{RelaxMeta{T}}, ::typeof(zeros), dims...) where {T<:Number} + return zeros(VRev(T), dims...) +end + +function Cassette.overdub(ctx::RelaxCtx{RelaxMeta{T}}, ::typeof(hcat), A...) where {T<:Number} + vA = hcat(A...) + sz = size(vA) + vR = zeros(VRev, sz...) + vR[:] = vA[:] + return vR +end +function Cassette.overdub(ctx::RelaxCtx{RelaxMeta{T}}, ::typeof(vcat), A...) where {T<:Number} + vA = vcat(A...) + sz = size(vA) + vR = zeros(VRev, sz...) + vR[:] = vA[:] + return vR +end + +# Complex(), Real(), hypot, fma, muladd, rem2pi, mod, deg2rad, rad2deg, ldexp + +#= + +@scalar_rule cotd(x) -(π / oftype(x, 180)) * (1 + Ω ^ 2) +@scalar_rule cscd(x) -(π / oftype(x, 180)) * Ω * cotd(x) +@scalar_rule csch(x) -(coth(x)) * Ω +@scalar_rule sec(x) Ω * tan(x) +@scalar_rule secd(x) (π / oftype(x, 180)) * Ω * tand(x) +@scalar_rule sech(x) -(tanh(x)) * Ω + +@scalar_rule acot(x) -(inv(1 + x ^ 2)) +@scalar_rule acsc(x) -(inv(x ^ 2 * sqrt(1 - x ^ -2))) +@scalar_rule acsc(x::Real) -(inv(abs(x) * sqrt(x ^ 2 - 1))) +@scalar_rule asec(x) inv(x ^ 2 * sqrt(1 - x ^ -2)) +@scalar_rule asec(x::Real) inv(abs(x) * sqrt(x ^ 2 - 1)) + +@scalar_rule cosd(x) -(π / oftype(x, 180)) * sind(x) +@scalar_rule cospi(x) -π * sinpi(x) +@scalar_rule sind(x) (π / oftype(x, 180)) * cosd(x) +@scalar_rule sinpi(x) π * cospi(x) +@scalar_rule tand(x) (π / oftype(x, 180)) * (1 + Ω ^ 2) + +@scalar_rule sinc(x) cosc(x) + +@scalar_rule round(x) zero(x) +@scalar_rule floor(x) zero(x) +@scalar_rule ceil(x) zero(x) + +=# + +#@scalar_set_rule(imag(x::Real) Zero() +=# \ No newline 
at end of file
diff --git a/src/eago_optimizer/functions/nonlinear/composite_relax/utilities.jl b/src/eago_optimizer/functions/nonlinear/composite_relax/utilities.jl
new file mode 100644
index 00000000..66619ef3
--- /dev/null
+++ b/src/eago_optimizer/functions/nonlinear/composite_relax/utilities.jl
@@ -0,0 +1,151 @@

# Evaluates the affine expansion fx0 + ∇fx0⋅dx over the sparsity pattern `s`
# at a point displacement `dx`.
function affine_expand_del(dx::Vector{Float64}, fx0::Float64, ∇fx0::SVector{N,Float64}, s::Vector{Int}) where N
    v = fx0
    for i=1:N
        v += ∇fx0[i]*dx[s[i]]
    end
    return v
end
# Interval-displacement variant: accumulates the upper value of each term by
# choosing the endpoint of dx[s[i]] matching the sign of the coefficient.
function affine_expand_del(dx::Vector{Interval{Float64}}, fx0::Float64, ∇fx0::SVector{N,Float64}, s::Vector{Int}) where N
    v = fx0
    for i = 1:N
        t = ∇fx0[i]
        tdx = dx[s[i]]
        if !iszero(t)
            v += t > 0.0 ? t*tdx.hi : t*tdx.lo
        end
    end
    return v
end

# Evaluates the affine expansion fx0 + ∇fx0⋅(x - x0) at the point `x`.
function affine_expand(x::Vector{Float64}, x0::Vector{Float64}, fx0::Float64, ∇fx0::SVector{N,Float64}) where N
    v = fx0
    for i=1:N
        v += ∇fx0[i]*(x[i] - x0[i])
    end
    return v
end
# Interval variant of the affine expansion over a box `x`.
function affine_expand(x::Vector{Interval{Float64}}, x0::Vector{Float64}, fx0::Float64, ∇fx0::SVector{N,Float64}) where N
    v = Interval{Float64}(fx0)
    for i=1:N
        v += ∇fx0[i]*(x[i] - x0[i])
    end
    return v
end

# Expands an MC object from a subexpression sparsity pattern `subsparse` (length
# N1) into the full function sparsity pattern `fsparse` (length N2 ≥ N1),
# scattering the subgradients and zero-filling unmatched entries.
function expand_set(::Type{MC{N2,T}}, x::MC{N1,T}, fsparse::Vector{Int}, subsparse::Vector{Int}, cv_buffer::Vector{Float64}, cc_buffer::Vector{Float64}) where {N1, N2, T<:RelaxTag}
    cvg = x.cv_grad
    ccg = x.cc_grad
    xcount = 1
    xcurrent = subsparse[1]
    for i = 1:N2
        if (xcount <= N1) && (fsparse[i] === xcurrent)
            cv_buffer[i] = cvg[xcount]
            cc_buffer[i] = ccg[xcount]
            xcount += 1
            if xcount <= N1
                xcurrent = subsparse[xcount]
            end
        else
            # NOTE(review): the original zeroed only cv_buffer here and broke
            # out of the loop once subsparse was exhausted, leaving stale
            # entries in both buffers; every unmatched slot is now zeroed.
            cv_buffer[i] = zero(Float64)
            cc_buffer[i] = zero(Float64)
        end
    end
    cv_grad = SVector{N2,Float64}(cv_buffer)
    cc_grad = SVector{N2,Float64}(cc_buffer)
    return MC{N2,T}(x.cv, x.cc, x.Intv, cv_grad, cc_grad, x.cnst)
end

"""
$(FUNCTIONNAME)

Performs affine (subgradient-based) tightening of the interval bounds of `z`
about the reference point stored in `v`, restricted to the sparsity pattern
`s` and backed off by the tolerance `ϵ`.
"""
function set_value_post(z::MC{N,T}, v::VariableValues{Float64}, s::Vector{Int}, ϵ::Float64) where {N,T<:RelaxTag}
    l = z.cv
    u = z.cc
    lower_refinement = true
    upper_refinement = true
    @inbounds for i = 1:N
        cv_val = z.cv_grad[i]
        cc_val = z.cc_grad[i]
        i_sol = s[i]
        x_z = v.x[i_sol]
        lower_bound = _lbd(v, i_sol)
        upper_bound = _ubd(v, i_sol)
        if lower_refinement
            if cv_val > zero(Float64)
                if isinf(lower_bound)
                    !upper_refinement && break
                    lower_refinement = false
                else
                    l += cv_val*(lower_bound - x_z)
                end
            else
                if isinf(upper_bound)
                    !upper_refinement && break
                    lower_refinement = false
                else
                    l += cv_val*(upper_bound - x_z)
                end
            end
        end
        if upper_refinement
            # NOTE(review): the finiteness checks below previously tested the
            # opposite bound from the one used in each branch; they now match.
            if cc_val > zero(Float64)
                if isinf(upper_bound)
                    !lower_refinement && break
                    upper_refinement = false
                else
                    u += cc_val*(upper_bound - x_z)
                end
            else
                if isinf(lower_bound)
                    !lower_refinement && break
                    upper_refinement = false
                else
                    u += cc_val*(lower_bound - x_z)
                end
            end
        end
    end

    if lower_refinement && (z.Intv.lo + ϵ > l)
        l = z.Intv.lo
    elseif !lower_refinement
        l = z.Intv.lo
    else
        l -= ϵ
    end

    if upper_refinement && (z.Intv.hi - ϵ < u)
        u = z.Intv.hi
    elseif !upper_refinement
        u = z.Intv.hi
    else
        u += ϵ
    end

    return MC{N,T}(z.cv, z.cc, Interval{Float64}(l, u), z.cv_grad, z.cc_grad, z.cnst)
end

"""
$(FUNCTIONNAME)

Intersects the new set valued operator with the prior and performs affine bound tightening

- First forward pass: `post` should be set by user option, `is_intersect` should be false
  so that the tape overwrites existing values, and the `interval_intersect` flag could be set
  to either value.
- Forward CP pass (assumes same reference point): `post` should be set by user option,
  `is_intersect` should be true so that the tape intersects with existing values, and the
  `interval_intersect` flag should be false.
- Subsequent forward passes at new points: `post` should be set by user option,
  `is_intersect` should be true so that the tape intersects with existing values, and the
  `interval_intersect` flag should be `true` as predetermined interval bounds are valid but
  the prior values may correspond to different points of evaluation.
"""
function cut(x::MC{N,T}, z::MC{N,T}, v::VariableValues, ϵ::Float64, s::Vector{Int}, cflag::Bool, pflag::Bool) where {N,T<:RelaxTag}
    (pflag & cflag)  && (return set_value_post(x ∩ z.Intv, v, s, ϵ))
    (pflag & !cflag) && (return set_value_post(x, v, s, ϵ))
    # NOTE(review): this guard previously repeated `(pflag & cflag)` and was
    # unreachable; the no-post, intersecting case is `!pflag & cflag`.
    (!pflag & cflag) && (return x ∩ z.Intv)
    return x
end
\ No newline at end of file
diff --git a/src/eago_optimizer/functions/nonlinear/convexity/convexity.jl b/src/eago_optimizer/functions/nonlinear/convexity/convexity.jl
new file mode 100644
index 00000000..ec51f744
--- /dev/null
+++ b/src/eago_optimizer/functions/nonlinear/convexity/convexity.jl
@@ -0,0 +1,2 @@
# Placeholder: convexity forward propagation is not yet implemented.
function fprop!(t::Convexity, g::DAT, b::VexityCache)
end
\ No newline at end of file
diff --git a/src/eago_optimizer/functions/nonlinear/convexity/curvature.jl b/src/eago_optimizer/functions/nonlinear/convexity/curvature.jl
new file mode 100644
index 00000000..b20a9d35
--- /dev/null
+++ b/src/eago_optimizer/functions/nonlinear/convexity/curvature.jl
@@ -0,0 +1,124 @@
@enum(Vexity, VEX_CONST, VEX_AFFINE, VEX_CONVEX, VEX_CONCAVE, VEX_NONDCP, VEX_UNSET)

# Negation swaps convex and concave; constant, affine, non-DCP, and unset
# classifications are unchanged.
function -(v::Vexity)
    if v == VEX_CONVEX
        return VEX_CONCAVE
    elseif v == VEX_CONCAVE
        # NOTE(review): previously returned VEX_CONCAVE, so negating a concave
        # expression never yielded a convex one; corrected.
        return VEX_CONVEX
    end
    return v
end

# Vexity of a sum of two classified expressions.
function +(v::Vexity, w::Vexity)
    if (v == VEX_CONST) && (w == VEX_CONST) # constant times other vexity cases
        return v
    elseif (v == VEX_CONST) && (w == VEX_NONDCP)
        return w
    elseif (v == VEX_NONDCP) && (w == VEX_CONST)
        return v
    elseif (v == VEX_CONST)
        return w
    elseif (w == VEX_CONST)
        return v
    elseif (v == VEX_AFFINE) && (w == VEX_AFFINE) # affine times other vexity cases
        return v
    elseif (v == VEX_AFFINE) && (w == VEX_CONVEX)
        return w
    elseif (v == VEX_CONVEX) && (w == VEX_AFFINE)
        return v
    elseif (v == VEX_AFFINE) && (w == VEX_CONCAVE)
        return w
    elseif (v == VEX_CONCAVE) && (w == VEX_AFFINE) # remaining vexity cases
        return v
    elseif (v == VEX_CONVEX) && (w == VEX_CONVEX)
        return v
    elseif (v == VEX_CONCAVE) && (w == VEX_CONCAVE)
        return v
    elseif (v == VEX_CONCAVE) && (w == VEX_CONVEX)
        return VEX_NONDCP
    end
    return VEX_NONDCP # (v == VEX_CONVEX) && (w == VEX_CONCAVE)
end

curvature(f, x::Interval) = error("curvature(f,x) for $f is not implemented.")

# Classify curvature from an interval enclosure of the second derivative.
function curvature(d2f::Interval{T}, x::Interval{T}) where T
    # NOTE(review): the convex/concave labels were swapped here — a nonnegative
    # second derivative certifies convexity and a nonpositive one concavity,
    # consistent with the lists of convex/concave atoms below.
    if zero(T) <= d2f
        return VEX_CONVEX
    elseif zero(T) >= d2f
        return VEX_CONCAVE
    elseif isatomic(x)
        return VEX_CONST
    end
    return VEX_NONDCP
end

for F in (positive, negative, lower_bnd, upper_bnd, bnd, rad2deg, deg2rad)
    @eval curvature(::typeof($F), x::Interval) = VEX_AFFINE
end

curvature(::typeof(+), x::Interval, y::Interval, args...) = VEX_CONST
curvature(::typeof(+), x::Interval) = VEX_CONST

curvature(::typeof(-), x::Interval, y::Interval) = VEX_AFFINE
curvature(::typeof(-), x::Interval) = VEX_AFFINE

for F in (abs, abs2, exp, exp2, exp10, expm1, leaky_relu, xlogx, cosh)
    @eval curvature(::typeof($F), x::Interval) = VEX_CONVEX
end

for F in (log, log2, log10, log1p, acosh, sqrt)
    @eval curvature(::typeof($F), x::Interval) = VEX_CONCAVE
end

# Trigonometric curvature is determined by the sign of the second derivative.
curvature(::typeof(sin), x::Interval) = curvature(-sin(x), x)
curvature(::typeof(cos), x::Interval) = curvature(-cos(x), x)
curvature(::typeof(tan), x::Interval) = curvature(2*tan(x)*sec(x)^2, x)

# Odd atoms that are concave for x ≤ 0 and convex for x ≥ 0.
# NOTE(review): these two loops used `zero(T)` with `x::Interval` untyped, so
# `T` was undefined inside the generated methods; the parameter is now bound.
for F in (asin, acot, sinh, csch, coth, atanh, acsch, xabsx, erfinv, erfc)
    @eval function curvature(::typeof($F), x::Interval{T}) where T
        if x <= zero(T)
            return VEX_CONCAVE
        elseif x >= zero(T)
            return VEX_CONVEX
        end
        return VEX_NONDCP
    end
end

# Odd atoms that are convex for x ≤ 0 and concave for x ≥ 0.
for F in (acos, atan, cbrt, tanh, erf, erfcinv)
    @eval function curvature(::typeof($F), x::Interval{T}) where T
        if x <= zero(T)
            return VEX_CONVEX
        elseif x >= zero(T)
            return VEX_CONCAVE
        end
        return VEX_NONDCP
    end
end

# x*exp(a*x) has a single inflection at x = -2/a when `a` is a point interval.
function curvature(::typeof(xexpax), x::Interval{T}, a::Interval{T}) where T
    if isatomic(a)
        inflect_pnt = -2/a.lo
        if inflect_pnt < x
            return VEX_CONVEX
        elseif inflect_pnt > x
            return VEX_CONCAVE
        end
    end
    return VEX_NONDCP
end

# TODO: HANDLE ARGMAX/ARGMIN IS CONSTANT CASE -> VEXAFFINE CASE... (need z interval bounds?)
# NOTE(review): these used `Interval{T}` with no `where T`, which errors at
# definition time; the unused parameter is dropped.
curvature(::typeof(max), x::Interval, args...) = VEX_CONVEX
curvature(::typeof(min), x::Interval, args...) = VEX_CONCAVE

# relu is convex everywhere: identically zero for x ≤ 0, the identity for
# x ≥ 0, and convex (kinked) when the interval straddles zero.
# NOTE(review): the original returned VEX_CONST for sign-straddling intervals.
function curvature(::typeof(relu), x::Interval{T}) where T
    if x <= zero(T)
        return VEX_CONST
    elseif x >= zero(T)
        return VEX_AFFINE
    end
    return VEX_CONVEX
end

# TODO: sech
# TODO: maxtanh, pentanh, sigmoid, bisigmoid, softsign, softplus, elu
diff --git a/src/eago_optimizer/functions/nonlinear/convexity/monotonicity.jl b/src/eago_optimizer/functions/nonlinear/convexity/monotonicity.jl
new file mode 100644
index 00000000..0395b38f
--- /dev/null
+++ b/src/eago_optimizer/functions/nonlinear/convexity/monotonicity.jl
@@ -0,0 +1,167 @@
@enum(Monotonicity, MONO_CONST, MONO_NONINCR, MONO_NONDECR, MONO_NONE, MONO_UNSET)

# Composition rule: vexity of f∘g from the monotonicity of f and vexity of g.
function *(m::Monotonicity, v::Vexity)
    if m == MONO_NONDECR
        return v
    elseif m == MONO_NONINCR
        return -v
    elseif m == MONO_NONE
        if v == VEX_CONVEX
            return VEX_NONDCP
        elseif v == VEX_CONCAVE
            return VEX_NONDCP
        else
            return v
        end
    end
    return VEX_NONDCP # TODO CHECK FALLBACK RETURN
end

monotonicity(f, x::Interval) = error("monotonicity(f,x) for $f is not implemented.")

# Classify monotonicity from an interval enclosure of the first derivative.
function monotonicity(df::Interval{T}, x::Interval{T}) where T
    if zero(T) <= df
        return (MONO_NONDECR,)
    elseif zero(T) >= df
        return (MONO_NONINCR,)
    end
    return (MONO_NONE,)
end

# NOTE(review): `MONO_AFFINE` is not a member of the Monotonicity enum; these
# bound-enforcing atoms pass their argument through unchanged, so they are
# classified nondecreasing — confirm against the intended atom semantics.
for F in (positive, negative, lower_bnd, upper_bnd, bnd)
    @eval monotonicity(::typeof($F), x::Interval) = (MONO_NONDECR,)
end

function monotonicity(::typeof(+), x::Interval, y::Interval, args...)
+ ntuple(i -> MONO_NONDECR, length(args) + 2) +end +monotonicity(::typeof(+), x::Interval) = (MONO_NONDECR) + +monotonicity(::typeof(-), x::Interval, y::Interval) = (MONO_NONDECR, MONO_NONINCR) +monotonicity(::typeof(-), x::Interval) = (MONO_NONINCR,) + +function monotonicity(::typeof(/), x::Interval{T}, y::Interval{T}) where T + if v > zero(T) + return (MONO_AFFINE, MONO_NONINCR) + elseif v < zero(T) + return (MONO_AFFINE, MONO_NONDECR) + end + return (MONO_AFFINE, MONO_NONE) +end + +function monotonicity(::typeof(*), args...) + return ntuple(i -> prod_exclude(_sign, args, i)*MONO_NONDECR, length(args)) +end + +for F in (abs, abs2, cosh, sech) + @eval function monotonicity(::typeof($F), x::Interval{T}) where T + if zero(T) <= x + return (MONO_NONDECR,) + elseif zero(T) >= x + return (MONO_NONINCR,) + end + return (MONO_NONE,) + end +end + +for F in (exp, exp2, exp10, expm1, sinh, tanh, + log, log2, log10, log1p, + asin, atan, + erf, erfinv, + sqrt, cbrt, xabsx, + # rectifer & sigmoid activation functions + relu, leaky_relu, maxtanh, maxsig, pentanh, sigmoid, + bisigmoid, softsign, softplus, elu) + @eval monotonicity(::typeof($F), x::Interval) = (MONO_NONDECR,) +end + +for F in (acos, acot, erfc, erfcinv) + @eval monotonicity(::typeof($F), x::Interval) = (MONO_NONINCR,) +end + +for F in (coth, csch) + @eval function monotonicity(::typeof($F), x::Interval{T}) where T + if zero(T) <= x + return (MONO_NONINCR,) + elseif zero(T) >= x + return (MONO_NONDECR,) + end + return (MONO_NONE,) + end +end + +monotonicity(::typeof(sin), x::Interval) = monotonicity(cos(x), x) +monotonicity(::typeof(cos), x::Interval) = monotonicity(sin(x), x) +monotonicity(::typeof(tan), x::Interval) = monotonicity(sec(x)^2, x) + +function monotonicity(::typeof(asec), x::Interval{T}) where T + if one(T) <= x + return (MONO_NONDECR,) + elseif -one(T) >= x + return (MONO_NONINCR,) + end + return (MONO_NONE,) +end + +function monotonicity(::typeof(acsc), x::Interval{T}) where T + if one(T) <= x + 
return (MONO_NONINCR,) + elseif -one(T) >= x + return (MONO_NONDECR,) + end + return (MONO_NONE,) +end + +function monotonicity(::typeof(xlogx), x::Interval{T}) where T + if one(T)/MathConstants.e <= x + return (MONO_NONDECR,) + elseif one(T)/MathConstants.e >= x + return (MONO_NONINCR,) + end + return (MONO_NONE,) +end + +function monotonicity(::typeof(xexpax), x::Interval{T}, a::Interval{T}) where T + # TODO: + return (MONO_NONE,) +end + +# arity 1 implies EXPRESSION OR SELECT +function monotonicity(::Val{1}, n::NodeType, atype::AtomType, bnds::Interval) + if n == SELECT + error("Not implemented yet...") + end + if n == EXPRESSION + # binary_switch on univariate atype + end +end + +# arity >= 2 implies EXPRESSION +function monotonicity(n::NodeType, atype::AtomType, bnds::Interval, i::Int) + # binary_switch... atype +end + +function populate_monotonicity!(n, buffer, indx) + mono = _mono(buffer, indx) + if _arity(n) == 1 + @inbounds mono[1] = monotonicity(Val(1), + _node_type(n), + _atom_type(n), + _bnd(buffer, _child_id(buffer, indx))) + else + unsafe_map!(monotonicity, mono, n, _arity(n)) + end + return nothing +end + +#= +Gets monotonicity of buffer at i. Recalling from a cached value if +discovered or locked. Otherwise, populating the mono[i] field. +=# +function monotonicity(buffer::ConvexityBuffer, indx) + if _discovered(buffer, indx) + return _mono(buffer, indx) + end + populate_monotonicity!(_node(dag, indx), buffer, indx) + return _mono(buffer, i) +end diff --git a/src/eago_optimizer/functions/nonlinear/convexity/variable.jl b/src/eago_optimizer/functions/nonlinear/convexity/variable.jl new file mode 100644 index 00000000..8ac3bb60 --- /dev/null +++ b/src/eago_optimizer/functions/nonlinear/convexity/variable.jl @@ -0,0 +1,15 @@ + +# TODO: NEED VARIABLE SUBTYPES... 
+ +is_convex(::typeof(VARIABLE), x::Interval{T}) where T = true +is_concave(::typeof(VARIABLE), x::Interval{T}) where T = true + +is_loglogconvex(::typeof(VARIABLE), x::Interval{T}) where T = true +is_loglogconcave(::typeof(VARIABLE), x::Interval{T}) where T = true + +is_increasing(::typeof(VARIABLE), x::Interval{T}) where T = true +is_decreasing(::typeof(VARIABLE), x::Interval{T}) where T = false + +is_positive(::typeof(VARIABLE), x::Interval{T}) where T = x >= zero(T) +is_negative(::typeof(VARIABLE), x::Interval{T}) where T = x <= zero(T) +is_locked(::typeof(VARIABLE), x::Interval{T}) where T = one(T) ∉ x diff --git a/src/eago_optimizer/functions/nonlinear/empty_evaluator.jl b/src/eago_optimizer/functions/nonlinear/empty_evaluator.jl deleted file mode 100644 index e634f68a..00000000 --- a/src/eago_optimizer/functions/nonlinear/empty_evaluator.jl +++ /dev/null @@ -1,38 +0,0 @@ -# Copyright (c) 2018: Matthew Wilhelm & Matthew Stuber. -# This code is licensed under MIT license (see LICENSE.md for full details) -############################################################################# -# EAGO -# A development environment for robust and global optimization -# See https://github.com/PSORLab/EAGO.jl -############################################################################# -# Defines the EmptyNLPEvaluator needed to provide a constructor for -# the EAGO.Optimizer prior to loading JuMP's NLPEvaluator. 
-############################################################################# - -struct EmptyNLPEvaluator <: MOI.AbstractNLPEvaluator - _current_node::NodeBB - has_nlobj::Bool -end -EmptyNLPEvaluator() = EmptyNLPEvaluator(NodeBB(),false) -set_current_node!(x::EmptyNLPEvaluator, n::NodeBB) = () - -MOI.features_available(::EmptyNLPEvaluator) = [:Grad, :Jac, :Hess] -MOI.initialize(::EmptyNLPEvaluator, features) = nothing -MOI.eval_objective(::EmptyNLPEvaluator, x) = NaN -function MOI.eval_constraint(::EmptyNLPEvaluator, g, x) - @assert length(g) == 0 - return -end -MOI.eval_objective_gradient(::EmptyNLPEvaluator, g, x) = nothing -MOI.jacobian_structure(::EmptyNLPEvaluator) = Tuple{Int64,Int64}[] -MOI.hessian_lagrangian_structure(::EmptyNLPEvaluator) = Tuple{Int64,Int64}[] -function MOI.eval_constraint_jacobian(::EmptyNLPEvaluator, J, x) - @assert length(J) == 0 - return -end -function MOI.eval_hessian_lagrangian(::EmptyNLPEvaluator, H, x, σ, μ) - @assert length(H) == 0 - return -end - -empty_nlp_data() = MOI.NLPBlockData([], EmptyNLPEvaluator(), false) diff --git a/src/eago_optimizer/functions/nonlinear/forward_pass.jl b/src/eago_optimizer/functions/nonlinear/forward_pass.jl deleted file mode 100644 index 52a994d3..00000000 --- a/src/eago_optimizer/functions/nonlinear/forward_pass.jl +++ /dev/null @@ -1,1120 +0,0 @@ -# Copyright (c) 2018: Matthew Wilhelm & Matthew Stuber. 
-# This code is licensed under MIT license (see LICENSE.md for full details) -############################################################################# -# EAGO -# A development environment for robust and global optimization -# See https://github.com/PSORLab/EAGO.jl -############################################################################# -# src/eago_optimizer/evaluator/passes.jl -# Functions used to compute forward pass of nonlinear functions which include: -# set_value_post, overwrite_or_intersect, forward_pass_kernel, associated blocks -############################################################################# - -const FORWARD_DEBUG = false - -""" -$(FUNCTIONNAME) - -Post process set_value operator. By default, performs the affine interval cut on -a MC structure. -""" -function set_value_post(x_values::Vector{Float64}, val::MC{N,T}, lower_variable_bounds::Vector{Float64}, - upper_variable_bounds::Vector{Float64}, sparsity::Vector{Int}, - subgrad_tol::Float64) where {N, T<:RelaxTag} - - lower = val.cv - upper = val.cc - lower_refinement = true - upper_refinement = true - - for i = 1:N - - cv_val = @inbounds val.cv_grad[i] - cc_val = @inbounds val.cc_grad[i] - - i_sol = @inbounds sparsity[i] - x_val = @inbounds x_values[i_sol] - lower_bound = @inbounds lower_variable_bounds[i_sol] - upper_bound = @inbounds upper_variable_bounds[i_sol] - - if lower_refinement - if cv_val > 0.0 - if isinf(lower_bound) - !upper_refinement && break - lower_refinement = false - else - lower += cv_val*(lower_bound - x_val) - #delX = sub_round(lower_bound, x_val, RoundDown) - #lower = add_round(lower, mul_round(cv_val, delX, RoundDown), RoundDown) - end - else - if isinf(upper_bound) - !upper_refinement && break - lower_refinement = false - else - lower += cv_val*(upper_bound - x_val) - #delX = sub_round(upper_bound, x_val, RoundUp) - #lower = add_round(lower, mul_round(cv_val, delX, RoundDown), RoundDown) - end - end - end - - if upper_refinement - if cc_val > 0.0 - if 
isinf(lower_bound) - !lower_refinement && break - upper_refinement = false - else - upper += cc_val*(upper_bound - x_val) - #delX = sub_round(upper_bound, x_val, RoundUp) - #upper = add_round(upper, mul_round(cc_val, delX, RoundUp), RoundUp) - end - else - if isinf(upper_bound) - !lower_refinement && break - upper_refinement = false - else - upper += cc_val*(lower_bound - x_val) - #delX = sub_round(lower_bound, x_val, RoundDown) - #upper = add_round(upper, mul_round(cc_val, delX, RoundUp), RoundUp) - end - end - end - end - - if lower_refinement && (val.Intv.lo + subgrad_tol > lower) - lower = val.Intv.lo - elseif !lower_refinement - lower = val.Intv.lo - else - lower -= subgrad_tol #sub_round(lower, subgrad_tol, RoundDown) - end - - if upper_refinement && (val.Intv.hi - subgrad_tol < upper) - upper = val.Intv.hi - elseif !upper_refinement - upper = val.Intv.hi - else - upper += subgrad_tol #add_round(upper, subgrad_tol, RoundUp) - end - - return MC{N,T}(val.cv, val.cc, Interval{Float64}(lower, upper), val.cv_grad, val.cc_grad, val.cnst) -end - -""" -$(FUNCTIONNAME) - -Intersects the new set valued operator with the prior and performs affine bound tightening - -- First forward pass: `is_post` should be set by user option, `is_intersect` should be false - so that the tape overwrites existing values, and the `interval_intersect` flag could be set - to either value. -- Forward CP pass (assumes same reference point): `is_post` should be set by user option, - `is_intersect` should be true so that the tape intersects with existing values, and the - `interval_intersect` flag should be false. -- Forward CP pass (assumes same reference point): `is_post` should be set by user option, - `is_intersect` should be true so that the tape intersects with existing values, and the - `interval_intersect` flag should be false. 
-- Subsequent forward passes at new points: is_post` should be set by user option, - `is_intersect` should be true so that the tape intersects with existing values, and the - `interval_intersect` flag should be `true` as predetermined interval bounds are valid but - the prior values may correspond to different points of evaluation. -""" -function overwrite_or_intersect(xMC::MC{N,T}, past_xMC::MC{N,T}, x::Vector{Float64}, lbd::Vector{Float64}, - ubd::Vector{Float64}, subgrad_tol::Float64, sparsity::Vector{Int}, is_post::Bool, - is_intersect::Bool, - interval_intersect::Bool) where {N,T<:RelaxTag} - - if is_post && is_intersect && interval_intersect - return set_value_post(x, xMC ∩ past_xMC.Intv, lbd, ubd, sparsity, subgrad_tol) - - elseif is_post && is_intersect && !interval_intersect - return set_value_post(x, xMC ∩ past_xMC, lbd, ubd, sparsity, subgrad_tol) - - elseif is_post && !is_intersect - return set_value_post(x, xMC, lbd, ubd, sparsity, subgrad_tol) - - elseif !is_post && is_intersect && interval_intersect - return xMC ∩ past_xMC.Intv - - elseif !is_post && is_intersect && !interval_intersect - return xMC ∩ past_xMC - - end - return xMC -end - -""" -$(FUNCTIONNAME) - -Updates storage tapes with forward evalution of node representing `n = x + y`. 
-""" -function forward_plus_binary!(k::Int64, children_arr::Vector{Int64}, children_idx::UnitRange{Int64}, - numvalued::Vector{Bool}, numberstorage::Vector{Float64}, setstorage::Vector{MC{N,T}}, - x::Vector{Float64}, lbd::Vector{Float64}, ubd::Vector{Float64}, subgrad_tol::Float64, - sparsity::Vector{Int}, - is_post::Bool, is_intersect::Bool, is_first_eval::Bool, interval_intersect::Bool) where {N,T<:RelaxTag} - - # get row indices - idx1 = first(children_idx) - idx2 = last(children_idx) - - # extract values for argument 1 - arg1_index = children_arr[idx1] - arg1_is_number = numvalued[arg1_index] - if arg1_is_number - set1 = zero(MC{N,T}) - num1 = numberstorage[arg1_index] - else - num1 = 0.0 - set1 = setstorage[arg1_index] - end - - # extract values for argument 2 - arg2_index = children_arr[idx2] - arg2_is_number = numvalued[arg2_index] - if arg2_is_number - num2 = numberstorage[arg2_index] - set2 = zero(MC{N,T}) - else - set2 = setstorage[arg2_index] - num2 = 0.0 - end - - output_is_number = arg1_is_number && arg2_is_number - - # a + b - if output_is_number - numberstorage[k] = num1 + num2 - - # x + b - elseif !arg1_is_number && arg2_is_number - outset = set1 + num2 - # is_first_eval ? (set1 + num2) : plus_kernel(set1, num2, setstorage[k].Intv) - - # a + y - elseif arg1_is_number && !arg2_is_number - outset = num1 + set2 - # is_first_eval ? (num1 + set2) : plus_kernel(num1, set2, setstorage[k].Intv) - - # x + y - else - outset = set1 + set2 - # is_first_eval ? (set1 + set2) : plus_kernel(set1, set2, setstorage[k].Intv) - - end - - numvalued[k] = output_is_number - if !output_is_number - setstorage[k] = overwrite_or_intersect(outset, setstorage[k], x, lbd, ubd, subgrad_tol, sparsity, false, - is_intersect, interval_intersect) - end - - return nothing -end - -""" -$(FUNCTIONNAME) - -Updates storage tapes with forward evalution of node representing `n = +(x, y, z,...)`. 
-""" -function forward_plus_narity!(k::Int64, children_arr::Vector{Int64}, children_idx::UnitRange{Int64}, - numvalued::Vector{Bool}, numberstorage::Vector{Float64}, setstorage::Vector{MC{N,T}}, - x::Vector{Float64}, lbd::Vector{Float64}, ubd::Vector{Float64}, - subgrad_tol::Float64, sparsity::Vector{Int}, - is_post::Bool, is_intersect::Bool, interval_intersect::Bool) where {N,T<:RelaxTag} - - - # get row indices - idx = first(children_idx) - - # extract values for argument 1 - arg_index = children_arr[idx] - output_is_number = numvalued[arg_index] - if output_is_number - tmp_set = zero(MC{N,T}) - tmp_num = numberstorage[arg_index] - else - tmp_num = 0.0 - tmp_set = setstorage[arg_index] - end - output_is_number = true - - for idx = 2:length(children_idx) - cidx = children_idx[idx] - arg_index = children_arr[cidx] - arg_is_number = numvalued[arg_index] - if arg_is_number - tmp_num += numberstorage[arg_index] - else - tmp_set += setstorage[arg_index] - end - output_is_number &= arg_is_number - end - - numvalued[k] = output_is_number - if output_is_number - numberstorage[k] = tmp_num - else - tmp_set += tmp_num - setstorage[k] = overwrite_or_intersect(tmp_set, setstorage[k], x, lbd, ubd, subgrad_tol, sparsity, false, is_intersect, - interval_intersect) - end - - return nothing -end - -""" -$(FUNCTIONNAME) - -Updates storage tapes with forward evalution for node representing `n = x*y`. 
-""" -function forward_multiply_binary!(k::Int64, children_arr::Vector{Int64}, children_idx::UnitRange{Int64}, - numvalued::Vector{Bool}, numberstorage::Vector{Float64}, setstorage::Vector{MC{N,T}}, - x::Vector{Float64}, lbd::Vector{Float64}, ubd::Vector{Float64}, subgrad_tol::Float64, - sparsity::Vector{Int}, - is_post::Bool, is_intersect::Bool, is_first_eval::Bool, interval_intersect::Bool) where {N,T<:RelaxTag} - # get row indices - idx1 = first(children_idx) - idx2 = last(children_idx) - - # extract values for argument 1 - arg1_index = children_arr[idx1] - arg1_is_number = numvalued[arg1_index] - if arg1_is_number - set1 = one(MC{N,T}) - num1 = numberstorage[arg1_index] - else - num1 = 1.0 - set1 = setstorage[arg1_index] - end - - # extract values for argument 2 - arg2_index = children_arr[idx2] - arg2_is_number = numvalued[arg2_index] - if arg2_is_number - num2 = numberstorage[arg2_index] - set2 = one(MC{N,T}) - else - set2 = setstorage[arg2_index] - num2 = 1.0 - end - - output_is_number = arg1_is_number && arg2_is_number - - # a * b - if output_is_number - numberstorage[k] = num1 * num2 - - # x * b - elseif !arg1_is_number && arg2_is_number - outset = set1*num2 #is_first_eval ? (set1 * num2) : mult_kernel(set1, num2, setstorage[k].Intv) - - # a * y - elseif arg1_is_number && !arg2_is_number - outset = num1*set2 #is_first_eval ? (num1 * set2) : mult_kernel(set2, num1, setstorage[k].Intv) - - # x * y - else - outset = set1*set2 #is_first_eval ? (set1 * set2) : mult_kernel(set1, set2, setstorage[k].Intv) - - end - - numvalued[k] = output_is_number - if !output_is_number - setstorage[k] = overwrite_or_intersect(outset, setstorage[k], x, lbd, ubd, subgrad_tol, sparsity, is_post, is_intersect, - interval_intersect) - end - - return nothing -end - - -""" -$(FUNCTIONNAME) - -Updates storage tapes with forward evalution of node representing `n = *(x, y, z,...)`. 
-""" -function forward_multiply_narity!(k::Int64, children_arr::Vector{Int64}, children_idx::UnitRange{Int64}, - numvalued::Vector{Bool}, numberstorage::Vector{Float64}, setstorage::Vector{MC{N,T}}, - x::Vector{Float64}, lbd::Vector{Float64}, ubd::Vector{Float64}, subgrad_tol::Float64, - sparsity::Vector{Int}, - is_post::Bool, is_intersect::Bool, interval_intersect::Bool) where {N,T<:RelaxTag} - # get row indices - idx = first(children_idx) - - # extract values for argument 1 - arg_index = children_arr[idx] - output_is_number = numvalued[arg_index] - if output_is_number - tmp_set = 1.0#one(MC{N,T}) - tmp_num = numberstorage[arg_index] - else - tmp_num = 1.0 - tmp_set = setstorage[arg_index] - end - - - for idx = 2:length(children_idx) - cidx = children_idx[idx] - arg_index_t = children_arr[cidx] - arg_is_number_t = numvalued[arg_index_t] - if arg_is_number_t - tmp_num = tmp_num*numberstorage[arg_index_t] - else - tmp_set = tmp_set*setstorage[arg_index_t] - end - output_is_number &= arg_is_number_t - end - - numvalued[k] = output_is_number - if output_is_number - numberstorage[k] = tmp_num - else - tmp_set *= tmp_num - setstorage[k] = overwrite_or_intersect(tmp_set, setstorage[k], x, lbd, ubd, subgrad_tol, sparsity, is_post, is_intersect, - interval_intersect) - end - - return nothing -end - -""" -$(FUNCTIONNAME) - -Updates storage tapes with forward evalution for node representing `n = x-y`. 
-""" -function forward_minus!(k::Int64, children_arr::Vector{Int64}, children_idx::UnitRange{Int64}, - numvalued::Vector{Bool}, numberstorage::Vector{Float64}, setstorage::Vector{MC{N,T}}, - x::Vector{Float64}, lbd::Vector{Float64}, ubd::Vector{Float64}, subgrad_tol::Float64, - sparsity::Vector{Int}, - is_post::Bool, is_intersect::Bool, is_first_eval::Bool, interval_intersect::Bool) where {N,T<:RelaxTag} - - # get row indices - idx1 = first(children_idx) - idx2 = last(children_idx) - - # extract values for argument 1 - arg1_index = children_arr[idx1] - arg1_is_number = numvalued[arg1_index] - if arg1_is_number - set1 = zero(MC{N,T}) - num1 = numberstorage[arg1_index] - else - num1 = 0.0 - set1 = setstorage[arg1_index] - end - - # extract values for argument 2 - arg2_index = children_arr[idx2] - arg2_is_number = numvalued[arg2_index] - if arg2_is_number - num2 = numberstorage[arg2_index] - set2 = zero(MC{N,T}) - else - set2 = setstorage[arg2_index] - num2 = 0.0 - end - - output_is_number = arg1_is_number && arg2_is_number - - # a - b - if output_is_number - numberstorage[k] = num1 - num2 - - # x - b - elseif !arg1_is_number && arg2_is_number - outset = set1 - num2 #is_first_eval ? (set1 - num2) : minus_kernel(set1, num2, setstorage[k].Intv) - - # a - y - elseif arg1_is_number && !arg2_is_number - outset = num1 - set2 #is_first_eval ? (num1 - set2) : minus_kernel(num1, set2, setstorage[k].Intv) - - # x - y - else - outset = set1 - set2 #is_first_eval ? (set1 - set2) : minus_kernel(set1, set2, setstorage[k].Intv) - - end - - numvalued[k] = output_is_number - if !output_is_number - setstorage[k] = overwrite_or_intersect(outset, setstorage[k], x, lbd, ubd, subgrad_tol, sparsity, is_post, is_intersect, - interval_intersect) - end - - return nothing -end - -""" -$(FUNCTIONNAME) - -Updates storage tapes with forward evalution for node representing `n = x^y`. 
-""" -function forward_power!(k::Int64, children_arr::Vector{Int64}, children_idx::UnitRange{Int64}, - numvalued::Vector{Bool}, numberstorage::Vector{Float64}, setstorage::Vector{MC{N,T}}, - x::Vector{Float64}, lbd::Vector{Float64}, ubd::Vector{Float64}, - subgrad_tol::Float64, sparsity::Vector{Int}, - is_post::Bool, is_intersect::Bool, is_first_eval::Bool, interval_intersect::Bool, - ctx::GuardCtx) where {N,T<:RelaxTag} - - # get row indices - idx1 = first(children_idx) - idx2 = last(children_idx) - - # extract values for argument 1 - arg1_index = children_arr[idx1] - arg1_is_number = numvalued[arg1_index] - if arg1_is_number - set1 = zero(MC{N,T}) - num1 = numberstorage[arg1_index] - else - num1 = 0.0 - set1 = setstorage[arg1_index] - end - - # extract values for argument 2 - arg2_index = children_arr[idx2] - arg2_is_number = numvalued[arg2_index] - if arg2_is_number - num2 = numberstorage[arg2_index] - set2 = zero(MC{N,T}) - else - set2 = setstorage[arg2_index] - num2 = 0.0 - end - - # is output a number (by closure of the reals) - output_is_number = arg1_is_number && arg2_is_number - numvalued[k] = output_is_number - - # x^1 = x - if arg2_is_number && (num2 == 1.0) - if arg1_is_number - numberstorage[k] = num1 - else - setstorage[k] = set1 - end - return nothing - - # x^0 = 1 - elseif arg2_is_number && (num2 == 0.0) - if arg1_is_number - numberstorage[k] = 1.0 - else - setstorage[k] = zero(MC{N,T}) - end - return nothing - - else - # a^b - if arg1_is_number && arg2_is_number - numberstorage[k] = num1^num2 - - # x^b - elseif !arg1_is_number && arg2_is_number - outset = set1^num2 - #is_first_eval ? pow(set1, num2) : ^(set1, num2, setstorage[k].Intv) - - # a^y - elseif arg1_is_number && !arg2_is_number - guard_on = ctx.metadata.guard_on - outset = guard_on ? overdub(ctx, ^, num1, set2) : num1^set2 # overdub(ctx, pow, num1, set2) - #is_first_eval ? 
overdub(ctx, pow, num1, set2) : overdub(ctx, ^, num1, set2, setstorage[k].Intv) - - # x^y - elseif !arg1_is_number && !arg2_is_number - guard_on = ctx.metadata.guard_on - outset = guard_on ? overdub(ctx, ^, set1, set2) : set1^set2 - #is_first_eval ? overdub(ctx, pow, set1, set2) : overdub(ctx, ^, set1, set2, setstorage[k].Intv) - - end - end - - if !output_is_number - setstorage[k] = overwrite_or_intersect(outset, setstorage[k], x, lbd, ubd, subgrad_tol, sparsity, is_post, is_intersect, - interval_intersect) - end - - return nothing -end - -""" -$(FUNCTIONNAME) - -Updates storage tapes with forward evalution for node representing `n = x/y`. -""" -function forward_divide!(k::Int64, children_arr::Vector{Int64}, children_idx::UnitRange{Int64}, - numvalued::Vector{Bool}, numberstorage::Vector{Float64}, setstorage::Vector{MC{N,T}}, - x::Vector{Float64}, lbd::Vector{Float64}, ubd::Vector{Float64}, subgrad_tol::Float64, - sparsity::Vector{Int}, - is_post::Bool, is_intersect::Bool, is_first_eval::Bool, interval_intersect::Bool, - ctx::GuardCtx) where {N,T<:RelaxTag} - - # get row indices - idx1 = first(children_idx) - idx2 = last(children_idx) - - # extract values for argument 1 - arg1_index = children_arr[idx1] - arg1_is_number = numvalued[arg1_index] - if arg1_is_number - set1 = zero(MC{N,T}) - num1 = numberstorage[arg1_index] - else - num1 = 0.0 - set1 = setstorage[arg1_index] - end - - # extract values for argument 2 - arg2_index = children_arr[idx2] - arg2_is_number = numvalued[arg2_index] - if arg2_is_number - num2 = numberstorage[arg2_index] - set2 = zero(MC{N,T}) - else - set2 = setstorage[arg2_index] - num2 = 0.0 - end - - # is output a number (by closure of the reals)? 
- output_is_number = arg1_is_number && arg2_is_number - numvalued[k] = output_is_number - - # a/b - if output_is_number - numberstorage[k] = num1/num2 - - # x/b - elseif !arg1_is_number && arg2_is_number - - guard_on = ctx.metadata.guard_on - if guard_on - outset = Cassette.overdub(ctx, /, set1, num2) - else - outset = set1/num2 - end - # is_first_eval ? set1/num2 : div_kernel(set1, num2, setstorage[k].Intv) - - # a/y - elseif arg1_is_number && !arg2_is_number - - guard_on = ctx.metadata.guard_on - if guard_on - outset = Cassette.overdub(ctx, /, num1, set2) - else - outset = num1/set2 - end - # is_first_eval ? num1/set2 : div_kernel(num1, set2, setstorage[k].Intv) - - # x/y - else - - guard_on = ctx.metadata.guard_on - if guard_on - outset = Cassette.overdub(ctx, /, set1, set2) - else - outset = set1/set2 - end - # is_first_eval ? set1/set2 : div_kernel(set1, set2, setstorage[k].Intv) - - end - - if !output_is_number - setstorage[k] = overwrite_or_intersect(outset, setstorage[k], x, lbd, ubd, subgrad_tol, sparsity, is_post, - is_intersect, interval_intersect) - end - - return nothing -end - -""" -$(FUNCTIONNAME) - -Updates storage tapes with forward evalution for node representing `n = user_f(x, y...)`. 
-""" -function forward_user_multivariate!(k::Int64, op::Int64, children_arr::Vector{Int64}, children_idx::UnitRange{Int64}, - numvalued::Vector{Bool}, numberstorage::Vector{Float64}, setstorage::Vector{MC{N,T}}, - x::Vector{Float64}, lbd::Vector{Float64}, ubd::Vector{Float64}, subgrad_tol::Float64, - sparsity::Vector{Int}, - is_post::Bool, is_intersect::Bool, interval_intersect::Bool, ctx::GuardCtx, - user_operators::JuMP._Derivatives.UserOperatorRegistry, - num_mv_buffer::Vector{Float64}) where {N, T<:RelaxTag} - - n = length(children_idx) - evaluator = user_operators.multivariate_operator_evaluator[op - JuMP._Derivatives.USER_OPERATOR_ID_START + 1] - set_input = zeros(MC{N,T}, n) - num_input = view(num_mv_buffer, 1:n) - fill!(num_input, -Inf) - - buffer_count = 1 - output_is_number = true - for c_idx in children_idx - arg_index = children_arr[c_idx] - arg_is_number = numvalued[arg_index] - if arg_is_number - num_input[buffer_count] = numberstorage[arg_index] - else - set_input[buffer_count] = setstorage[arg_index] - end - output_is_number &= arg_is_number - buffer_count += 1 - end - - if output_is_number - numberstorage[k] = MOI.eval_objective(evaluator, num_input) - else - for i = 1:(buffer_count - 1) - if !isinf(num_input[i]) - set_input[buffer_count] = MC{N,T}(num_input[buffer_count]) - end - end - guard_on = ctx.metadata.guard_on - if guard_on - outset = Cassette.overdub(ctx, MOI.eval_objective, evaluator, set_input) - else - outset = MOI.eval_objective(evaluator, set_input) - end - setstorage[k] = overwrite_or_intersect(outset, setstorage[k], x, lbd, ubd, subgrad_tol, sparsity, is_post, is_intersect, - interval_intersect) - end - numvalued[k] = output_is_number - - return nothing -end - -""" -$(FUNCTIONNAME) - -Updates storage tapes with forward evalution for node representing `n = f(c)` where f is standard function -and `c` is a number. 
-""" -function forward_univariate_number!(k::Int64, op::Int64, arg_idx::Int, numvalued::Vector{Bool}, numberstorage::Vector{Float64}) - - tmp_num = numberstorage[arg_idx] - outnum = eval_univariate_set(op, tmp_num) - - numberstorage[k] = outnum - numvalued[k] = true - - return nothing -end - -""" -$(FUNCTIONNAME) - -Updates storage tapes with forward evalution for node representing `n = f(x)` where f is standard function -that requires a single tiepoint calculation per convex/concave relaxation (e.g. tan). -""" -function forward_univariate_tiepnt_1!(k::Int64, op::Int64, child_idx::Int64, setstorage::Vector{V}, - x::Vector{Float64}, lbd::Vector{Float64}, ubd::Vector{Float64}, subgrad_tol::Float64, - sparsity::Vector{Int}, - tpdict::Dict{Int64, Tuple{Int64,Int64,Int64,Int64}}, - tp1storage::Vector{Float64}, tp2storage::Vector{Float64}, - is_post::Bool, is_intersect::Bool, is_first_eval::Bool, interval_intersect::Bool, ctx::GuardCtx) where V - - tmp_set = setstorage[child_idx] - - tidx1, tidx2 = tpdict[k] - tp1 = tp1storage[tidx1] - tp2 = tp2storage[tidx1] - new_tie_points = tp1 === Inf - - guard_on = ctx.metadata.guard_on - if guard_on - outset, tp1, tp2 = Cassette.overdub(ctx, single_tp_set, op, tmp_set, setstorage[k], tp1, tp2, is_first_eval) - else - outset, tp1, tp2 = single_tp_set(op, tmp_set, setstorage[k], tp1, tp2, is_first_eval) - end - - if new_tie_points - tp1storage[tidx1] = tp1 - tp2storage[tidx1] = tp2 - end - - setstorage[k] = overwrite_or_intersect(outset, setstorage[k], x, lbd, ubd, subgrad_tol, sparsity, is_post, is_intersect, - interval_intersect) - return nothing -end - -""" -$(FUNCTIONNAME) - -Updates storage tapes with forward evalution for node representing `n = f(x)` where f is standard function -that requires a two tiepoint calculations per convex/concave relaxation (e.g. sin). 
-""" -function forward_univariate_tiepnt_2!(k::Int64, op::Int64, child_idx::Int64, setstorage::Vector{V}, - x::Vector{Float64}, lbd::Vector{Float64}, ubd::Vector{Float64}, - subgrad_tol::Float64, sparsity::Vector{Int}, - tpdict::Dict{Int64, Tuple{Int64,Int64,Int64,Int64}}, - tp1storage::Vector{Float64}, tp2storage::Vector{Float64}, - tp3storage::Vector{Float64}, tp4storage::Vector{Float64}, - is_post::Bool, is_intersect::Bool, is_first_eval::Bool, - interval_intersect::Bool, ctx::GuardCtx) where V - - tmp_set = setstorage[child_idx] - - # retreive previously calculated tie-points - # These are re-initialize to Inf for each box - tidx1, tidx2 = tpdict[k] - tp1 = tp1storage[tidx1] - tp2 = tp2storage[tidx1] - tp3 = tp3storage[tidx2] - tp4 = tp4storage[tidx2] - - new_tie_points = tp1 === Inf - - # Perform an evaluation of the univariate function overdubbed with Cassette.jl - guard_on = ctx.metadata.guard_on - if guard_on - outset, tp1, tp2, tp3, tp4 = Cassette.overdub(ctx, double_tp_set, op, tmp_set, setstorage[k], - tp1, tp2, tp3, tp4, is_first_eval) - else - outset, tp1, tp2, tp3, tp4 = double_tp_set(op, tmp_set, setstorage[k], tp1, tp2, tp3, tp4, is_first_eval) - end - #Cassette.overdub(ctx, double_tp_set, op, tmp_set, setstorage[k], tp1, tp2, tp3, tp4, is_first_eval) - - # Store new tiepoints if new evaluation - if new_tie_points - tp1storage[tidx1] = tp1 - tp2storage[tidx1] = tp2 - tp3storage[tidx2] = tp3 - tp4storage[tidx2] = tp4 - end - - setstorage[k] = overwrite_or_intersect(outset, setstorage[k], x, lbd, ubd, subgrad_tol, sparsity, is_post, is_intersect, - interval_intersect) - return nothing -end - -""" -$(FUNCTIONNAME) - -Updates storage tapes with forward evalution for node representing `n = user_f(x)`. 
-""" -function forward_univariate_user!(k::Int64, op::Int64, child_idx::Int64, arg_is_number::Bool, - setstorage::Vector{V}, x::Vector{Float64}, lbd::Vector{Float64}, - ubd::Vector{Float64}, subgrad_tol::Float64, sparsity::Vector{Int}, - is_post::Bool, is_intersect::Bool, is_first_eval::Bool, - interval_intersect::Bool, ctx::GuardCtx, user_operators) where V - - userop = op - JuMP._Derivatives.USER_UNIVAR_OPERATOR_ID_START + 1 - f = user_operators.univariate_operator_f[userop] - - if arg_is_number - tmp_num = setstorage[child_idx] - outnum = f(tmp_num) - numberstorage[k] = outnum - - else - tmp_set = setstorage[child_idx] - - guard_on = ctx.metadata.guard_on - if guard_on - outset = Cassette.overdub(ctx, f, tmp_set) - else - outset = f(tmp_set) - end - - setstorage[k] = overwrite_or_intersect(outset, setstorage[k], x, lbd, ubd, subgrad_tol, sparsity, - is_post, is_intersect, interval_intersect) - end - - return nothing -end - -""" -$(FUNCTIONNAME) - -Updates storage tapes with forward evalution for node representing `n = f(x)` where `f` is a standard function -that does not require a tiepoint evaluation (e.g. exp). 
-""" -function forward_univariate_other!(k::Int64, op::Int64, child_idx::Int64, setstorage::Vector{V}, - x::Vector{Float64}, lbd::Vector{Float64}, ubd::Vector{Float64}, - subgrad_tol::Float64, sparsity::Vector{Int}, is_post::Bool, - is_intersect::Bool, is_first_eval::Bool, interval_intersect::Bool, - ctx::GuardCtx) where V - - tmp_set = setstorage[child_idx] - - guard_on = ctx.metadata.guard_on - if guard_on - outset = Cassette.overdub(ctx, eval_univariate_set, op, tmp_set) - else - outset = eval_univariate_set(op, tmp_set) - end - setstorage[k] = overwrite_or_intersect(outset, setstorage[k], x, lbd, ubd, subgrad_tol, sparsity, - is_post, is_intersect, interval_intersect) - - return nothing -end - -""" -$(FUNCTIONNAME) - -""" -function expand_set(::Type{MC{N2,T}}, x::MC{N1,T}, fsparse::Vector{Int64}, - subsparse::Vector{Int64}, cv_buffer::Vector{Float64}, - cc_buffer::Vector{Float64}) where {N1 ,N2, T<:RelaxTag} - - # TODO: Perform this in a manner that does not allocate via an ntuple - # operator or via generated code - cvg = x.cv_grad - ccg = x.cc_grad - xcount = 1 - xcurrent = subsparse[1] - for i = 1:N2 - if fsparse[i] === xcurrent - cv_buffer[i] = cvg[xcount] - cc_buffer[i] = ccg[xcount] - xcount += 1 - if xcount <= N1 - xcurrent = subsparse[xcount] - else - break - end - else - cv_buffer[i] = 0.0 - end - end - cv_grad = SVector{N2,Float64}(cv_buffer) - cc_grad = SVector{N2,Float64}(cc_buffer) - return MC{N2,T}(x.cv, x.cc, x.Intv, cv_grad, cc_grad, x.cnst) -end - -""" -$(FUNCTIONNAME) - -Unpacks the `MC{N1,T}` in the subexpression to a `MC{N2,T}` where `N1` is the sparsity of the -subexpression and `N2` is the sparsity of the function tape. Note that the sparsity of the -subexpression is less than the sparsity of the function itself. This limits the storage -required by tapes but prevents reverse mode subgradient propagation through expressions -[This may be subject to change in the future once the reverse mode propagation becomes -more robust]. 
-""" -function forward_get_subexpression!(k::Int64, active_subexpr::NonlinearExpression{MC{N,T}}, - setstorage::Vector{MC{Q,T}}, numberstorage::Vector{Float64}, - numvalued::Vector{Bool}, fsparsity::Vector{Int64}, - cv_buffer::Vector{Float64}, cc_buffer::Vector{Float64}) where {N,Q,T<:RelaxTag} - - is_number = active_subexpr.isnumber[1] - if is_number - numberstorage[k] = active_subexpr.setstorage[1] - else - set_arg = active_subexpr.setstorage[1] - setstorage[k] = expand_set(MC{Q,T}, set_arg, fsparsity, active_subexpr.grad_sparsity, cv_buffer, cc_buffer) - end - numvalued[k] = is_number - - return nothing -end - -const id_to_operator = Dict(value => key for (key, value) in JuMP.univariate_operator_to_id) - -""" -$(FUNCTIONNAME) - -Performs a forward pass using the tape information passed as arguments. Each variety of node calls an associated -forward_xxx function where xxx is a descriptor. -""" -function forward_pass_kernel!(nd::Vector{JuMP.NodeData}, adj::SparseMatrixCSC{Bool,Int64}, x::Vector{Float64}, - lbd::Vector{Float64}, ubd::Vector{Float64}, sparsity::Vector{Int}, - setstorage::Vector{MC{N,T}}, numberstorage::Vector{Float64}, numvalued::Vector{Bool}, - tpdict::Dict{Int64,Tuple{Int64,Int64,Int64,Int64}}, tp1storage::Vector{Float64}, - tp2storage::Vector{Float64}, tp3storage::Vector{Float64}, tp4storage::Vector{Float64}, - user_operators::JuMP._Derivatives.UserOperatorRegistry, subexpressions::Vector{NonlinearExpression}, - func_sparsity::Vector{Int64}, reverse_sparsity::Vector{Int64}, - num_mv_buffer::Vector{Float64}, ctx::GuardCtx, - is_post::Bool, is_intersect::Bool, is_first_eval::Bool, interval_intersect::Bool, - cv_buffer::Vector{Float64}, cc_buffer::Vector{Float64}, - treat_x_as_number::Vector{Bool}, subgrad_tol::Float64) where {N, T<:RelaxTag} - - children_arr = rowvals(adj) - - FORWARD_DEBUG && println(" ") - for k = length(nd):-1:1 - - oldset = setstorage[k] - nod = nd[k] - op = nod.index - - if nod.nodetype == JuMP._Derivatives.VALUE - 
numvalued[k] = true - FORWARD_DEBUG && println("value[$op] at k = $k -> $(numberstorage[k])") - - elseif nod.nodetype == JuMP._Derivatives.PARAMETER - numvalued[k] = true - FORWARD_DEBUG && println("parameter[$op] at k = $k -> $(numberstorage[k])") - - elseif nod.nodetype == JuMP._Derivatives.VARIABLE - isa_number = treat_x_as_number[op] - numvalued[k] = isa_number - xval = x[op] - if isa_number - numberstorage[k] = xval - else - seed_index = reverse_sparsity[op] - seed_grad = seed_gradient(seed_index, Val{N}()) - lower_bnd_val = lbd[op] - upper_bnd_val = ubd[op] - xcv_eps = xval - (xval - lower_bnd_val)*1E-8 - xcc_eps = xval + (upper_bnd_val - xval)*1E-8 - xMC = MC{N,T}(xcv_eps, xcc_eps, Interval{Float64}(lower_bnd_val, upper_bnd_val), - seed_grad, seed_grad, false) - setstorage[k] = is_first_eval ? xMC : (xMC ∩ setstorage[k].Intv) - end - FORWARD_DEBUG && println("variable[$op] at k = $k -> $(setstorage[k])") - elseif nod.nodetype == JuMP._Derivatives.SUBEXPRESSION - active_subexpr = subexpressions[op] - forward_get_subexpression!(k, active_subexpr, setstorage, numberstorage, numvalued, func_sparsity, - cv_buffer, cc_buffer) - - elseif nod.nodetype == JuMP._Derivatives.CALL - - children_idx = nzrange(adj, k) - n_children = length(children_idx) - - # :+ with arity two or greater - if op === 1 - n = length(children_idx) - if n === 2 - forward_plus_binary!(k, children_arr, children_idx, numvalued, numberstorage, - setstorage, x, lbd, ubd, subgrad_tol, sparsity, is_post, is_intersect, is_first_eval, - interval_intersect) - else - forward_plus_narity!(k, children_arr, children_idx, numvalued, numberstorage, - setstorage, x, lbd, ubd, subgrad_tol, sparsity, is_post, is_intersect, interval_intersect) - end - FORWARD_DEBUG && println("plus[$n] at k = $k -> $(setstorage[k])") - - # :- with arity two - elseif op === 2 - forward_minus!(k, children_arr, children_idx, numvalued, numberstorage, - setstorage, x, lbd, ubd, subgrad_tol, sparsity, is_post, is_intersect, 
is_first_eval, - interval_intersect) - FORWARD_DEBUG && println("minus at k = $k -> $(setstorage[k])") - - # :* with arity two or greater - elseif op === 3 - n = length(children_idx) - if n === 2 - forward_multiply_binary!(k, children_arr, children_idx, numvalued, - numberstorage, setstorage, x, lbd, ubd, subgrad_tol, sparsity, is_post, - is_intersect, is_first_eval, interval_intersect) - else - forward_multiply_narity!(k, children_arr, children_idx, numvalued, - numberstorage, setstorage, x, lbd, ubd, subgrad_tol, - sparsity, is_post, is_intersect, interval_intersect) - end - FORWARD_DEBUG && println("mult[$n] at k = $k -> $(setstorage[k])") - - # :^ - elseif op === 4 - forward_power!(k, children_arr, children_idx, numvalued, numberstorage, - setstorage, x, lbd, ubd, subgrad_tol, sparsity, is_post, is_intersect, is_first_eval, - interval_intersect, ctx) - - FORWARD_DEBUG && println("power at k = $k -> $(setstorage[k])") - - # :/ - elseif op === 5 - forward_divide!(k, children_arr, children_idx, numvalued, numberstorage, - setstorage, x, lbd, ubd, subgrad_tol, sparsity, is_post, is_intersect, is_first_eval, - interval_intersect, ctx) - - FORWARD_DEBUG && println("divide at k = $k -> $(setstorage[k])") - - # user multivariate function - elseif op >= JuMP._Derivatives.USER_OPERATOR_ID_START - forward_user_multivariate!(k, op, children_arr, children_idx, numvalued, numberstorage, - setstorage, x, lbd, ubd, subgrad_tol, sparsity, is_post, is_intersect, - interval_intersect, ctx, user_operators, num_mv_buffer) - FORWARD_DEBUG && println("user_mult at k = $k -> $(setstorage[k])") - - else - error("Unsupported operation $(operators[op])") - end - - elseif nod.nodetype == JuMP._Derivatives.CALLUNIVAR - - # checks to see if operator is a number - child_idx = first(nzrange(adj, k)) - arg_idx = children_arr[adj.colptr[k]] - arg_is_number = numvalued[arg_idx] - numvalued[k] = arg_is_number - - # performs univariate operators on number valued inputs - if op >= 
JuMP._Derivatives.USER_UNIVAR_OPERATOR_ID_START - forward_univariate_user!(k, op, arg_idx, arg_is_number, setstorage, x, lbd, ubd, subgrad_tol, - sparsity, is_post, is_intersect, is_first_eval, interval_intersect, ctx, - user_operators) - - elseif arg_is_number - forward_univariate_number!(k, op, arg_idx, numvalued, numberstorage) - - # performs set valued operators that require a single tiepoint calculation - elseif single_tp(op) - forward_univariate_tiepnt_1!(k, op, arg_idx, setstorage, x, lbd, ubd, subgrad_tol, sparsity, tpdict, - tp1storage, tp2storage, is_post, is_intersect, - is_first_eval, interval_intersect, ctx) - - # performs set valued operators that require two tiepoint calculations - elseif double_tp(op) - forward_univariate_tiepnt_2!(k, op, arg_idx, setstorage, x, lbd, ubd, subgrad_tol, sparsity, tpdict, - tp1storage, tp2storage, tp3storage, tp4storage, - is_post, is_intersect, is_first_eval, - interval_intersect, ctx) - - # performs set valued operator on other functions in base library - else - forward_univariate_other!(k, op, arg_idx, setstorage, x, lbd, ubd, subgrad_tol, sparsity, is_post, - is_intersect, is_first_eval, interval_intersect, ctx) - - end - FORWARD_DEBUG && println("fop[$op] at k = $k -> $(setstorage[k])") - else - error("Unrecognized node type $(nod.nodetype).") - - end - - end - - return nothing -end - - -### -### Define forward evaluation pass -### -function forward_pass!(evaluator::Evaluator, d::NonlinearExpression{V}) where V - # check that prior subexpressions have been evaluated - # i.e. 
box_id is same and reference point is the same - for i = 1:d.dependent_subexpression_count - if !prior_eval(evaluator, i) - subexpr = evaluator.subexpressions[i] - forward_pass!(evaluator, subexpr) - end - end - - forward_pass_kernel!(d.nd, d.adj, evaluator.x, evaluator.lower_variable_bounds, - evaluator.upper_variable_bounds, d.grad_sparsity, - d.setstorage, - d.numberstorage, d.isnumber, d.tpdict, - d.tp1storage, d.tp2storage, d.tp3storage, d.tp4storage, - evaluator.user_operators, evaluator.subexpressions, - d.grad_sparsity, d.reverse_sparsity, - evaluator.num_mv_buffer, evaluator.ctx, - evaluator.is_post, evaluator.is_intersect, - evaluator.is_first_eval, evaluator.interval_intersect, - d.cv_grad_buffer, d.cc_grad_buffer, - evaluator.treat_x_as_number, evaluator.subgrad_tol) - return nothing -end - -function forward_pass!(evaluator::Evaluator, d::BufferedNonlinearFunction{V}) where V - - forward_pass!(evaluator, d.expr) - d.has_value = true - d.last_past_reverse = false - - return nothing -end diff --git a/src/eago_optimizer/functions/nonlinear/graph/expressions.jl b/src/eago_optimizer/functions/nonlinear/graph/expressions.jl new file mode 100644 index 00000000..3b72e236 --- /dev/null +++ b/src/eago_optimizer/functions/nonlinear/graph/expressions.jl @@ -0,0 +1,143 @@ +# Definitions borrow from https://github.com/FluxML/NNlib.jl (names used to +# standardize package). TODO: Decide how/if to incorporate NNlib depedency into +# McCormick.jl/EAGO.jl. 
# Numeric helpers used by the ML activation atoms below.
oftf(x, y) = oftype(float(x), y)
leakyrelu(x, a = oftf(x, 0.01)) = max(a * x, x)
swish1(x) = x * sigmoid(x)

"""
    AtomType
"""
@enum(AtomType, VAR_ATOM, PARAM_ATOM, CONST_ATOM, SELECT_ATOM,
      ABS, ABS2, INV, RAD2DEG, DEG2RAD, ONE, ZERO,
      MULT, DIV, PLUS, MINUS, POW, MIN, MAX, STEP, SIGN,
      LOG, LOG2, LOG10, LOG1P, EXP, EXP2, EXP10, EXPM1,
      SIN, COS, TAN, CSC, SEC, COT,
      ASIN, ACOS, ATAN, ACSC, ASEC, ACOT,
      SINH, COSH, TANH, CSCH, SECH, COTH,
      ASINH, ACOSH, ATANH, ACSCH, ASECH, ACOTH,
      ERF, ERFC, ERFINV, ERFCINV, SQRT, CBRT,
      # ML specific functions
      RELU, LEAKYRELU, SOFTPLUS, SOFTSIGN, GELU, SILU, SIGMOID,
      # EAGO modeling functions
      XLOGX, ARH,
      # EAGO bound enforcing functions
      POS, NEG, LOWER_BND, UPPER_BND, BND,
      # Atoms for nonexpression types
      USER, USERN, SUBEXPR)

# Univariate functions that correspond to an AtomType.
const UNIVARIATE_ATOM_DICT = Dict{AtomType,Symbol}(
    PLUS  => :+,       MINUS   => :-,       INV     => :inv,
    ONE   => :one,     ZERO    => :zero,    ABS     => :abs,
    ABS2  => :abs2,    RAD2DEG => :rad2deg, DEG2RAD => :deg2rad,
    STEP  => :step,    SIGN    => :sign,

    LOG   => :log,     LOG2    => :log2,    LOG10   => :log10,
    LOG1P => :log1p,   EXP     => :exp,     EXP2    => :exp2,
    EXP10 => :exp10,   EXPM1   => :expm1,

    SIN   => :sin,     COS     => :cos,     TAN     => :tan,
    CSC   => :csc,     SEC     => :sec,     COT     => :cot,
    ASIN  => :asin,    ACOS    => :acos,    ATAN    => :atan,
    ACSC  => :acsc,    ASEC    => :asec,    ACOT    => :acot,
    SINH  => :sinh,    COSH    => :cosh,    TANH    => :tanh,
    CSCH  => :csch,    SECH    => :sech,    COTH    => :coth,
    ASINH => :asinh,   ACOSH   => :acosh,   ATANH   => :atanh,
    ACSCH => :acsch,   ASECH   => :asech,   ACOTH   => :acoth,

    ERF   => :erf,     ERFC    => :erfc,
    ERFINV => :erfinv, ERFCINV => :erfcinv,

    SQRT  => :sqrt,    CBRT    => :cbrt,

    POS   => :positive, NEG    => :negative, USER   => :user,

    RELU  => :relu,    LEAKYRELU => :leakyrelu, SIGMOID => :sigmoid,
    GELU  => :gelu,    SOFTPLUS  => :softplus,  SOFTSIGN => :softsign,
    SILU  => :swish1,

    XLOGX => :xlogx)

# Bivariate functions that correspond to an AtomType.
const BIVARIATE_ATOM_DICT = Dict{AtomType,Symbol}(
    DIV       => :/,         POW       => :^,
    LOWER_BND => :lower_bnd, UPPER_BND => :upper_bnd,
    ARH       => :arh,       MINUS     => :-,
    MIN       => :min,       MAX       => :max,
    PLUS      => :+,         MULT      => :*)
#BIVARIATE_ATOM_DICT[EXPAX] = :lower_bnd
#BIVARIATE_ATOM_DICT[EXPXY] = :lower_bnd

# N-arity functions that correspond to an AtomType.
const NARITY_ATOM_DICT = Dict{AtomType,Symbol}(
    MIN   => :min,  MAX  => :max,
    PLUS  => :+,    MULT => :*,
    USERN => :usern)

# Union of the above (later entries win on shared keys, matching the
# original sequential-insertion behavior), plus atoms that may take 1-to-n
# arguments.
const ALL_ATOM_DICT = merge(UNIVARIATE_ATOM_DICT, BIVARIATE_ATOM_DICT, NARITY_ATOM_DICT)
ALL_ATOM_DICT[BND] = :bnd
#ATM_EVAL[QUAD1] = :quad1
#ATM_EVAL[QUAD2] = :quad2

# List of keys only
const UNIVARIATE_ATOM_TYPES = collect(keys(UNIVARIATE_ATOM_DICT))
const BIVARIATE_ATOM_TYPES  = collect(keys(BIVARIATE_ATOM_DICT))
const NARITY_ATOM_TYPES     = collect(keys(NARITY_ATOM_DICT))
const ALL_ATOM_TYPES        = collect(keys(ALL_ATOM_DICT))

# Reverse lookup dicts (Symbol -> AtomType)
const REV_UNIVARIATE_ATOM_DICT = Dict(v => k for (k, v) in UNIVARIATE_ATOM_DICT)
const REV_BIVARIATE_ATOM_DICT  = Dict(v => k for (k, v) in BIVARIATE_ATOM_DICT)
const REV_NARITY_ATOM_DICT     = Dict(v => k for (k, v) in NARITY_ATOM_DICT)

#=
TODO: Each graph representation is assumed to be static... so
=#

abstract type AbstractDirectedGraph end
abstract type AbstractDirectedAcyclicGraph <: AbstractDirectedGraph end

const AbstractDG  = AbstractDirectedGraph
const AbstractDAG = AbstractDirectedAcyclicGraph

@enum Linearity LIN_CONSTANT LIN_LINEAR LIN_PIECEWISE_LINEAR LIN_NONLINEAR
@enum VariableType VT_BIN VT_INT VT_CONT

# Fallback: concrete graph types must implement their own variable count.
function _variable_count(g::AbstractDG)::Int
    error("Variable count not defined for graph type = $(typeof(g))")
end

# added id field from JuMP UserOperatorRegistry, expect more extensive changes in future.
# Registry of user-defined operators. Mirrors JuMP's UserOperatorRegistry
# with an added id field; expect more extensive changes in the future.
struct OperatorRegistry
    multivariate_id::Vector{Symbol}
    multivariate_operator_to_id::Dict{Symbol,Int}
    multivariate_operator_evaluator::Vector{MOI.AbstractNLPEvaluator}
    univariate_operator_id::Vector{Symbol}
    univariate_operator_to_id::Dict{Symbol,Int}
    univariate_operator_f::Vector{Any}
    univariate_operator_fprime::Vector{Any}
    univariate_operator_fprimeprime::Vector{Any}
end
function OperatorRegistry()
    return OperatorRegistry(
        Symbol[],
        Dict{Symbol,Int}(),
        MOI.AbstractNLPEvaluator[],
        Symbol[],
        Dict{Symbol,Int}(),
        [],
        [],
        [],
    )
end

include(joinpath(@__DIR__, "expressions.jl"))
include(joinpath(@__DIR__, "node.jl"))
include(joinpath(@__DIR__, "utilities.jl"))
include(joinpath(@__DIR__, "graphs", "directed_tree.jl"))

"""
    AbstractCache

Abstract supertype for information-storage objects attached to a directed
acyclic graph.
"""
abstract type AbstractCache end

"""
    AbstractCacheAttribute

Abstract supertype used for attributes stored in a cache.
"""
abstract type AbstractCacheAttribute end

# Current point, bounds, and node<->variable index maps shared by caches.
Base.@kwdef mutable struct VariableValues{T<:Real}
    x0::Vector{T}                    = T[]    # reference point
    x::Vector{T}                     = T[]    # current evaluation point
    lower_variable_bounds::Vector{T} = T[]
    upper_variable_bounds::Vector{T} = T[]
    node_to_variable_map::Vector{Int} = Int[]
    variable_to_node_map::Vector{Int} = Int[]
    variable_types::Vector{VariableType} = VariableType[]
end

@inline val(b::VariableValues{T}, i::Int) where T = @inbounds b.x[i]
@inline lbd(b::VariableValues{T}, i::Int) where T = @inbounds b.lower_variable_bounds[i]
@inline ubd(b::VariableValues{T}, i::Int) where T = @inbounds b.upper_variable_bounds[i]
val(b::VariableValues{T}) where T = b.x
lbd(b::VariableValues{T}) where T = b.lower_variable_bounds
ubd(b::VariableValues{T}) where T = b.upper_variable_bounds

# Gather the current point restricted to branch variables into `out`.
# NOTE(review): `BranchVar` is declared elsewhere in the package — confirm.
function _get_x!(::Type{BranchVar}, out::Vector{T}, v::VariableValues{T}) where T<:Real
    @inbounds for i = 1:length(v.node_to_variable_map)
        out[i] = v.x[v.node_to_variable_map[i]]
    end
    return nothing
end

# Copy field `s` from `x` into `y`, allocating the destination on first use.
function _initialize_or_copy!(y::VariableValues{T}, x::VariableValues{T}, s::Symbol) where {T<:Real}
    isempty(getfield(y, s)) ? setfield!(y, s, copy(getfield(x, s))) : copy!(getfield(y, s), getfield(x, s))
end
# Refresh the evaluation point (and, optionally, the reference point/bounds).
function update_box_and_pnt!(y::VariableValues{T}, x::VariableValues{T}, update_box::Bool) where {T<:Real}
    if update_box
        _initialize_or_copy!(y, x, :x0)
        _initialize_or_copy!(y, x, :lower_variable_bounds)
        _initialize_or_copy!(y, x, :upper_variable_bounds)
    end
    _initialize_or_copy!(y, x, :x)
end

"""
    DirectedTree

A tree graph with a single sink node.
"""
Base.@kwdef mutable struct DirectedTree <: AbstractDirectedAcyclicGraph
    "List of nodes"
    nodes::Vector{Node} = Node[]
    "List of index of variables in this tree"
    variables::Dict{Int,Int} = Dict{Int,Int}()
    "Information on all variables..."
    v::VariableValues{Float64} = VariableValues{Float64}()
    "List of constant values"
    constant_values::Vector{Float64} = Float64[]
    # BUGFIX(doc): this comment was copy-pasted as "List of constant values".
    "List of parameter values"
    parameter_values::Vector{Float64} = Float64[]
    "Number of nodes"
    node_count::Int = 0
    "Number of variables"
    variable_count::Int = 0
    "Number of constants"
    constant_count::Int = 0
    sink_bnd::Interval{Float64} = Interval{Float64}(-Inf,Inf)
    "Variable indices appearing in this expression (sorted)"
    sparsity::Vector{Int} = Int[]
    "Maps a problem variable index to its local (dense) index"
    rev_sparsity::Dict{Int,Int} = Dict{Int,Int}()
    dependent_variable_count::Int = 0
    dep_subexpr_count::Int = 0
    dependent_subexpressions::Vector{Int} = Int[]
    dependent_subexpression_dict::Dict{Int,Int} = Dict{Int,Int}()
    linearity::Linearity = LIN_CONSTANT
    user_operators::OperatorRegistry = OperatorRegistry()
    #children::SparseMatrixCSC{Bool,Int} = spzeros(Bool,1,1)
    #parents::SparseMatrixCSC{Bool,Int} = spzeros(Bool,1,1)
end
#DirectedTree(n::Int) = DirectedTree(children = spzeros(Bool,n,n), parents=spzeros(Bool,n,n))
const DAT = DirectedTree

# node property access functions that can be defined at abstract type
node(g::DAT, i)  = g.nodes[i]
nodes(g::DAT)    = g.nodes

variable(g::DAT, i)  = g.variables[i]
variables(g::DAT)    = g.variables

constant_value(g::DAT, i)  = g.constant_values[i]
constant_values(g::DAT)    = g.constant_values
parameter_value(g::DAT, i) = g.parameter_values[i]
parameter_values(g::DAT)   = g.parameter_values

node_class(g::DAT, i)  = node_class(node(g, i))
ex_type(g::DAT, i)     = ex_type(node(g, i))
first_index(g::DAT, i) = first_index(node(g, i))
# BUGFIX: forwarded to `secondary_index(::Node)`, which does not exist;
# the node-level accessor is `second_index`.
secondary_index(g::DAT, i) = second_index(node(g, i))
arity(g::DAT, i)    = arity(node(g, i))
children(g::DAT, i) = children(node(g, i))
child(g::DAT, i, j) = child(node(g, j), i)   # i-th child of node j

is_binary(g::DAT, i) = arity(g, i) == 2

node_count(g::DAT)     = g.node_count
variable_count(g::DAT) = g.variable_count
constant_count(g::DAT) = g.constant_count

dependent_subexpression_index(g::DAT, i) = g.dependent_subexpression_dict[i]
dep_subexpr_count(g::DAT) = length(g.dependent_subexpressions)
sparsity(g::DAT, i) = g.sparsity
rev_sparsity(g::DAT, i::Int, k::Int) = g.rev_sparsity[i]

user_univariate_operator(g::DAT, i)   = g.user_operators.univariate_operator_f[i]
user_multivariate_operator(g::DAT, i) = g.user_operators.multivariate_operator_evaluator[i]

# Build a DirectedTree from JuMP expression storage `d`, recording sparsity,
# dependent subexpressions, and linearity along the way.
function DirectedTree(aux_info, d, op::OperatorRegistry, sub_sparsity::Dict{Int,Vector{Int}}, subexpr_linearity, parameter_values, is_sub, subexpr_indx)

    nd = copy(d.nd)
    adj = copy(d.adj)
    const_values = copy(d.const_values)

    sparsity, dependent_subexpressions = _compute_sparsity(d, sub_sparsity, is_sub, subexpr_indx)
    dependent_subexpression_dict = Dict{Int,Int}()
    for (i, v) in enumerate(dependent_subexpressions)
        dependent_subexpression_dict[v] = i
    end
    rev_sparsity = Dict{Int,Int}()
    for (i, s) in enumerate(sparsity)
        rev_sparsity[s] = i
    end

    nodes = _convert_node_list(aux_info, d.nd, op)
    lin = linearity(nd, adj, subexpr_linearity)
    DirectedTree(nodes = nodes,
                 variables = rev_sparsity,
                 constant_values = const_values,
                 parameter_values = parameter_values,
                 node_count = length(nodes),
                 variable_count = length(sparsity),
                 constant_count = length(const_values),
                 sparsity = sparsity,
                 rev_sparsity = rev_sparsity,
                 dependent_variable_count = length(sparsity),
                 dep_subexpr_count = length(dependent_subexpressions),
                 dependent_subexpressions = copy(dependent_subexpressions),
                 dependent_subexpression_dict = dependent_subexpression_dict,
                 linearity = lin[1],
                 user_operators = op
                 )
end

# Generate the forward dispatch switch over all expression atoms.
forward_uni = [i for i in instances(AtomType)]
setdiff!(forward_uni, [VAR_ATOM; PARAM_ATOM; CONST_ATOM; SELECT_ATOM; SUBEXPR])
f_switch = binary_switch(forward_uni, is_forward = true)
@eval function fprop!(t::T, ex::Expression, g::DAT, c::AbstractCache, k::Int) where T<:AbstractCacheAttribute
    id = ex_type(g, k)
    $f_switch
    error("fprop! for ex_type = $id not defined.")
    return
end

# Generate the reverse dispatch switch over all expression atoms.
reverse_uni = [i for i in instances(AtomType)]
setdiff!(reverse_uni, [VAR_ATOM; PARAM_ATOM; CONST_ATOM; SELECT_ATOM; SUBEXPR])
r_switch = binary_switch(reverse_uni, is_forward = false)
@eval function rprop!(t::T, ex::Expression, g::DAT, c::AbstractCache, k::Int) where T<:AbstractCacheAttribute
    id = ex_type(g, k)
    $r_switch
    error("rprop! for ex_type = $id not defined.")
    return
end

"""
    NodeClass

Each node in the directed graph can be classified into the following types
- VARIABLE: Denotes a decision variable.
- PARAMETER: An adjustable parameter value (not a decision variable).
- CONSTANT: A constant value
- EXPRESSION: Any other expression that isn't a subexpression
- SUBEXPRESSION: Any expression referencing a different graph representation.
"""
@enum(NodeClass, CONSTANT, PARAMETER, VARIABLE, EXPRESSION, SUBEXPRESSION)
#=
- SELECT: These nodes do not store values rather they reference the output
          stored in the buffer assocatied with an EXPRESSION. These nodes
          only occur after a multiple output function. In the mimo JuMP
          extension they correspond to intermediate variables introduced
          by the user.
=#

# Singleton tag types used for dispatch when constructing/classifying nodes.
struct Variable end
struct Parameter end
struct Constant end
struct Expression end
struct Subexpression end
struct User end
# BUGFIX: `Select()` is referenced by the JuMP-node conversion for auxiliary
# variables but was never declared.
struct Select end

function user end
function usern end

const ATOM_TYPE_INSTANCES = instances(AtomType)

abstract type AbstractNode end

"""
Describes connectivity and expression represented by node.
"""
struct Node <: AbstractNode
    node_class::NodeClass
    ex_type::AtomType
    first_index::Int
    second_index::Int
    arity::Int
    children::Vector{Int}
end

for (t, s, a) in ((Variable, VARIABLE, VAR_ATOM),
                  (Parameter, PARAMETER, PARAM_ATOM),
                  (Constant, CONSTANT, CONST_ATOM),
                  (Subexpression, SUBEXPRESSION, SUBEXPR),)
    @eval Node(::$t, i::Int) = Node($s, $a, i, 0, 0, Int[])
end
# NOTE(review): SELECT nodes reference the output of an EXPRESSION (see the
# comment block above); the class/atom choice here needs confirmation.
Node(::Select, i::Int) = Node(EXPRESSION, SELECT_ATOM, i, 0, 0, Int[])

for v in (PLUS, MINUS, MULT, POW, DIV, MAX, MIN)
    @eval Node(::Val{true}, ::Val{$v}, c::Vector{Int}) = Node(EXPRESSION, $v, 0, 0, length(c), c)
end
for d in ALL_ATOM_TYPES
    @eval Node(::Val{false}, ::Val{$d}, c::Vector{Int}) = Node(EXPRESSION, $d, 0, 0, 1, c)
end
Node(::Val{true}, ::Val{USER}, i::Int, c::Vector{Int})  = Node(EXPRESSION, USER, i, 0, 1, c)
Node(::Val{true}, ::Val{USERN}, i::Int, c::Vector{Int}) = Node(EXPRESSION, USERN, i, 0, length(c), c)

Node(::Val{:first_index}, n::Node, i::Int) = Node(node_class(n), ex_type(n), i, second_index(n), arity(n), children(n))

node_class(n::Node)  = n.node_class
ex_type(n::Node)     = n.ex_type
first_index(n::Node) = n.first_index
# BUGFIX: accessed nonexistent field `node_second_index`; the field is
# `second_index`.
second_index(n::Node) = n.second_index
arity(n::Node)    = n.arity
children(n::Node) = n.children
child(n::Node, i) = @inbounds getindex(n.children, i)

node_is_class(::Variable, n::Node)  = node_class(n) == VARIABLE
node_is_class(::Parameter, n::Node) = node_class(n) == PARAMETER
node_is_class(::Constant, n::Node)  = node_class(n) == CONSTANT

# Multivariate operators EAGO defines beyond JuMP's built-ins; generate a
# chain of equality checks that maps the operator symbol to an EAGO node.
mv_eago_not_jump = setdiff(JuMP._Derivatives.operators,
                           union(Symbol[k for k in keys(REV_BIVARIATE_ATOM_DICT)],
                                 Symbol[k for k in keys(REV_NARITY_ATOM_DICT)]))
eago_mv_switch = quote end
for s in mv_eago_not_jump
    global eago_mv_switch = quote
        $eago_mv_switch
        (d == $s) && (return Node(Val(true), Val($s), v[c]))
    end
end
@eval function _create_call_node(i, v, c::UnitRange{Int}, op::OperatorRegistry)
    if i == 1
        return Node(Val(true), Val(PLUS), v[c])
    elseif i == 2
        return Node(Val(true), Val(MINUS), v[c])
    elseif i == 3
        return Node(Val(true), Val(MULT), v[c])
    elseif i == 4
        return Node(Val(true), Val(POW), v[c])
    elseif i == 5
        return Node(Val(true), Val(DIV), v[c])
    elseif i == 6
        error("If-else currently unsupported...")
    elseif i >= JuMP._Derivatives.USER_OPERATOR_ID_START
        i_mv = i - JuMP._Derivatives.USER_OPERATOR_ID_START + 1
        d = op.multivariate_id[i_mv]
        $eago_mv_switch
        return Node(Val(true), Val(USERN), i_mv, v[c])
    end
end

# Balanced binary search over JuMP univariate ids producing EAGO atom nodes.
function binary_switch_typ(ids, exprs)
    if length(exprs) <= 3
        out = Expr(:if, Expr(:call, :(==), :i, ids[1]),
                   :(Node(Val(false), Val($(exprs[1])), v[c])))
        if length(exprs) > 1
            push!(out.args, binary_switch_typ(ids[2:end], exprs[2:end]))
        end
        return out
    else
        mid = length(exprs) >>> 1
        return Expr(:if, Expr(:call, :(<=), :i, ids[mid]),
                    binary_switch_typ(ids[1:mid], exprs[1:mid]),
                    binary_switch_typ(ids[mid+1:end], exprs[mid+1:end]))
    end
end

# Parallel lists of (JuMP id, EAGO atom) for the shared univariate operators.
indx_JuMP = Int[]
indx_EAGO = AtomType[]
for k in univariate_operators
    if haskey(REV_UNIVARIATE_ATOM_DICT, k)
        k_EAGO = REV_UNIVARIATE_ATOM_DICT[k]
        push!(indx_JuMP, univariate_operator_to_id[k])
        push!(indx_EAGO, k_EAGO)
    end
end

uni_eago_not_jump = setdiff(univariate_operators, Symbol[k for k in keys(REV_UNIVARIATE_ATOM_DICT)])
uni_eago_not_jump = push!(uni_eago_not_jump, :-)
eago_uni_switch = quote end
for s in uni_eago_not_jump
    global eago_uni_switch = quote
        $eago_uni_switch
        (d == $s) && (return Node(Val(false), Val($s), v[c]))
    end
end
atom_switch = binary_switch_typ(indx_JuMP, indx_EAGO)
@eval function _create_call_node_uni(i::Int, v, c::UnitRange{Int}, op::OperatorRegistry)

    if i >= JuMP._Derivatives.USER_UNIVAR_OPERATOR_ID_START
        j = i - JuMP._Derivatives.USER_UNIVAR_OPERATOR_ID_START + 1
        dop = op.univariate_operator_id[j]
        # NOTE(review): `d` is an Int id here while `eago_uni_switch`
        # compares it against Symbols, so those branches never fire —
        # confirm whether `d = dop` (the Symbol) was intended.
        d = op.univariate_operator_to_id[dop]
        $eago_uni_switch
        return Node(Val(true), Val(USER), j, v[c])
    end
    $atom_switch
end
# ---- graph/utilities.jl ----

# Build a balanced binary-search `if` tree over atom `id`s dispatching to the
# per-atom forward (`fprop!`) or reverse (`rprop!`) kernels.
function binary_switch(ids; is_forward = true)
    if length(ids) <= 3
        if is_forward
            out = Expr(:if, Expr(:call, :(==), :id, ids[1]),
                       :(return fprop!(t, Val($(ids[1])), g, c, k)))
        else
            # BUGFIX: was `Val$((ids[1]))`, a malformed interpolation.
            out = Expr(:if, Expr(:call, :(==), :id, ids[1]),
                       :(return rprop!(t, Val($(ids[1])), g, c, k)))
        end
        # BUGFIX: the recursive calls previously dropped `is_forward`, so a
        # reverse switch emitted fprop! calls for every id after the first.
        (length(ids) > 1) && push!(out.args, binary_switch(ids[2:end], is_forward = is_forward))
        return out
    else
        mid = length(ids) >>> 1
        return Expr(:if, Expr(:call, :(<=), :id, ids[mid]),
                    binary_switch(ids[1:mid], is_forward = is_forward),
                    binary_switch(ids[mid+1:end], is_forward = is_forward))
    end
end

# Translate a single JuMP tape node into an EAGO graph `Node`.
function Node(aux_info, d::JuMP._Derivatives.NodeData, child_vec, c::UnitRange{Int}, op)
    nt = d.nodetype
    i = d.index
    (nt == JuMP._Derivatives.CALL)          && return _create_call_node(i, child_vec, c, op)
    (nt == JuMP._Derivatives.CALLUNIVAR)    && return _create_call_node_uni(i, child_vec, c, op)
    (nt == JuMP._Derivatives.VALUE)         && return Node(Constant(), i)
    (nt == JuMP._Derivatives.PARAMETER)     && return Node(Parameter(), i)
    (nt == JuMP._Derivatives.SUBEXPRESSION) && return Node(Subexpression(), i)
    (nt == JuMP._Derivatives.VARIABLE)      && return !is_auxilliary_variable(aux_info, i) ? Node(Variable(), i) : Node(Select(), i)
    (nt == JuMP._Derivatives.LOGIC)         && error("Unable to load JuMP expression. Logical operators not currently supported.")
    (nt == JuMP._Derivatives.COMPARISON)    && error("Unable to load JuMP expression. Comparisons not currently supported.")
    error("Node type = $nt not expected from JuMP.")
end

function _convert_node_list(aux_info, x::Vector{JuMP._Derivatives.NodeData}, op)
    y = Vector{Node}(undef, length(x))
    adj = JuMP._Derivatives.adjmat(x)
    child_vec = rowvals(adj)
    for i in eachindex(x)
        y[i] = Node(aux_info, x[i], child_vec, nzrange(adj, i), op)
    end
    return y
end

# Access gradient sparsity of JuMP storage.
sparsity(d::JuMP._FunctionStorage)      = d.grad_sparsity
sparsity(d::JuMP._SubexpressionStorage) = d.sparsity

# Compute gradient sparsity (sorted variable indices) and the list of
# dependent subexpressions from JuMP storage. Both storage types expose the
# same fields, so one Union method replaces the two previously duplicated
# (byte-identical) definitions.
function _compute_sparsity(d::Union{JuMP._FunctionStorage, JuMP._SubexpressionStorage},
                           sparse_dict::Dict{Int,Vector{Int}}, is_sub, subexpr_indx)
    dep_subexpression = Int[]
    variable_dict = Dict{Int,Bool}()
    for n in d.nd
        if n.nodetype == JuMP._Derivatives.VARIABLE
            if !haskey(variable_dict, n.index)
                variable_dict[n.index] = true
            end
        end
        if n.nodetype == JuMP._Derivatives.SUBEXPRESSION
            push!(dep_subexpression, n.index)
        end
    end
    sparsity = collect(keys(variable_dict))
    unique!(dep_subexpression)
    sort!(dep_subexpression)
    for s in dep_subexpression
        append!(sparsity, sparse_dict[s])
    end
    unique!(sparsity)
    sort!(sparsity)
    if is_sub
        sparse_dict[subexpr_indx] = sparsity
    end
    sparsity, dep_subexpression
end

# Map JuMP linearity classifications onto EAGO's Linearity enum.
function linearity(d::JuMP._Derivatives.Linearity)
    (d == JuMP._Derivatives.LINEAR)           && return LIN_LINEAR
    (d == JuMP._Derivatives.PIECEWISE_LINEAR) && return LIN_PIECEWISE_LINEAR
    (d == JuMP._Derivatives.NONLINEAR)        && return LIN_NONLINEAR
    LIN_CONSTANT # assumes d is then JuMP._Derivatives.CONSTANT
end
function linearity(nd::Vector{JuMP._Derivatives.NodeData}, adj::SparseMatrixCSC{Bool,Int}, d::Vector{JuMP._Derivatives.Linearity})
    x = JuMP._Derivatives.classify_linearity(nd, adj, d)
    linearity.(x)
end

function OperatorRegistry(d::JuMP._Derivatives.UserOperatorRegistry)
    mv_id = collect(keys(d.multivariate_operator_to_id))
    mv_operator_to_id = d.multivariate_operator_to_id
    mv_operator_evaluator = d.multivariate_operator_evaluator
    u_operator_id = collect(keys(d.univariate_operator_to_id))
    u_to_id = d.univariate_operator_to_id
    u_to_f = d.univariate_operator_f
    u_fprime = d.univariate_operator_fprime
    u_fprimeprime = d.univariate_operator_fprimeprime
    OperatorRegistry(mv_id, mv_operator_to_id, mv_operator_evaluator, u_operator_id, u_to_id, u_to_f, u_fprime, u_fprimeprime)
end

# ---- nonlinear/interval/forward.jl ----

function fprop!(::RelaxInterval, vt::Variable, g::DAT, b::IntervalCache{T}, k) where T<:Real
    i = first_index(g, k)
    l = lbd(b, i)
    u = ubd(b, i)
    b[k] = (l == u) ? Interval(l) : Interval(l, u)
    nothing
end

function fprop!(t::RelaxInterval, ex::Subexpression, g::DAT, b::IntervalCache{T}, k) where T<:Real
    b[k] = subexpression_set(t, b, first_index(g, k))
end

for (F, f) in ((DIV, :/), (ARH, :arh), (POW, :^))
    # BUGFIX: second child was fetched via `set(b, ...)` without the
    # attribute argument (no such method).
    @eval fprop!(t::RelaxInterval, v::Val{$F}, g::DAT, b::IntervalCache{T}, k) where T<:Real =
        (b[k] = ($f)(set(t, b, child(g, 1, k)), set(t, b, child(g, 2, k))); nothing)
end

function fprop!(t::RelaxInterval, v::Val{MINUS}, g::DAT, b::IntervalCache{T}, k) where T<:Real
    x = child(g, 1, k)
    b[k] = is_binary(g, k) ? (set(t, b, x) - set(t, b, child(g, 2, k))) : -set(t, b, x)
end

for (F, f) in ((PLUS, :sum), (MIN, :minimum), (MAX, :maximum), (MULT, :prod))
    # BUGFIX: first argument was annotated `t::Interval` (IntervalArithmetic's
    # set type) instead of the `RelaxInterval` attribute, so these methods
    # never dispatched.
    @eval fprop!(t::RelaxInterval, v::Val{$F}, g::DAT, b::IntervalCache{T}, k) where T<:Real =
        (b[k] = ($f)(i -> set(t, b, i), children(g, k)); nothing)
end

function fprop!(t::RelaxInterval, v::Val{USER}, g::DAT, b::IntervalCache{T}, k) where T<:Real
    f = user_univariate_operator(g, first_index(g, k))
    b[k] = f(set(t, b, child(g, 1, k)))
end

function fprop!(t::RelaxInterval, v::Val{USERN}, g::DAT, b::IntervalCache{T}, k) where T<:Real
    mv = user_multivariate_operator(g, first_index(g, k))
    # BUGFIX: original assigned `set_input = set_input(t, b, arity(g, k))`
    # (a self-referential call to an undefined function) and then indexed
    # with an undefined `i`. Fill the cache's multivariate buffer instead.
    n = arity(g, k)
    buf = b._set_mv_buffer
    for (i, c) in enumerate(children(g, k))
        buf[i] = set(t, b, c)
    end
    b[k] = MOI.eval_objective(mv, view(buf, 1:n))::Interval{Float64}
end

for ft in UNIVARIATE_ATOM_TYPES
    f = UNIVARIATE_ATOM_DICT[ft]
    (f == :user || f == :+ || f == :-) && continue
    @eval function fprop!(t::RelaxInterval, v::Val{$ft}, g::DAT, b::IntervalCache{T}, k) where T<:Real
        x = child(g, 1, k)
        b[k] = ($f)(set(t, b, x))
    end
end

for (F, f) in ((LOWER_BND, :lower_bnd), (UPPER_BND, :upper_bnd))
    @eval function fprop!(t::RelaxInterval, v::Val{$F}, g::DAT, b::IntervalCache{T}, k) where T<:Real
        y = child(g, 2, k)
        if is_num(t, b, y)
            z = set(t, b, child(g, 1, k))
            b[k] = ($f)(z, num(t, b, y))
        end
        nothing
    end
end

function fprop!(t::RelaxInterval, v::Val{BND}, g::DAT, b::IntervalCache{T}, k) where T<:Real
    y = child(g, 2, k)
    r = child(g, 3, k)
    if is_num(t, b, y) && is_num(t, b, r)
        z = set(t, b, child(g, 1, k))
        b[k] = bnd(z, num(t, b, y), num(t, b, r))
    end
    nothing
end

# ---- nonlinear/interval/interval.jl ----

struct RelaxInterval <: AbstractCacheAttribute end

Base.@kwdef mutable struct IntervalCache{T<:Real} <: AbstractCache
    v::VariableValues{T}                     = VariableValues{T}()
    _set::Vector{Interval{T}}                = Interval{T}[]
    _subexpression_set::Dict{Int,Interval{T}} = Dict{Int,Interval{T}}()
    _set_mv_buffer::Vector{Interval{T}}      = Interval{T}[]
    first_eval::Bool                         = true
end
function IntervalCache{T}(::RelaxInterval, n::Int, m::Int, p::Int) where T<:Real
    IntervalCache{T}(_set = zeros(Interval{T}, n), _set_mv_buffer = zeros(Interval{T}, p))
end
function initialize!(c::IntervalCache{T}, g::DirectedTree) where T<:Real
    c._set = zeros(Interval{T}, node_count(g))
    c._set_mv_buffer = zeros(Interval{T}, length(sparsity(g, 1)))
    return
end

set(::RelaxInterval, b::IntervalCache{T}, i) where T<:Real = b._set[i]
num(::RelaxInterval, b::IntervalCache{T}, i) where T<:Real = b._set[i].lo
# BUGFIX: previously returned the stored Interval itself (not a Bool), which
# made every `if is_num(...)` throw a TypeError. A node is treated as numeric
# when its stored interval is degenerate (consistent with `num` returning
# `.lo`). NOTE(review): confirm intended definition.
is_num(::RelaxInterval, b::IntervalCache{T}, i) where T<:Real = (x = b._set[i]; x.lo == x.hi)
subexpression_set(::RelaxInterval, b::IntervalCache{T}, i) where T<:Real = b._subexpression_set[i]
set_mv_buffer(::RelaxInterval, b::IntervalCache{T}, i) where T<:Real = b._set_mv_buffer[i]

val(b::IntervalCache{T}, i::Int) where T<:Real = val(b.v, i)
lbd(b::IntervalCache{T}, i::Int) where T<:Real = lbd(b.v, i)
ubd(b::IntervalCache{T}, i::Int) where T<:Real = ubd(b.v, i)

function Base.setindex!(b::IntervalCache{T}, v::Interval{T}, i::Int) where T<:Real
    # BUGFIX: `first_eval` was read as an (undefined) global; it is a field
    # of the cache.
    if b.first_eval
        b._set[i] = v
    else
        # On subsequent passes only tighten, never widen.
        b._set[i] = b._set[i] ∩ v
    end
    nothing
end

include(joinpath(@__DIR__, "forward.jl"))
include(joinpath(@__DIR__, "reverse.jl"))

# ---- nonlinear/interval/reverse.jl ----

function r_init!(t::RelaxInterval, g::DAT, b::IntervalCache{T}) where T<:Real
    # BUGFIX: `set` requires the attribute argument.
    z = set(t, b, 1) ∩ g.sink_bnd
    b[1] = z
    return !isempty(z)
end

function rprop!(t::RelaxInterval, v::Variable, g::DAT, c::IntervalCache{T}, k) where T<:Real
    # BUGFIX: `z` was used on the right-hand side of its own definition.
    # Tighten the node interval by the variable's current bounds.
    # NOTE(review): confirm this matches the intended contractor semantics.
    i = first_index(g, k)
    z = Interval{T}(lbd(c, i), ubd(c, i)) ∩ set(t, c, k)
    c[k] = z
    return !isempty(z)
end

function rprop!(t::RelaxInterval, v::Subexpression, g::DAT, c::IntervalCache{T}, k) where T<:Real
    store_subexpression!(c, set(t, c, k), first_index(g, k))
    return true
end

# needed for O(n) reverse interval propagation of +
# returns q for x = q + y
function hukuhara_diff(x::Interval{T}, y::Interval{T}) where T<:Real
    isempty(x) && return x
    isempty(y) && return y
    l = sub_round(x.lo, y.lo, RoundDown)
    u = sub_round(x.hi, y.hi, RoundUp)
    Interval{T}(l, u)
end

"""
$(FUNCTIONNAME)

Updates storage tapes with reverse evaluation of node representing `n =
+(x,y,z...)` which updates x, y, z and so on.
"""
function rprop!(t::RelaxInterval, v::Val{PLUS}, g::DAT, c::IntervalCache{T}, k::Int) where T<:Real
    tsum = sum(j -> set(t, c, j), children(g, k))
    for j in children(g, k)
        # BUGFIX: the loop body referenced undefined `i`/`v`, and one `set`
        # call was missing the attribute argument; index and write with `j`/`w`.
        tmsum = hukuhara_diff(tsum, set(t, c, j))
        _, w, _ = IntervalContractors.plus_rev(set(t, c, k), set(t, c, j), tmsum)
        isempty(w) && return false
        c[j] = w
    end
    return true
end

# needed for close to O(n) reverse interval propagation of *
# returns q for x = q*y
function hukuhara_div(x::Interval{T}, y::Interval{T}) where T<:Real
    isempty(x) && return x
    isempty(y) && return y
    if y.lo >= zero(T)
        if x.lo >= zero(T)
            l = div_round(x.lo, y.lo, RoundDown)
            u = div_round(x.hi, y.hi, RoundUp)
        elseif x.hi <= zero(T)
            l = div_round(x.lo, y.hi, RoundDown)
            u = div_round(x.hi, y.lo, RoundUp)
        else
            l = div_round(x.lo, y.hi, RoundDown)
            u = div_round(x.hi, y.hi, RoundUp)
        end
        return Interval(l, u), true
    elseif y.hi <= zero(T)
        if x.lo >= zero(T)
            l = div_round(x.hi, y.lo, RoundDown)
            u = div_round(x.lo, y.hi, RoundUp)
        elseif x.hi <= zero(T)
            l = div_round(x.hi, y.hi, RoundDown)
            u = div_round(x.lo, y.lo, RoundUp)
        else
            l = div_round(x.hi, y.lo, RoundDown)
            u = div_round(x.lo, y.lo, RoundUp)
        end
        return Interval(l, u), true
    else
        if x.lo > zero(T)
            l = div_round(x.hi, y.lo, RoundDown)
            u = div_round(x.hi, y.hi, RoundUp)
            return Interval(l, u), true
        elseif x.hi < zero(T)
            l = div_round(x.lo, y.hi, RoundDown)
            u = div_round(x.lo, y.lo, RoundUp)
            return Interval(l, u), true
        end
    end
    # BUGFIX: `empty(Interval{T})` is not a defined function; use the
    # IntervalArithmetic empty-interval constructor.
    emptyinterval(T), false
end

"""
$(FUNCTIONNAME)

Updates storage tapes with reverse evaluation of node representing `n = *(x,y,z...)` which updates x, y, z and so on.
"""
function rprop!(t::RelaxInterval, v::Val{MULT}, g::DAT, c::IntervalCache{T}, k::Int) where T<:Real
    # BUGFIX: the combined value of a product node is the product (the
    # original used `sum`), and the body referenced undefined `b`/`i`.
    tmul = prod(j -> set(t, c, j), children(g, k))
    for j in children(g, k)
        tmulm, hdiv = hukuhara_div(tmul, set(t, c, j))
        if !hdiv
            # Fallback: recompute the product of the other children directly.
            tmulmf = one(Interval{T})
            for i in children(g, k)
                if i != j
                    tmulmf *= set(t, c, i)
                end
            end
            tmulm = tmulmf
        end
        _, w, _ = IntervalContractors.mul_rev(set(t, c, k), set(t, c, j), tmulm)
        isempty(w) && return false
        c[j] = w
    end
    return true
end

for (f, fc, F) in ((-, MINUS, IntervalContractors.minus_rev),
                   (^, POW, IntervalContractors.power_rev),
                   (/, DIV, IntervalContractors.div_rev))
    @eval function rprop!(t::RelaxInterval, v::Val{$fc}, g::DAT, b::IntervalCache{T}, k) where T<:Real
        x = child(g, 1, k)
        y = child(g, 2, k)
        # Locals renamed (`xv`/`yv`) to avoid shadowing the `v::Val` argument.
        _, xv, yv = ($F)(set(t, b, k), set(t, b, x), set(t, b, y))
        isempty(xv) && return false
        isempty(yv) && return false
        b[x] = xv
        b[y] = yv
        return true
    end
end

# User operators and simple univariate atoms: no reverse contraction applied.
rprop!(t::RelaxInterval, v::Val{USER}, g::DAT, b::IntervalCache, k::Int)  = true
rprop!(t::RelaxInterval, v::Val{USERN}, g::DAT, b::IntervalCache, k::Int) = true

for ft in UNIVARIATE_ATOM_TYPES
    f = UNIVARIATE_ATOM_DICT[ft]
    (f == :user || f == :+ || f == :-) && continue
    @eval rprop!(t::RelaxInterval, v::Val{$ft}, g::DAT, b::IntervalCache, k::Int) = true
end

# copy_subexpression_value!, eliminate_fixed_variables!
############################################################################# -include("register_special.jl") -include("empty_evaluator.jl") -include("univariate.jl") +const DEBUG_NL = false + +include(joinpath(@__DIR__, "register_special.jl")) +include(joinpath(@__DIR__, "graph", "graph.jl")) +include(joinpath(@__DIR__, "interval", "interval.jl")) +include(joinpath(@__DIR__, "composite_relax", "composite_relax.jl")) +include(joinpath(@__DIR__, "apriori_relax", "apriori_relax.jl")) + +@enum(RelaxType, STD_RELAX, MC_AFF_RELAX, MC_ENUM_RELAX) + +_set_has_value!(d, v) = nothing """ $(TYPEDEF) Stores a general quadratic function with a buffer. """ -mutable struct NonlinearExpression{V} <: AbstractEAGOConstraint - - "List of nodes in nonlinear expression" - nd::Vector{JuMP.NodeData} - "Adjacency Matrix for the expression" - adj::SparseMatrixCSC{Bool,Int64} - const_values::Vector{Float64} - - setstorage::Vector{V} - numberstorage::Vector{Float64} - isnumber::Vector{Bool} - value::V - value_available::Bool - - tp1storage::Vector{Float64} - tp2storage::Vector{Float64} - tp3storage::Vector{Float64} - tp4storage::Vector{Float64} - tpdict::Dict{Int64,Tuple{Int64,Int64,Int64,Int64}} - - # sparsity of constraint + indices in node to reference - grad_sparsity::Vector{Int64} # indices of variables in the problem space (size = np) - reverse_sparsity::Vector{Int64} - - # role in problem - dependent_variable_count::Int64 - dependent_subexpression_count::Int64 - dependent_subexpressions::Vector{Int64} - linearity::JuMP._Derivatives.Linearity - - # buffer for subgradients - cv_grad_buffer::Vector{Float64} - cc_grad_buffer::Vector{Float64} +mutable struct NonlinearExpression{V,N,T<:RelaxTag} <: AbstractEAGOConstraint + g::DirectedTree + relax_cache::RelaxCache{V,N,T} + has_value::Bool + last_reverse::Bool + lower_bound::Float64 + upper_bound::Float64 + grad_sparsity::Vector{Int} +end +function NonlinearExpression() + g = DirectedTree() + c = RelaxCache{MC{1,NS},1,NS}() + return 
NonlinearExpression{MC{1,NS},1,NS}(g, c, false, false, -Inf, Inf, Int[]) +end + +relax_info(s::Relax, n::Int, t::T) where T = MC{n,T} +function NonlinearExpression!(aux_info, rtype::S, sub::Union{JuMP._SubexpressionStorage,JuMP._FunctionStorage}, + b::MOI.NLPBoundsPair, sub_sparsity::Dict{Int,Vector{Int}}, + subexpr_indx::Int, + subexpr_linearity::Vector{JuMP._Derivatives.Linearity}, + op::OperatorRegistry, parameter_values, + tag::T, use_apriori_flag::Bool; is_sub::Bool = false) where {S,T} + g = DirectedTree(aux_info, sub, op, sub_sparsity, subexpr_linearity, parameter_values, is_sub, subexpr_indx) + grad_sparsity = sparsity(g, 1) + n = length(grad_sparsity) + V = relax_info(rtype, n, tag) + c = RelaxCache{V,n,T}() + c.use_apriori_mul = use_apriori_flag + initialize!(c, g) + return NonlinearExpression{V,n,T}(g, c, false, false, b.lower, b.upper, grad_sparsity) end +@inline has_value(d::NonlinearExpression) = d.has_value +@inline dep_subexpr_count(d::NonlinearExpression) = dep_subexpr_count(d.g) +@inline set_has_value!(d::NonlinearExpression, v::Bool) = (d.has_value = v; return ) +function _set_last_reverse!(d::NonlinearExpression{V, N, T}, v::Bool) where {V,N,T<:RelaxTag} + d.last_reverse = v; + return +end +function set_variable_storage!(d::NonlinearExpression, v::VariableValues{S}) where S<:Real + d.relax_cache.ic.v = v + return +end +@inbounds sparsity(d::NonlinearExpression) = sparsity(d.g, 1) +@inbounds set(d::NonlinearExpression{V,N,T}) where {V,N,T<:RelaxTag} = set(d.relax_cache, 1) +@inbounds info(d::NonlinearExpression{V,N,T}) where {V,N,T<:RelaxTag} = info(d.relax_cache, 1) +@inbounds num(d::NonlinearExpression{V,N,T}) where {V,N,T<:RelaxTag} = num(d.relax_cache, 1) +@inbounds is_num(d::NonlinearExpression) = is_num(d.relax_cache, 1) +var_num(d::NonlinearExpression{V,N,T}) where {V,N,T<:RelaxTag} = N + +mc_type(rc::NonlinearExpression{V,N,T}) where {V,N,T<:RelaxTag} = MC{N,T} + """ $(TYPEDEF) Stores a general nonlinear function with a buffer represented 
by the sum of a tape and a scalar affine function. """ -mutable struct BufferedNonlinearFunction{V} <: AbstractEAGOConstraint - expr::NonlinearExpression{V} +mutable struct BufferedNonlinearFunction{V,N,T<:RelaxTag} <: AbstractEAGOConstraint + ex::NonlinearExpression{V,N,T} saf::SAF - lower_bound::Float64 - upper_bound::Float64 - last_relax_convex::Bool - last_relax_concave::Bool - last_past_reverse::Bool - has_value::Bool end - -### -### Constructor definitions -### -function NonlinearExpression!(sub::JuMP._SubexpressionStorage, sub_sparsity_dict::Dict{Int64,Vector{Int64}}, - subexpr_indx::Int64, subexpr_linearity::Vector{JuMP._Derivatives.Linearity}, - tag::T) where T - nd = copy(sub.nd) - adj = copy(sub.adj) - const_values = copy(sub.const_values) - - lenx = length(nd) - numberstorage = zeros(lenx) - isnumber = fill(false, lenx) - - # creates tiepoint storage and populates the sparsity of subexpression - # not including dependent subexpressions - tpdict = Dict{Int64,Tuple{Int64,Int64,Int64,Int64}}() - tp1_count = 0 - tp2_count = 0 - for i = 1:lenx - # tiepoint storage part - node = @inbounds nd[i] - op = node.index - if double_tp(op) - tp1_count += 1 - tp2_count += 1 - tpdict[i] = (tp1_count, tp1_count, tp2_count, tp2_count) - elseif single_tp(op) - tp1_count += 1 - tpdict[i] = (tp1_count, tp1_count, -1, -1) - end - end - tp1storage = zeros(tp1_count) - tp2storage = zeros(tp1_count) - tp3storage = zeros(tp2_count) - tp4storage = zeros(tp2_count) - - linearity = JuMP._Derivatives.classify_linearity(nd, adj, subexpr_linearity) - - # counts variables in subexpression and collects list of subexpressions - dependent_subexpressions = Int64[] - variable_dict = Dict{Int,Bool}() - for (i,node) in enumerate(nd) - if node.nodetype === JuMP._Derivatives.VARIABLE - indx = node.index - if !haskey(variable_dict, indx) - variable_dict[indx] = true - end - end - if node.nodetype === JuMP._Derivatives.VALUE - indx = node.index - numberstorage[i] = const_values[indx] - end - if 
node.nodetype === JuMP._Derivatives.SUBEXPRESSION - indx = node.index - push!(dependent_subexpressions, indx) - end - end - grad_sparsity = collect(keys(variable_dict)) - unique!(dependent_subexpressions) - sort!(dependent_subexpressions) - - # adds sparsity of dependent subexpressions to sparsity of current subexpression - dependent_subexpression_count = length(dependent_subexpressions) - for i = 1:dependent_subexpression_count - dependent_indx = dependent_subexpressions[i] - append!(grad_sparsity, sub_sparsity_dict[dependent_indx]) - end - unique!(grad_sparsity) - sort!(grad_sparsity) - - # sets subexpression sparsity dictionary - sub_sparsity_dict[subexpr_indx] = copy(grad_sparsity) - - reverse_sparsity_length = grad_sparsity[end] - reverse_sparsity = zeros(Int64, reverse_sparsity_length) - current_grad_sparsity = grad_sparsity[1] - current_grad_sparsity_count = 1 - for i = 1:reverse_sparsity_length - if i == current_grad_sparsity - reverse_sparsity[i] = current_grad_sparsity_count - current_grad_sparsity_count += 1 - if current_grad_sparsity_count <= length(grad_sparsity) - current_grad_sparsity = grad_sparsity[current_grad_sparsity_count] - else - break - end - end - end - - dependent_variable_count = length(grad_sparsity) - N = dependent_variable_count - - cv_grad_buffer = zeros(dependent_variable_count) - cc_grad_buffer = zeros(dependent_variable_count) - - setstorage = fill(MC{N,T}(Interval(-Inf, Inf)), lenx) - subexpression = NonlinearExpression{MC{N,T}}(nd, adj, const_values, setstorage, numberstorage, - isnumber, zero(MC{N,T}), false, - tp1storage, tp2storage, - tp3storage, tp4storage, tpdict, grad_sparsity, - reverse_sparsity, dependent_variable_count, - dependent_subexpression_count, - dependent_subexpressions, JuMP._Derivatives.CONSTANT, - cv_grad_buffer, cc_grad_buffer) - return subexpression +function BufferedNonlinearFunction() + ex = NonlinearExpression() + saf = SAF(SAT[], 0.0) + return BufferedNonlinearFunction{MC{1,NS},1,NS}(ex, saf) end 
-function NonlinearExpression() - return NonlinearExpression{MC{1,NS}}(JuMP.NodeData[], spzeros(Bool, 1), Float64[], - MC{1,NS}[], Float64[], Bool[], zero(MC{1,NS}), false, - Float64[], Float64[], Float64[], Float64[], - Dict{Int64,Tuple{Int64,Int64,Int64,Int64}}(), - Int64[], Int64[], 0, 0, Int64[], JuMP._Derivatives.CONSTANT, - Float64[], Float64[]) +function BufferedNonlinearFunction(aux_info, rtype::RELAX_ATTRIBUTE, f::JuMP._FunctionStorage, b::MOI.NLPBoundsPair, + sub_sparsity::Dict{Int,Vector{Int}}, + subexpr_lin::Vector{JuMP._Derivatives.Linearity}, + op::OperatorRegistry, parameter_values, + tag::T, use_apriori_flag::Bool) where T <: RelaxTag + + ex = NonlinearExpression!(aux_info, rtype, f, b, sub_sparsity, -1, subexpr_lin, op, parameter_values, tag, use_apriori_flag) + n = length(sparsity(ex.g, 1)) + saf = SAF(SAT[SAT(0.0, VI(i)) for i = 1:n], 0.0) + V = relax_info(rtype, n, tag) + return BufferedNonlinearFunction{V,n,T}(ex, saf) end -function BufferedNonlinearFunction(func::JuMP._FunctionStorage, bnds::MOI.NLPBoundsPair, - sub_sparsity_dict::Dict{Int64,Vector{Int64}}, - subexpr_linearity::Vector{JuMP._Derivatives.Linearity}, - tag::T) where T <: RelaxTag - - nd = copy(func.nd) - adj = copy(func.adj) - const_values = copy(func.const_values) - - # sorted by JuMP, _FunctionStorage but amalagation of the sparsity of the - # function and the nonlinear expressions is not necessarily - # so we - grad_sparsity = copy(func.grad_sparsity) - for nd in func.nd - if nd.nodetype === JuMP._Derivatives.SUBEXPRESSION - append!(grad_sparsity, sub_sparsity_dict[nd.index]) - end - end - unique!(grad_sparsity) - sort!(grad_sparsity) - - reverse_sparsity_length = grad_sparsity[end] - reverse_sparsity = zeros(Int64, reverse_sparsity_length) - current_grad_sparsity = grad_sparsity[1] - current_grad_sparsity_count = 1 - for i = 1:reverse_sparsity_length - if i == current_grad_sparsity - reverse_sparsity[i] = current_grad_sparsity_count - current_grad_sparsity_count += 1 - if 
current_grad_sparsity_count <= length(grad_sparsity) - current_grad_sparsity = grad_sparsity[current_grad_sparsity_count] - else +function expand_sv!(out::Vector{Float64}, n::Int, m::Int, vs::Vector{Int}, gs::Vector{Int}, x::SVector{N,T}) where {N,T} + k = 1 + for q = 1:m + i = @inbounds vs[q] + for j = k:n + if i == @inbounds gs[j] + @inbounds out[j] = x[q] + k = j break end end end - - N = length(grad_sparsity) - dependent_variable_count = length(grad_sparsity) - - lenx = length(nd) - setstorage = fill(MC{N,T}(Interval(-Inf, Inf)), lenx) - numberstorage = zeros(lenx) - isnumber = fill(false, lenx) - - tpdict = Dict{Int64,Tuple{Int64,Int64,Int64,Int64}}() - tp1_count = 0 - tp2_count = 0 - for i = 1:lenx - node = @inbounds nd[i] - op = node.index - if double_tp(op) - tp1_count += 1 - tp2_count += 1 - tpdict[i] = (tp1_count, tp1_count, tp2_count, tp2_count) - elseif single_tp(op) - tp1_count += 1 - tpdict[i] = (tp1_count, tp1_count, -1, -1) - end - end - - for (i,node) in enumerate(nd) - if node.nodetype === JuMP._Derivatives.VALUE - indx = node.index - numberstorage[i] = const_values[indx] + nothing +end +function _load_subexprs!(d::RelaxCache{V,N,T}, g, subexpressions, dep_subexprs) where {V,N,T<:RelaxTag} + gs = sparsity(g, 1) + for (i,ds) in enumerate(dep_subexprs) + s = subexpressions[ds] + if is_num(s) + store_subexpression_num!(d, num(s), i) + else + vs = sparsity(s) + v = set(s) + m = var_num(s) + expand_sv!(d._cv_grad_buffer, N, m, vs, gs, v.cv_grad) + expand_sv!(d._cc_grad_buffer, N, m, vs, gs, v.cc_grad) + cvg = SVector{N,Float64}(d._cv_grad_buffer) + ccg = SVector{N,Float64}(d._cc_grad_buffer) + store_subexpression_set!(d, MC{N,T}(v.cv, v.cc, v.Intv, cvg, ccg, false), i) end end - - tp1storage = zeros(tp1_count) - tp2storage = zeros(tp1_count) - tp3storage = zeros(tp2_count) - tp4storage = zeros(tp2_count) - - dependent_subexpressions = copy(func.dependent_subexpressions) - dependent_subexpression_count = length(dependent_subexpressions) - - linearity = 
JuMP._Derivatives.classify_linearity(nd, adj, subexpr_linearity) - - cv_grad_buffer = zeros(dependent_variable_count) - cc_grad_buffer = zeros(dependent_variable_count) - - expression = NonlinearExpression{MC{N,T}}(nd, adj, const_values, setstorage, numberstorage, - isnumber, zero(MC{N,T}), false, - tp1storage, tp2storage, tp3storage, tp4storage, - tpdict, grad_sparsity, reverse_sparsity, - dependent_variable_count, dependent_subexpression_count, - dependent_subexpressions, JuMP._Derivatives.CONSTANT, - cv_grad_buffer, cc_grad_buffer) - - saf = SAF(SAT[SAT(0.0, VI(i)) for i = 1:length(grad_sparsity)], 0.0) - - lower_bound = bnds.lower - upper_bound = bnds.upper - - last_relax_convex = false - last_relax_concave = false - last_past_reverse = false - has_value = false - - return BufferedNonlinearFunction{MC{N,T}}(expression, saf, lower_bound, upper_bound, - last_relax_convex, last_relax_concave, - last_past_reverse, has_value) + return end -function BufferedNonlinearFunction() - return BufferedNonlinearFunction{MC{1,NS}}(NonlinearExpression(), SAF(SAT[], 0.0), - -Inf, Inf, false, false, false, false) +@inline _set_last_reverse!(d::BufferedNonlinearFunction{V,N,T}, v::Bool) where {V,N,T<:RelaxTag} = _set_last_reverse!(d.ex, v) +function set_variable_storage!(d::BufferedNonlinearFunction{V,N,T}, v::VariableValues{Float64}) where {V,N,T<:RelaxTag} + set_variable_storage!(d.ex, v) end -function set_intersect_value!(expr::NonlinearExpression{V}, value) where V - if !expr.isnumber[1] - expr.value = expr.setstorage[1] ∩ value - expr.setstorage[1] = expr.value - end +has_value(d::BufferedNonlinearFunction) = has_value(d.ex) +dep_subexpr_count(d::BufferedNonlinearFunction) = dep_subexpr_count(d.ex) +set_has_value!(d::BufferedNonlinearFunction, v::Bool) = set_has_value!(d.ex, v) +sparsity(d::BufferedNonlinearFunction) = sparsity(d.ex) +set(d::BufferedNonlinearFunction{V,N,T}) where {V,N,T<:RelaxTag} = set(d.ex) +num(d::BufferedNonlinearFunction{V,N,T}) where {V,N,T<:RelaxTag} 
= num(d.ex) +lower_bound(d::BufferedNonlinearFunction{V,N,T}) where {V,N,T<:RelaxTag} = d.ex.lower_bound +upper_bound(d::BufferedNonlinearFunction{V,N,T}) where {V,N,T<:RelaxTag} = d.ex.upper_bound - return nothing -end +# returns the interval bounds associated with the set +interval(d::BufferedNonlinearFunction{V,N,T}) where {V,N,T<:RelaxTag} = Interval{Float64}(set(d)) +is_num(d::BufferedNonlinearFunction) = is_num(d.ex) -function set_node_flag!(f::BufferedNonlinearFunction{V}) where V - f.last_relax_convex = false - f.last_relax_concave = false +mc_type(rc::BufferedNonlinearFunction{V,N,T}) where {V,N,T<:RelaxTag} = MC{N,T} - return nothing -end -### -### Defines evaluator storage structure -### """ Evaluator @@ -328,105 +175,70 @@ Checks that the resulting value should be a number... $(TYPEDFIELDS) """ Base.@kwdef mutable struct Evaluator <: MOI.AbstractNLPEvaluator - - user_operators::JuMP._Derivatives.UserOperatorRegistry = JuMP._Derivatives.UserOperatorRegistry() + user_operators::OperatorRegistry = OperatorRegistry() has_user_mv_operator::Bool = false num_mv_buffer::Vector{Float64} = Float64[] parameter_values::Vector{Float64} = Float64[] - - current_node::NodeBB = NodeBB() - treat_x_as_number = Bool[] - lower_variable_bounds::Vector{Float64} = Float64[] - upper_variable_bounds::Vector{Float64} = Float64[] - x::Vector{Float64} = Float64[] - node_to_variable_map::Vector{Int64} = Int64[] - variable_to_node_map::Vector{Int64} = Int64[] - - variable_count::Int64 = 0 - node_count::Int64 = 0 - - "Context used to guard against domain violations & branch on these violations if necessary" + node::NodeBB = NodeBB() + variable_values::VariableValues{Float64} = VariableValues{Float64}() subgrad_tighten::Bool = false reverse_subgrad_tighten::Bool = false - ctx::GuardCtx = GuardCtx() - subexpressions::Vector{NonlinearExpression} = NonlinearExpression[] - subexpressions_eval::Vector{Bool} = Bool[] - + subexpressions_eval::Vector{Bool} = Bool[] is_post::Bool = false 
is_intersect::Bool = false is_first_eval::Bool = false interval_intersect::Bool = false - subgrad_tol::Float64 = 1E-10 + relax_type::RelaxType = STD_RELAX + pass_number::Int = 0 end +set_variable_values!(d::Evaluator, v) = d.variable_values = v """ $(FUNCTIONNAME) Sets the current node in the Evaluator structure. """ -function set_node!(evaluator::Evaluator, n::NodeBB) - - evaluator.current_node = NodeBB(n) - node_to_variable_map = evaluator.node_to_variable_map - node_lower_bounds = n.lower_variable_bounds - node_upper_bounds = n.upper_variable_bounds - eval_lower_bounds = evaluator.lower_variable_bounds - eval_upper_bounds = evaluator.upper_variable_bounds - - for i = 1:length(evaluator.current_node) - full_variable_index = node_to_variable_map[i] - eval_lower_bounds[full_variable_index] = node_lower_bounds[i] - eval_upper_bounds[full_variable_index] = node_upper_bounds[i] +function set_node!(d::Evaluator, n::NodeBB) + d.node = NodeBB(n) + for i = 1:length(n) + vi = d.variable_values.node_to_variable_map[i] + d.variable_values.lower_variable_bounds[vi] = n.lower_variable_bounds[i] + d.variable_values.upper_variable_bounds[vi] = n.upper_variable_bounds[i] end - fill!(evaluator.subexpressions_eval, false) - evaluator.is_first_eval = true - - #@show node_lower_bounds - #@show node_upper_bounds - #@show eval_lower_bounds - #@show eval_upper_bounds - + fill!(d.subexpressions_eval, false) + d.is_first_eval = true return nothing end function retrieve_node(d::Evaluator) - cn = d.current_node - node_to_variable_map = d.node_to_variable_map - - return NodeBB(copy(d.lower_variable_bounds[node_to_variable_map]), - copy(d.upper_variable_bounds[node_to_variable_map]), - cn.lower_bound, cn.upper_bound, cn.depth, cn.id) + n = d.node + nv_map = d.variable_values.node_to_variable_map + return NodeBB(copy(d.variable_values.lower_variable_bounds[nv_map]), + copy(d.variable_values.upper_variable_bounds[nv_map]), + copy(n.is_integer), n.continuous, + n.lower_bound, n.upper_bound, 
n.depth, n.cont_depth, n.id, + n.branch_direction, n.last_branch, n.branch_extent) end - -function retrieve_x!(out::Vector{Float64}, d::Evaluator) - x = d.x - node_to_variable_map = d.node_to_variable_map - for i in 1:length(node_to_variable_map) - vindx = node_to_variable_map[i] - out[i] = x[vindx] - end - - return nothing +@inline function _get_x!(::Type{BranchVar}, out::Vector{Float64}, d::Evaluator) + return _get_x!(BranchVar, out, d.variable_values) end - -# Returns false if subexpression has been evaluated at current reference point -prior_eval(d::Evaluator, i::Int64) = d.subexpressions_eval[i] +prior_eval(d::Evaluator, i::Int) = d.subexpressions_eval[i] #= Assumes the sparsities are sorted... =# -function copy_subexpression_value!(k::Int, op::Int, subexpression::NonlinearExpression{MC{N1,T}}, - numvalued::Vector{Bool}, numberstorage::Vector{Float64}, setstorage::Vector{MC{N2,T}}, - cv_buffer::Vector{Float64}, cc_buffer::Vector{Float64}, - func_sparsity::Vector{Int64}) where {N1, N2, T <: RelaxTag} +function copy_subexpression_value!(k::Int, op::Int, subexpression::NonlinearExpression{V,MC{N1,T}}, + numvalued::Vector{Bool}, numberstorage::Vector{S}, setstorage::Vector{MC{N2,T}}, + cv_buffer::Vector{S}, cc_buffer::Vector{S}, + func_sparsity::Vector{Int}) where {V, N1, N2, S, T <: RelaxTag} # fill cv_grad/cc_grad buffers sub_sparsity = subexpression.grad_sparsity sset = subexpression.setstorage[1] - fill!(cv_buffer, 0.0) - fill!(cc_buffer, 0.0) + fill!(cv_buffer, zero(S)) + fill!(cc_buffer, zero(S)) sub_sparsity_count = 1 subs_index = @inbounds sub_sparsity[1] @@ -448,12 +260,11 @@ function copy_subexpression_value!(k::Int, op::Int, subexpression::NonlinearExpr return nothing end -function eliminate_fixed_variables!(f::NonlinearExpression{V}, v::Vector{VariableInfo}) where V +function eliminate_fixed_variables!(f::NonlinearExpression{V,N,T}, v::Vector{VariableInfo}) where {V,N,T<:RelaxTag} num_constants = length(f.const_values) indx_to_const_loc = 
Dict{Int,Int}() for i = 1:length(expr.nd) nd = @inbounds expr.nd[i] - # Assumes MOI Variable have been eliminated previously... if nd.nodetype === JuMP._Derivatives.VARIABLE indx = nd.index if v[indx].is_fixed @@ -470,13 +281,57 @@ function eliminate_fixed_variables!(f::NonlinearExpression{V}, v::Vector{Variabl end end end - return nothing end -function eliminate_fixed_variables!(f::BufferedNonlinearFunction{V}, v::Vector{VariableInfo}) where V - eliminate_fixed_variables!(f.expr, v) +eliminate_fixed_variables!(f::BufferedNonlinearFunction{N,T}, v::Vector{VariableInfo}) where {N,T<:RelaxTag} = eliminate_fixed_variables!(f.ex, v) +f_init_prop!(t, g::DAT, c::RelaxCache, flag::Bool) = flag ? f_init!(t, g, c) : fprop!(t, g, c) +function forward_pass!(z::Evaluator, d::NonlinearExpression{V,N,T}) where {V,N,T<:RelaxTag} + #println("ran forward pass") + b = d.relax_cache + update_box_and_pnt!(b.ic.v, z.variable_values, z.is_first_eval) + if b.use_apriori_mul + s = sparsity(d) + v = b.ic.v + x = v.x + for j in s + if isone(z.pass_number) + v.x0[j] = x[j] + end + end + x0 = v.x0 + isempty(b.dp) && (b.dp = zeros(length(x));) + isempty(b.dP) && (b.dP = zeros(Interval{Float64}, length(x));) + isempty(b.p_rel) && (b.p_rel = zeros(length(x));) + isempty(b.p_diam) && (b.p_diam = zeros(length(x));) + for j in s + l = lbd(b, j) + u = ubd(b, j) + b.dp[j] = x[j] - x0[j] + b.dP[j] = Interval(l, u) - x0[j] + b.p_rel[j] = (x[j] - 0.5*(u + l))/(0.5*(u - l)) + b.p_diam[j] = u - l + end + end + for i = 1:dep_subexpr_count(d) + j = d.g.dependent_subexpressions[i] + forward_pass!(z, z.subexpressions[j]) + end + _load_subexprs!(d.relax_cache, d.g, z.subexpressions, d.g.dependent_subexpressions) + (z.relax_type == STD_RELAX) && (return f_init_prop!(Relax(), d.g, d.relax_cache, z.is_first_eval)) + (z.relax_type == MC_AFF_RELAX) && (return f_init_prop!(RelaxAA(d.grad_sparsity), d.g, d.relax_cache, z.is_first_eval)) + return f_init_prop!(RelaxMulEnum(d.grad_sparsity), d.g, d.relax_cache, 
z.is_first_eval) +end + +function forward_pass!(x::Evaluator, d::BufferedNonlinearFunction{V,N,T}) where {V,N,T<:RelaxTag} + forward_pass!(x, d.ex) + set_has_value!(d, true) + _set_last_reverse!(d, false) + return end -include("forward_pass.jl") -include("reverse_pass.jl") +rprop!(::Relax, x::Evaluator, d::NonlinearExpression{V,N,T}) where {V,N,T<:RelaxTag} = rprop!(Relax(), d.g, d.relax_cache) +function rprop!(::Relax, x::Evaluator, d::BufferedNonlinearFunction{V,N,T}) where {V,N,T<:RelaxTag} + _set_last_reverse!(d, true) + return rprop!(Relax(), x, d.ex) end diff --git a/src/eago_optimizer/functions/nonlinear/register_special.jl b/src/eago_optimizer/functions/nonlinear/register_special.jl index 798ddde6..4ad15706 100644 --- a/src/eago_optimizer/functions/nonlinear/register_special.jl +++ b/src/eago_optimizer/functions/nonlinear/register_special.jl @@ -29,8 +29,9 @@ function register_eago_operators!(m::JuMP.Model) JuMP.register(m, :bisigmoid, 1, bisigmoid, McCormick.bisigmoid_deriv, McCormick.bisigmoid_deriv2) JuMP.register(m, :softsign, 1, softsign, McCormick.softsign_deriv, McCormick.softsign_deriv2) JuMP.register(m, :gelu, 1, gelu, McCormick.gelu_deriv, McCormick.gelu_deriv2) - JuMP.register(m, :swish1, 1, swish1, McCormick.swish1_deriv, McCormick.swish1_deriv2) + JuMP.register(m, :swish, 1, swish, McCormick.swish_deriv, McCormick.swish_deriv2) JuMP.register(m, :xabsx, 1, xabsx, McCormick.xabsx_deriv, McCormick.xabsx_deriv2) + JuMP.register(m, :logcosh, 1, logcosh, McCormick.logcosh_deriv, McCormick.logcosh_deriv2) # register activatio functions w/ parameters d_param_relu = JuMP._UserFunctionEvaluator(x -> param_relu(x...), diff --git a/src/eago_optimizer/functions/nonlinear/reverse_pass.jl b/src/eago_optimizer/functions/nonlinear/reverse_pass.jl deleted file mode 100644 index 22620059..00000000 --- a/src/eago_optimizer/functions/nonlinear/reverse_pass.jl +++ /dev/null @@ -1,814 +0,0 @@ -# Copyright (c) 2018: Matthew Wilhelm & Matthew Stuber. 
-# This code is licensed under MIT license (see LICENSE.md for full details) -############################################################################# -# EAGO -# A development environment for robust and global optimization -# See https://github.com/PSORLab/EAGO.jl -############################################################################# -# src/eago_optimizer/functions/nonlinear/reverse_pass.jl -# Functions used to compute reverse pass of nonlinear functions. -############################################################################# - -# maximum number to perform reverse operation on associative term by summing -# and evaluating pairs remaining terms not reversed -const MAX_ASSOCIATIVE_REVERSE = 6 -const REVERSE_DEBUG = false - -""" -$(FUNCTIONNAME) - -Updates storage tapes with reverse evalution of node representing `n = x + y` which updates x & y. -""" -function reverse_plus_binary!(k::Int64, children_arr::Vector{Int64}, children_idx::UnitRange{Int64}, - numvalued::Vector{Bool}, numberstorage::Vector{Float64}, setstorage::Vector{MC{N,T}}, - x::Vector{Float64}, lbd::Vector{Float64}, ubd::Vector{Float64}, sparsity::Vector{Int}, - subgrad_tol::Float64, is_post::Bool) where {N, T<:RelaxTag} - - REVERSE_DEBUG && println("--- start reverse plus binary ---") - - # extract values for k - argk_is_number = numvalued[k] - if !argk_is_number - setk = setstorage[k] - end - - # get row indices - idx1 = first(children_idx) - idx2 = last(children_idx) - - # extract values for argument 1 - arg1_index = children_arr[idx1] - arg1_is_number = numvalued[arg1_index] - if arg1_is_number - set1 = zero(MC{N,T}) - num1 = numberstorage[arg1_index] - else - num1 = 0.0 - set1 = setstorage[arg1_index] - end - - # extract values for argument 2 - arg2_index = children_arr[idx2] - arg2_is_number = numvalued[arg2_index] - if arg2_is_number - num2 = numberstorage[arg2_index] - set2 = zero(MC{N,T}) - else - set2 = setstorage[arg2_index] - num2 = 0.0 - end - - if !arg1_is_number && 
arg2_is_number - c, a, b = IntervalContractors.plus_rev(setk.Intv, set1.Intv, num2) - - elseif arg1_is_number && !arg2_is_number - c, a, b = IntervalContractors.plus_rev(setk.Intv, num1, set2.Intv) - - else - c, a, b = IntervalContractors.plus_rev(setk.Intv, set1.Intv, set2.Intv) - end - - #= - if isnan(a) || isnan(b) - if !arg1_is_number && arg2_is_number - c, a, b = plus_rev(setk.Intv, MC{N,T}(set1.Intv), num2) - - elseif arg1_is_number && !arg2_is_number - c, a, b = plus_rev(MC{N,T}(setk.Intv), num1, MC{N,T}(set2.Intv)) - - else - c, a, b = plus_rev(MC{N,T}(setk.Intv), MC{N,T}(set1.Intv), MC{N,T}(set2.Intv)) - end - end - =# - - if REVERSE_DEBUG - println("val out = $(c)") - println("arg1 out = $(a)") - println("arg2 out = $(b)") - end - - if !arg1_is_number - if isempty(a) - return false - else - setstorage[arg1_index] = MC{N,T}(a) #set1 ∩ a - end - REVERSE_DEBUG && println("setstorage[arg1_index] = $(setstorage[arg1_index])") - end - - if !arg2_is_number - if isempty(b) - return false - else - setstorage[arg2_index] = MC{N,T}(b) # set2 ∩ b - end - REVERSE_DEBUG && println("setstorage[arg2_index] = $(setstorage[arg2_index])") - end - - return true -end - -""" -$(FUNCTIONNAME) - -Updates storage tapes with reverse evalution of node representing `n = +(x,y,z...)` which updates x, y, z and so on. 
-""" -function reverse_plus_narity!(k::Int64, children_arr::Vector{Int64}, children_idx::UnitRange{Int64}, - numvalued::Vector{Bool}, numberstorage::Vector{Float64}, setstorage::Vector{MC{N,T}}, - x::Vector{Float64}, lbd::Vector{Float64}, ubd::Vector{Float64}, sparsity::Vector{Int}, - subgrad_tol::Float64, is_post::Bool) where {N, T<:RelaxTag} - - REVERSE_DEBUG && println("--- start reverse plus narity ---") - - continue_flag = true - argk_is_number = numvalued[k] - if !argk_is_number - setk = setstorage[k] - end - - # out loops makes a temporary sum (minus one argument) - # a reverse is then compute with respect to this argument - active_count_number = 0 - for idx in children_idx - active_idx = children_arr[idx] - - # don't contract a number valued argument - active_arg_is_number = numvalued[active_idx] - active_arg_is_number && continue - - if active_count_number >= MAX_ASSOCIATIVE_REVERSE - break - end - - tmp_sum = zero(MC{N,T}) - active_count_number += 1 - for nidx in children_idx - inactive_idx = children_arr[nidx] - if inactive_idx != active_idx - if numvalued[inactive_idx] - tmp_sum += numberstorage[inactive_idx] - else - tmp_sum += setstorage[inactive_idx] - end - end - end - - active_set = setstorage[active_idx] - c, a, b = IntervalContractors.plus_rev(setk.Intv, active_set.Intv, tmp_sum.Intv) - - if REVERSE_DEBUG - println("val out = $(c)") - println("arg1 out = $(a)") - println("arg2 out = $(b)") - end - - if isempty(a) - return false - else - setstorage[active_idx] = MC{N,T}(a) # setk ∩ a - end - end - - return true -end - -""" -$(FUNCTIONNAME) - -Updates storage tapes with reverse evalution of node representing `n = x * y` which updates x & y. 
-""" -function reverse_multiply_binary!(k::Int64, children_arr::Vector{Int64}, children_idx::UnitRange{Int64}, - numvalued::Vector{Bool}, numberstorage::Vector{Float64}, setstorage::Vector{MC{N,T}}, - x::Vector{Float64}, lbd::Vector{Float64}, ubd::Vector{Float64}, sparsity::Vector{Int}, - subgrad_tol::Float64, is_post::Bool) where {N, T<:RelaxTag} - - REVERSE_DEBUG && println(" is_post = $is_post") - - REVERSE_DEBUG && println("--- start reverse mult binary ---") - - # extract values for k - argk_is_number = numvalued[k] - if !argk_is_number - setk = setstorage[k] - REVERSE_DEBUG && println("setk = $setk") - end - - # get row indices - idx1 = first(children_idx) - idx2 = last(children_idx) - - # extract values for argument 1 - arg1_index = children_arr[idx1] - arg1_is_number = numvalued[arg1_index] - if arg1_is_number - set1 = zero(MC{N,T}) - num1 = numberstorage[arg1_index] - else - num1 = 0.0 - set1 = setstorage[arg1_index] - end - - # extract values for argument 2 - arg2_index = children_arr[idx2] - arg2_is_number = numvalued[arg2_index] - if arg2_is_number - num2 = numberstorage[arg2_index] - set2 = zero(MC{N,T}) - else - set2 = setstorage[arg2_index] - num2 = 0.0 - end - - if !arg1_is_number && arg2_is_number - c, a, b = IntervalContractors.mul_rev(setk.Intv, set1.Intv, num2) - - elseif arg1_is_number && !arg2_is_number - c, a, b = IntervalContractors.mul_rev(setk.Intv, num1, set2.Intv) - - else - c, a, b = IntervalContractors.mul_rev(setk.Intv, set1.Intv, set2.Intv) - end - - #= - if isnan(a) || isnan(b) - if !arg1_is_number && arg2_is_number - c, a, b = mult_rev(MC{N,T}(setk.Intv), MC{N,T}(set1.Intv), num2) - - elseif arg1_is_number && !arg2_is_number - c, a, b = mult_rev(MC{N,T}(setk.Intv), num1, MC{N,T}(set2.Intv)) - - else - c, a, b = mult_rev(MC{N,T}(setk.Intv), MC{N,T}(set1.Intv), MC{N,T}(set2.Intv)) - end - end - =# - - if REVERSE_DEBUG - println("val out = $(c)") - println("arg1 out = $(a)") - println("arg2 out = $(b)") - end - - if !arg1_is_number - 
if isempty(a) - return false - else - setstorage[arg1_index] = MC{N,T}(a) #set1 ∩ a - end - REVERSE_DEBUG && println("setstorage[arg1_index] = $(setstorage[arg1_index])") - end - - if !arg2_is_number - if isempty(b) - return false - else - setstorage[arg2_index] = MC{N,T}(b) #set2 ∩ b - end - REVERSE_DEBUG && println("setstorage[arg2_index] = $(setstorage[arg2_index])") - end - - return true -end - -""" -$(FUNCTIONNAME) - -Updates storage tapes with reverse evalution of node representing `n = *(x,y,z...)` which updates x, y, z and so on. -""" -function reverse_multiply_narity!(k::Int64, children_arr::Vector{Int64}, children_idx::UnitRange{Int64}, - numvalued::Vector{Bool}, numberstorage::Vector{Float64}, setstorage::Vector{MC{N,T}}, - x::Vector{Float64}, lbd::Vector{Float64}, ubd::Vector{Float64}, sparsity::Vector{Int}, - subgrad_tol::Float64, is_post::Bool) where {N, T<:RelaxTag} - - REVERSE_DEBUG && println("--- start reverse mult narity ---") - continue_flag = true - argk_is_number = numvalued[k] - if !argk_is_number - setk = setstorage[k] - end - - # out loops makes a temporary sum (minus one argument) - # a reverse is then compute with respect to this argument - active_count_number = 0 - for idx in children_idx - active_idx = children_arr[idx] - - # don't contract a number valued argument - active_arg_is_number = numvalued[active_idx] - active_arg_is_number && continue - - if active_count_number >= MAX_ASSOCIATIVE_REVERSE - break - end - - tmp_mul = one(MC{N,T}) - active_count_number += 1 - for nidx in children_idx - inactive_idx = children_arr[nidx] - if inactive_idx != active_idx - if numvalued[inactive_idx] - tmp_mul *= numberstorage[inactive_idx] - else - tmp_mul *= setstorage[inactive_idx] - end - end - end - - active_set = setstorage[active_idx] - c, a, b = IntervalContractors.mul_rev(setk.Intv, active_set.Intv, tmp_mul.Intv) - - if REVERSE_DEBUG - println("val out = $(c)") - println("arg1 out = $(a)") - println("arg2 out = $(b)") - end - - if isempty(a) 
- return false - else - setstorage[active_idx] = MC{N,T}(a) #setstorage[active_idx] ∩ a - end - end - - return true -end - -function reverse_minus!(k::Int64, children_arr::Vector{Int64}, children_idx::UnitRange{Int64}, - numvalued::Vector{Bool}, numberstorage::Vector{Float64}, setstorage::Vector{MC{N,T}}, - x::Vector{Float64}, lbd::Vector{Float64}, ubd::Vector{Float64}, sparsity::Vector{Int}, - subgrad_tol::Float64, is_post::Bool) where {N, T<:RelaxTag} - - REVERSE_DEBUG && println("--- start reverse minus ---") - - #argk_index = children_arr[k] - argk_is_number = numvalued[k] - if !argk_is_number - setk = setstorage[k] - end - - # don't perform a reverse pass if the output was a number - if argk_is_number - return true - end - - # get row indices - idx1 = first(children_idx) - idx2 = last(children_idx) - - # extract values for argument 1 - arg1_index = children_arr[idx1] - arg1_is_number = numvalued[arg1_index] - if arg1_is_number - set1 = zero(MC{N,T}) - num1 = numberstorage[arg1_index] - else - num1 = 0.0 - set1 = setstorage[arg1_index] - end - - # extract values for argument 2 - arg2_index = children_arr[idx2] - arg2_is_number = numvalued[arg2_index] - if arg2_is_number - num2 = numberstorage[arg2_index] - set2 = zero(MC{N,T}) - else - set2 = setstorage[arg2_index] - num2 = 0.0 - end - - if !arg1_is_number && arg2_is_number - #c, a, b = minus_rev(setk, set1, num2) - c, a, b = IntervalContractors.minus_rev(setk.Intv, set1.Intv, num2) - - elseif arg1_is_number && !arg2_is_number - #c, a, b = minus_rev(setk, num1, set2) - c, a, b = IntervalContractors.minus_rev(setk.Intv, num1, set2.Intv) - - else - #c, a, b = minus_rev(setk, set1, set2) - c, a, b = IntervalContractors.minus_rev(setk.Intv, set1.Intv, set2.Intv) - end - - #= - if isnan(a) || isnan(b) - if !arg1_is_number && arg2_is_number - c, a, b = minus_rev(MC{N,T}(setk.Intv), MC{N,T}(set1.Intv), num2) - - elseif arg1_is_number && !arg2_is_number - c, a, b = minus_rev(MC{N,T}(setk.Intv), num1, 
MC{N,T}(set2.Intv)) - - else - c, a, b = minus_rev(MC{N,T}(setk.Intv), MC{N,T}(set1.Intv), MC{N,T}(set2.Intv)) - end - end - =# - - if REVERSE_DEBUG - println("val out = $(c)") - println("arg1 out = $(a)") - println("arg2 out = $(b)") - end - - if !arg1_is_number - if isempty(a) - return false - else - setstorage[arg1_index] = MC{N,T}(a) #set1 ∩ a - end - REVERSE_DEBUG && println("setstorage[arg1_index] = $(setstorage[arg1_index])") - end - - if !arg2_is_number - if isempty(b) - return false - else - setstorage[arg2_index] = MC{N,T}(b) #set2 ∩ b - end - REVERSE_DEBUG && println("setstorage[arg2_index] = $(setstorage[arg2_index])") - end - - return true -end - -function reverse_power!(k::Int64, children_arr::Vector{Int64}, children_idx::UnitRange{Int64}, - numvalued::Vector{Bool}, numberstorage::Vector{Float64}, setstorage::Vector{MC{N,T}}, - x::Vector{Float64}, lbd::Vector{Float64}, ubd::Vector{Float64}, sparsity::Vector{Int}, - subgrad_tol::Float64, is_post::Bool) where {N, T<:RelaxTag} - - REVERSE_DEBUG && println("--- start reverse power ---") - # extract values for k - argk_is_number = numvalued[k] - if !argk_is_number - setk = setstorage[k] - end - - # don't perform a reverse pass if the output was a number - if argk_is_number - return true - end - - # get row indices - idx1 = first(children_idx) - idx2 = last(children_idx) - - # extract values for argument 1 - arg1_index = children_arr[idx1] - arg1_is_number = numvalued[arg1_index] - if arg1_is_number - set1 = zero(MC{N,T}) - num1 = numberstorage[arg1_index] - else - num1 = 0.0 - set1 = setstorage[arg1_index] - end - - # extract values for argument 2 - arg2_index = children_arr[idx2] - arg2_is_number = numvalued[arg2_index] - if arg2_is_number - num2 = numberstorage[arg2_index] - set2 = zero(MC{N,T}) - else - set2 = setstorage[arg2_index] - num2 = 0.0 - end - - if !arg1_is_number && arg2_is_number - #c, a, b = power_rev(setk, set1, num2) - c, a, b = IntervalContractors.power_rev(setk.Intv, set1.Intv, num2) - 
- elseif arg1_is_number && !arg2_is_number - #c, a, b = power_rev(setk, num1, set2) - c, a, b = IntervalContractors.power_rev(setk.Intv, num1, set2.Intv) - - else - #c, a, b = power_rev(setk, set1, set2) - c, a, b = IntervalContractors.power_rev(setk.Intv, set1.Intv, set2.Intv) - end - - #= - if isnan(a) || isnan(b) - if !arg1_is_number && arg2_is_number - c, a, b = power_rev(MC{N,T}(setk.Intv), MC{N,T}(set1.Intv), num2) - - elseif arg1_is_number && !arg2_is_number - c, a, b = power_rev(MC{N,T}(setk.Intv), num1, MC{N,T}(set2.Intv)) - - else - c, a, b = power_rev(MC{N,T}(setk.Intv), MC{N,T}(set1.Intv), MC{N,T}(set2.Intv)) - end - end - =# - - if REVERSE_DEBUG - println("val out = $(c)") - println("arg1 out = $(a)") - println("arg2 out = $(b)") - end - - if !arg1_is_number - if isempty(a) - return false - else - setstorage[arg1_index] = MC{N,T}(a) #setstorage[arg1_index] ∩ a - end - end - - if !arg2_is_number - if isempty(b) - return false - else - setstorage[arg2_index] = MC{N,T}(b) #setstorage[arg2_index] ∩ b - end - end - - return true -end - -function reverse_divide!(k::Int64, children_arr::Vector{Int64}, children_idx::UnitRange{Int64}, - numvalued::Vector{Bool}, numberstorage::Vector{Float64}, setstorage::Vector{MC{N,T}}, - x::Vector{Float64}, lbd::Vector{Float64}, ubd::Vector{Float64}, sparsity::Vector{Int}, - subgrad_tol::Float64, is_post::Bool) where {N, T<:RelaxTag} - - REVERSE_DEBUG && println("--- start reverse divide ---") - - # extract values for k - argk_is_number = numvalued[k] - if !argk_is_number - setk = setstorage[k] - end - - # don't perform a reverse pass if the output was a number - if argk_is_number - return true - end - - # get row indices - idx1 = first(children_idx) - idx2 = last(children_idx) - - # extract values for argument 1 - arg1_index = children_arr[idx1] - arg1_is_number = numvalued[arg1_index] - if arg1_is_number - set1 = zero(MC{N,T}) - num1 = numberstorage[arg1_index] - else - num1 = 0.0 - set1 = setstorage[arg1_index] - end - - # 
extract values for argument 2 - arg2_index = children_arr[idx2] - arg2_is_number = numvalued[arg2_index] - if arg2_is_number - num2 = numberstorage[arg2_index] - set2 = zero(MC{N,T}) - else - set2 = setstorage[arg2_index] - num2 = 0.0 - end - - if !arg1_is_number && arg2_is_number - c, a, b = IntervalContractors.div_rev(setk.Intv, set1.Intv, num2) - - elseif arg1_is_number && !arg2_is_number - c, a, b = IntervalContractors.div_rev(setk.Intv, num1, set2.Intv) - - else - c, a, b = IntervalContractors.div_rev(setk.Intv, set1.Intv, set2.Intv) - end - - #= - if isnan(a) || isnan(b) - if !arg1_is_number && arg2_is_number - c, a, b = div_rev(MC{N,T}(setk.Intv), MC{N,T}(set1.Intv), num2) - - elseif arg1_is_number && !arg2_is_number - c, a, b = div_rev(MC{N,T}(setk.Intv), num1, MC{N,T}(set2.Intv)) - - else - c, a, b = div_rev(MC{N,T}(setk.Intv), MC{N,T}(set1.Intv), MC{N,T}(set2.Intv)) - end - end - =# - - if REVERSE_DEBUG - println("val out = $(c)") - println("arg1 out = $(a)") - println("arg2 out = $(b)") - end - - if !arg1_is_number - if isempty(a) - return false - else - setstorage[arg1_index] = MC{N,T}(a) #setstorage[arg1_index] ∩ a - end - end - - if !arg2_is_number - if isempty(b) - return false - else - setstorage[arg2_index] = MC{N,T}(b) #setstorage[arg2_index] ∩ b - end - end - - return true -end - -function reverse_univariate!(k::Int64, op::Int64, arg_indx::Int64, setstorage::Vector{MC{N,T}}, x::Vector{Float64}, - lbd::Vector{Float64}, ubd::Vector{Float64}, sparsity::Vector{Int}, - subgrad_tol::Float64, is_post::Bool) where {N, T<:RelaxTag} - - REVERSE_DEBUG && println("--- start reverse reverse_univariate ---") - valset = setstorage[k] - argset = setstorage[arg_indx] - - REVERSE_DEBUG && println("val set = $(valset)") - REVERSE_DEBUG && println("arg set = $(argset)") - - #a, b = eval_univariate_set_reverse(op, valset, argset) - - a, b = eval_univariate_set_reverse(op, valset.Intv, argset.Intv) - - REVERSE_DEBUG && println("val out = $(a)") - REVERSE_DEBUG && 
println("arg out = $(b)") - - if isempty(b) - return false - else - setstorage[arg_indx] = MC{N,T}(b) #setstorage[arg_indx] ∩ b - end - - return true -end - -function reverse_set_subexpression!(k::Int64, op::Int64, subexpressions::Vector{NonlinearExpression}, - numvalued::Vector{Bool}, numberstorage::Vector{Float64}, - setstorage::Vector{MC{N,T}}, cv_buffer::Vector{Float64}, - cc_buffer::Vector{Float64}, func_sparsity::Vector{Int64}) where {N, T<:RelaxTag} - - subexpression = subexpressions[op] - isa_number = subexpression.is_number[1] - numvalued[k] = isa_number - - return nothing -end - -""" -Performs a reverse McCormick/interval pass. If a NaN value is computed for the McCormick relaxation then the -routine defaults to the interval value instead. - -There is a tacit assumption here that an abstract tree structure is used for propagation. If a cse structure is -eventually used then each reverse_xxx function will need to keep track of each variables state. -""" -function reverse_pass_kernel!(nd::Vector{JuMP.NodeData}, adj::SparseMatrixCSC{Bool,Int64}, x::Vector{Float64}, - lbd::Vector{Float64}, ubd::Vector{Float64}, sparsity::Vector{Int}, - setstorage::Vector{MC{N,T}}, subgrad_tol::Float64, - numberstorage::Vector{Float64}, numvalued::Vector{Bool}, - is_post_input::Bool) where {N, T<:RelaxTag} - - children_arr = rowvals(adj) - continue_flag = true - is_post = is_post_input - - for k = 1:length(nd) - - REVERSE_DEBUG && println(" ") - nod = nd[k] - ntype = nod.nodetype - nvalued = numvalued[k] - - if ntype == JuMP._Derivatives.VALUE || ntype == JuMP._Derivatives.LOGIC || - ntype == JuMP._Derivatives.COMPARISON || ntype == JuMP._Derivatives.PARAMETER || - ntype == JuMP._Derivatives.EXTRA || ntype == JuMP._Derivatives.SUBEXPRESSION - continue - - elseif nod.nodetype == JuMP._Derivatives.VARIABLE - op = nod.index - REVERSE_DEBUG && println("--- start reverse reverse variable ---") - variable_interval = setstorage[k].Intv - lower_interval = variable_interval.lo - 
upper_interval = variable_interval.hi - - # update - prior_x = x[op] - if lower_interval > prior_x - x[op] = 0.5*(lower_interval + upper_interval) - is_post = false - end - if upper_interval < prior_x - x[op] = 0.5*(lower_interval + upper_interval) - is_post = false - end - lbd[op] = lower_interval - ubd[op] = upper_interval - - REVERSE_DEBUG && println("variable_rev[$op][$k] at k = $k -> $(setstorage[k])") - - elseif nvalued - continue - - elseif nod.nodetype == JuMP._Derivatives.CALL - op = nod.index - parent_index = nod.parent - children_idx = nzrange(adj, k) - parent_value = setstorage[k] - n_children = length(children_idx) - - # SKIPS USER DEFINE OPERATORS NOT BRIDGED INTO JuMP Tree Representation - if op >= JuMP._Derivatives.USER_OPERATOR_ID_START - continue - - # :+ - elseif op === 1 - REVERSE_DEBUG && println("plus_rev[$n_children][$k] at k = $k -> $(setstorage[k])") - if n_children === 2 - continue_flag &= reverse_plus_binary!(k, children_arr, children_idx, numvalued, numberstorage, - setstorage, x, lbd, ubd, sparsity, subgrad_tol, is_post) - else - continue_flag &= reverse_plus_narity!(k, children_arr, children_idx, numvalued, numberstorage, - setstorage, x, lbd, ubd, sparsity, subgrad_tol, is_post) - end - REVERSE_DEBUG && !continue_flag && println("Infeasible node encountered.") - - # :- - elseif op === 2 - REVERSE_DEBUG && println("minus_rev[$k] at k = $k -> $(setstorage[k])") - continue_flag &= reverse_minus!(k, children_arr, children_idx, numvalued, numberstorage, - setstorage, x, lbd, ubd, sparsity, subgrad_tol, is_post) - REVERSE_DEBUG && !continue_flag && println("Infeasible node encountered.") - - elseif op === 3 # :* - REVERSE_DEBUG && println("mult_rev[$n_children][$k] at k = $k -> $(setstorage[k])") - if n_children === 2 - continue_flag &= reverse_multiply_binary!(k, children_arr, children_idx, numvalued, numberstorage, - setstorage, x, lbd, ubd, sparsity, subgrad_tol, is_post) - else - continue_flag &= reverse_multiply_narity!(k, 
children_arr, children_idx, numvalued, numberstorage, - setstorage, x, lbd, ubd, sparsity, subgrad_tol, is_post) - end - REVERSE_DEBUG && !continue_flag && println("Infeasible node encountered.") - - # :^ - elseif op === 4 - REVERSE_DEBUG && println("power_rev[$k] at k = $k -> $(setstorage[k])") - continue_flag &= reverse_power!(k, children_arr, children_idx, numvalued, numberstorage, - setstorage, x, lbd, ubd, sparsity, subgrad_tol, is_post) - REVERSE_DEBUG && !continue_flag && println("Infeasible node encountered.") - - # :/ - elseif op === 5 - REVERSE_DEBUG && println("power_div[$k] at k = $k -> $(setstorage[k])") - continue_flag &= reverse_divide!(k, children_arr, children_idx, numvalued, numberstorage, - setstorage, x, lbd, ubd, sparsity, subgrad_tol, is_post) - REVERSE_DEBUG && !continue_flag && println("Infeasible node encountered.") - - # ifelse - elseif op === 6 - continue - end - - # assumes that child is set-valued and thus parent is set-valued (since isnumber already checked) - elseif nod.nodetype == JuMP._Derivatives.CALLUNIVAR - op = nod.index - REVERSE_DEBUG && println("fop_rev[$op][$k] at k = $k -> $(setstorage[k])") - if op <= JuMP._Derivatives.USER_UNIVAR_OPERATOR_ID_START - arg_indx = children_arr[adj.colptr[k]] - continue_flag &= reverse_univariate!(k, op, arg_indx, setstorage, x, lbd, ubd, sparsity, - subgrad_tol, is_post) - end - REVERSE_DEBUG && !continue_flag && println("Infeasible node encountered.") - end - - !continue_flag && break - end - - return continue_flag -end - -""" -$(FUNCTIONNAME) - -A reverse_pass! on a `BufferedNonlinear` structure `d` intersects the existing value of the `d` with -constraint bounds then reverse propagates a set-valued operator (by default McCormick operator) along the -computational tape. The tapes are updated in place and boolean value is returned indicating whether the -reverse propagation yeilded a infeasible point (true = still feasible, false is proved infeasible). 
-""" -function reverse_pass!(evaluator::Evaluator, d::NonlinearExpression{V}) where V - - return reverse_pass_kernel!(d.nd, d.adj, evaluator.x, evaluator.lower_variable_bounds, - evaluator.upper_variable_bounds, d.grad_sparsity, - d.setstorage, evaluator.subgrad_tol, d.numberstorage, - d.isnumber, evaluator.reverse_subgrad_tighten) -end - -function reverse_pass!(evaluator::Evaluator, d::BufferedNonlinearFunction{V}) where V - d.last_past_reverse = true - set_intersect_value!(d.expr, Interval(d.lower_bound, d.upper_bound)) - return reverse_pass!(evaluator, d.expr) -end diff --git a/src/eago_optimizer/functions/nonlinear/univariate.jl b/src/eago_optimizer/functions/nonlinear/univariate.jl deleted file mode 100644 index 8e1dc79f..00000000 --- a/src/eago_optimizer/functions/nonlinear/univariate.jl +++ /dev/null @@ -1,163 +0,0 @@ -# Copyright (c) 2018: Matthew Wilhelm & Matthew Stuber. -# This code is licensed under MIT license (see LICENSE.md for full details) -############################################################################# -# EAGO -# A development environment for robust and global optimization -# See https://github.com/PSORLab/EAGO.jl -############################################################################# -# src/eago_optimizer/functions/nonlinear/univariate.jl -# Defines switching functions for forward and reverse univariate functions. 
-############################################################################# - -exprs = Expr[] -for i = 1:length(univariate_operators) - op = univariate_operators[i] - ex = :(return $op(x)) - push!(exprs, ex) -end - -function binaryswitch(ids, exprs) - if length(exprs) <= 3 - out = Expr(:if, Expr(:call, :(==), :operator_id, ids[1]), exprs[1]) - if length(exprs) > 1 - push!(out.args, binaryswitch(ids[2:end], exprs[2:end])) - end - return out - else - mid = length(exprs) >>> 1 - return Expr(:if, Expr(:call, :(<=), :operator_id, ids[mid]), - binaryswitch(ids[1:mid], exprs[1:mid]), - binaryswitch(ids[mid+1:end], exprs[mid+1:end])) - end -end - -switchexpr = binaryswitch(1:length(exprs), exprs) -@eval @inline function eval_univariate_set(operator_id,x::T) where T - $switchexpr - error("No match for operator_id") -end - -#= -null_interval_rev(y::Interval{T}, x::Interval{T}) where T = y, x -const DEFINED_REVERSES = [:plus_rev, :minus_rev, :inv_rev, - :mul_rev, :div_rev, power_rev, - :sign_rev, max_rev, min_rev, - :sqr_rev, sqrt_rev, abs_rev, - :exp_rev, exp2_rev, exp10_rev, expm1_rev, - :log_rev, log2_rev, log10_rev, log1p_rev, - :sin_rev, cos_rev, tan_rev, - :asin_rev, acos_rev, atan_rev, - :sinh_rev, cosh_rev, tanh_rev, - :asinh_rev, acosh_rev, atanh_rev] - =# - -# code for defining reverse operators -univariate_operators_rev = [:plus_rev, :minus_rev] -for i in 3:length(univariate_operators) - string = String(univariate_operators[i])*"_rev" - push!(univariate_operators_rev, Symbol(string)) -end - -univariate_reverse_operator_to_id = Dict{Symbol,Int}() -for i = 1:length(univariate_operators) - univariate_reverse_operator_to_id[univariate_operators_rev[i]] = i -end - -exprs_rev = Expr[] -for i = 1:length(univariate_operators_rev) - op = univariate_operators_rev[i] - ex = :(return $op(y,x)) - push!(exprs_rev, ex) -end - -switchexpr_rev = binaryswitch(1:length(exprs_rev), exprs_rev) -@eval @inline function eval_univariate_set_reverse(operator_id, y::T, x::T) where T - 
$switchexpr_rev - y, x -end - -function make_uv_gate(symbol_list) - n = Expr(:||) - for (i, sym) in enumerate(symbol_list) - if i !== 1 - n = :(x == $(univariate_operator_to_id[sym]) || $n) - else - n = Expr(:||) - n = :(x == $(univariate_operator_to_id[sym])) - end - end - n -end - -single_tp_ops = [:tan, :cot, :asin, :acos, :atan, :sinh, :tanh, - :erf, :erfinv, :erfc, :erfcinv, :erfi, :erfcx, :tand, :cotd, - :acosd, :atand, :asind, :asinh, :atanh, :asech] -single_tp_gate = make_uv_gate(single_tp_ops) -@eval @inline single_tp(x::Int) = $single_tp_gate - -double_tp_ops = [:sin, :cos, :sind, :cosd, :sech, :dawson] -double_tp_gate = make_uv_gate(double_tp_ops) -@eval @inline double_tp(x::Int) = $double_tp_gate - -function make_tp_gate_1(symbol_list, x::Symbol, z::Symbol) - n = Expr(:block) - for (i, sym) in enumerate(symbol_list) - a = :(op == $(univariate_operator_to_id[sym]) && (return (McCormick.$sym)(x), 0.0, 0.0)) - push!(n.args, a) - end - push!(n.args, :(error("No operator"))) - - nd = Expr(:block) - for (i, sym) in enumerate(symbol_list) - sym_kernel = Symbol(String(sym)*"_kernel") - a = :(op == $(univariate_operator_to_id[sym]) && (return (McCormick.$sym_kernel)(x, z.Intv, tp1, tp2))) - push!(nd.args, a) - end - push!(nd.args, :(error("No operator"))) - - qn = quote - if flag - $n - else - $nd - end - end - qn -end - -function make_tp_gate_2(symbol_list, x::Symbol, z::Symbol) - n = Expr(:block) - for (i, sym) in enumerate(symbol_list) - a = :(op == $(univariate_operator_to_id[sym]) && (return (McCormick.$sym)(x), 0.0, 0.0, 0.0, 0.0)) - push!(n.args, a) - end - push!(n.args, :(error("No operator"))) - - nd = Expr(:block) - for (i, sym) in enumerate(symbol_list) - sym_kernel = Symbol(String(sym)*"_kernel") - a = :(op == $(univariate_operator_to_id[sym]) && (return (McCormick.$sym_kernel)(x, z.Intv, tp1, tp2, tp3, tp4))) - push!(nd.args, a) - end - push!(nd.args, :(error("No operator"))) - - qn = quote - if flag - $n - else - $nd - end - end - qn -end - 
-single_tp_expr = make_tp_gate_1(single_tp_ops, :x, :z) -@eval @inline function single_tp_set(op::Int64, x::MC, z::MC, tp1::Float64, - tp2::Float64, flag::Bool) - $single_tp_expr -end -double_tp_expr = make_tp_gate_2(double_tp_ops, :x, :z) -@eval @inline function double_tp_set(op::Int64, x::MC, z::MC, tp1::Float64, - tp2::Float64, tp3::Float64, tp4::Float64, flag::Bool) - $double_tp_expr -end diff --git a/src/eago_optimizer/functions/nonlinear/user_function/user_function.jl b/src/eago_optimizer/functions/nonlinear/user_function/user_function.jl new file mode 100644 index 00000000..1f28fd31 --- /dev/null +++ b/src/eago_optimizer/functions/nonlinear/user_function/user_function.jl @@ -0,0 +1,33 @@ + +# Defines the Vrev object which holds a value and an reverse function... +""" + Vrev{T<:Number,F} + +Structure used to wrap the value and reverse function +""" +struct Vrev{T<:Number,F} + val::T + rev::F +end +Vrev(x::T) where T = Vrev{T,typeof(identity)}(x, identity) +@inline _val(d::Vrev{T,F}) where {T,F} = d.val + +zero(::Vrev{T,F}) where {T<:Number,F} = Vrev(zero(T)) + +macro norev_scalar(c, f) + esc(quote + function Cassette.overdub(::$c, ::typeof($f), x::Vrev{T,F}) where {T<:Number,F} + r = y -> x.rev(_val(x)) + return Vrev{T,typeof(r)}(($f)(x), r) + end + end) +end + +macro no_prop(f) + esc(quote + function Cassette.overdub(::$c, ::typeof($f), x::Vrev{T,F}) where {T<:Number,F} + r = y -> x.rev(_val(x)) + return Vrev{T,typeof(r)}(($f)(x), r) + end + end) +end diff --git a/src/eago_optimizer/functions/quadratic.jl b/src/eago_optimizer/functions/quadratic.jl deleted file mode 100644 index 7e8ec059..00000000 --- a/src/eago_optimizer/functions/quadratic.jl +++ /dev/null @@ -1,141 +0,0 @@ -# Copyright (c) 2018: Matthew Wilhelm & Matthew Stuber. 
-# This code is licensed under MIT license (see LICENSE.md for full details) -############################################################################# -# EAGO -# A development environment for robust and global optimization -# See https://github.com/PSORLab/EAGO.jl -############################################################################# -# src/eago_optimizer/functions/nonlinear/quadratic.jl -# Defines buffered structures to store quadratic functions: -# BufferedQuadraticIneq, BufferedQuadraticEq, as well as the -# lower_interval_bound, interval_bound, and eliminate_fixed_variables! -# functions associated with each structure. -############################################################################# - -### -### Structure definitions -### - -""" -$(TYPEDEF) - -Stores a general quadratic inequality constraint with a buffer. -""" -mutable struct BufferedQuadraticIneq <: AbstractEAGOConstraint - func::SQF - buffer::Dict{Int, Float64} - saf::SAF - len::Int -end - -""" -$(TYPEDEF) - -Stores a general quadratic equality constraint with a buffer. 
-""" -mutable struct BufferedQuadraticEq <: AbstractEAGOConstraint - func::SQF - minus_func::SQF - buffer::Dict{Int, Float64} - saf::SAF - len::Int -end - -#= -mutable struct BufferedConvexQuadratic <: AbstractEAGOConstraint - func::SQF - buffer::Dict{Int, Float64} - saf::SAF - len::Int -end -=# - -### -### Constructor definitions -### - -function create_buffer_dict(func::SQF) - - buffer = Dict{Int, Float64}() - - for term in func.quadratic_terms - buffer[term.variable_index_1.value] = 0.0 - buffer[term.variable_index_2.value] = 0.0 - end - - for term in func.affine_terms - buffer[term.variable_index.value] = 0.0 - end - - return buffer -end - -BufferedQuadraticIneq() = BufferedQuadraticIneq(SQF(SQT[], SAT[], 0.0), Dict{Int, Float64}(), SAF(SAT[], 0.0), 0) - -function BufferedQuadraticIneq(func::SQF, set::LT) - - buffer = create_buffer_dict(func) - saf = SAF([SAT(0.0, VI(k)) for k in keys(buffer)], 0.0) - len = length(buffer) - cfunc = copy(func) - cfunc.constant -= set.upper - - return BufferedQuadraticIneq(cfunc, buffer, saf, len) -end - -function BufferedQuadraticIneq(func::SQF, set::GT) - - buffer = create_buffer_dict(func) - saf = SAF([SAT(0.0, VI(k)) for k in keys(buffer)], 0.0) - len = length(buffer) - cfunc = MOIU.operate(-, Float64, func) - cfunc.constant += set.lower - - return BufferedQuadraticIneq(cfunc, buffer, saf, len) -end - -BufferedQuadraticEq() = BufferedQuadraticEq(SQF(SQT[], SAT[], 0.0), SQF(SQT[], SAT[], 0.0), Dict{Int, Float64}(), SAF(SAT[], 0.0), 0) - -function BufferedQuadraticEq(func::SQF, set::ET) - - buffer = create_buffer_dict(func) - saf = SAF([SAT(0.0, VI(k)) for k in keys(buffer)], 0.0) - len = length(buffer) - cfunc1 = copy(func) - cfunc1.constant -= set.value - cfunc2 = MOIU.operate(-, Float64, func) - cfunc2.constant += set.value - - return BufferedQuadraticEq(cfunc1, cfunc2, buffer, saf, len) -end - -#= -function BufferedConvexQuadratic(f::BufferedQuadraticIneq) - BufferedConvexQuadratic(copy(f.func), copy(f.buffer), copy(f.saf), 
f.len) -end -=# - -### -### Parsing definitions -### - -function eliminate_fixed_variables!(f::T, v::Vector{VariableInfo}) where T <: Union{BufferedQuadraticIneq, - BufferedQuadraticIneq} - deleted_count = 0 - index = 1 - while i + deleted_count <= f.len - term = @inbounds f.sqf.terms[i] - variable_info_1 = @inbounds v[term.variable_index_1.value] - variable_info_2 = @inbounds v[term.variable_index_2.value] - if variable_info_1.is_fixed && variable_index_2.is_fixed - f.sqf.constant += coeff*variable_info_1.lower_bound*variable_index_2.lower_bound - deleteat!(f.sqf.terms, i) - deleted_count += 1 - else - i += 1 - end - end - f.len -= deleted_count - - return nothing -end diff --git a/src/eago_optimizer/guarded_context.jl b/src/eago_optimizer/guarded_context.jl deleted file mode 100644 index c43bfeda..00000000 --- a/src/eago_optimizer/guarded_context.jl +++ /dev/null @@ -1,143 +0,0 @@ -# Copyright (c) 2018: Matthew Wilhelm & Matthew Stuber. -# This code is licensed under MIT license (see LICENSE.md for full details) -############################################################################# -# EAGO -# A development environment for robust and global optimization -# See https://github.com/PSORLab/EAGO.jl -############################################################################# -# src/eago_optimizer/guarded_context.jl -# Provides utilities for dealing with nonlinear expressions that contain domain -# violations. The assumption is that domain violations only occur due to -# expansiveness of the bounds of the nonlinear terms not the underlying model. 
-############################################################################# - -Cassette.@context GuardCtx - -struct GuardTracker - domain_tol::Float64 - guard_on::Bool -end - -const IntFltIntv = Union{Int16, Int32, Int64, Float16, Float32, Float64, Interval{Float64}} - -for f in (+, *, -, max, min) - @eval Cassette.overdub(ctx::GuardCtx, ::typeof($f), x::MC{N,T}, y::MC{N,T}) where {N, T<:RelaxTag} = f(x, y) - @eval Cassette.overdub(ctx::GuardCtx, ::typeof($f), x::S, y::S) where {S <: IntFltIntv} = f(x,y) - @eval Cassette.overdub(ctx::GuardCtx, ::typeof($f), x::S, y::MC{N,T}) where {N, T<:RelaxTag, S<:IntFltIntv} = f(x,y) - @eval Cassette.overdub(ctx::GuardCtx, ::typeof($f), x::MC{N,T}, y::S) where {N, T<:RelaxTag, S<:IntFltIntv} = f(x,y) -end - -function Cassette.overdub(ctx::GuardCtx, ::typeof(/), x::MC{N,T}, y::MC{N,T}) where {N, T<:RelaxTag} - - m = ctx.metadata - if m.guard_on && (y.Intv.lo <= -m.domain_tol) && (y.Intv.hi >= m.domain_tol) - z = MC{N,T}(union(x.Intv/Interval{Float64}(y.Intv.lo, -m.domain_tol), - x.Intv/Interval{Float64}(m.domain_tol, y.Intv.hi))) - else - z = x/y - end - - return z -end - -function Cassette.overdub(ctx::GuardCtx, ::typeof(/), x::Float64, y::MC{N,T}) where {N, T<:RelaxTag} - - m = ctx.metadata - if m.guard_on && (y.Intv.lo <= -m.domain_tol) && (y.Intv.hi >= m.domain_tol) - z = MC{N,T}(union(x.Intv/Interval{Float64}(y.Intv.lo, -m.domain_tol), - x.Intv/Interval{Float64}(m.domain_tol, y.Intv.hi))) - else - z = x/y - end - - return z -end - -Cassette.overdub(ctx::GuardCtx, ::typeof(/), x::S, y::S) where {S <: IntFltIntv} = f(x,y) -Cassette.overdub(ctx::GuardCtx, ::typeof(/), x::S, y::MC{N,T}) where {N, T<:RelaxTag, S <: IntFltIntv} = f(x,y) -Cassette.overdub(ctx::GuardCtx, ::typeof(/), x::MC{N,T}, y::S) where {N, T<:RelaxTag, S <: IntFltIntv} = f(x,y) - -function Cassette.overdub(ctx::GuardCtx, ::typeof(^), x::MC{N,T}, y::MC{N,T}) where {N, T<:RelaxTag} - - m = ctx.metadata - if m.guard_on && (y < 0.0) && ((x.Intv.lo <= 
-m.domain_tol) && (x.Intv.hi >= m.domain_tol)) - z = MC{N,T}(union(Interval{Float64}(x.Intv.lo, -m.domain_tol), - Interval{Float64}(m.domain_tol, x.Intv.hi))^y) - else - z = x/y - end - - return z -end - -function Cassette.overdub(ctx::GuardCtx, ::typeof(^), x::MC{N,T}, y::Float64) where {N, T<:RelaxTag} - - m = ctx.metadata - if m.guard_on && (y < 0.0) && ((x.Intv.lo <= -m.domain_tol) && (x.Intv.hi >= m.domain_tol)) - z = MC{N,T}(union(Interval{Float64}(x.Intv.lo, -m.domain_tol), - Interval{Float64}(m.domain_tol, x.Intv.hi))^y) - else - z = x/y - end - - return z -end - -Cassette.overdub(ctx::GuardCtx, ::typeof(^), x::S, y::S) where {S<:IntFltIntv} = f(x,y) - -for f in (log, log2, log10, sqrt) - @eval function Cassette.overdub(ctx::GuardCtx, ::typeof($f), x::MC{N,T}) where {N, T<:RelaxTag} - - m = ctx.metadata - if m.guard_on && (x.Intv.lo <= m.domain_tol) - z = ($f)(MC{N,T}(Interval{Float64}(m.domain_tol, x.Intv.hi))) - else - z = ($f)(x) - end - - return z - end -end - -for f in (log1p, acosh) - @eval function Cassette.overdub(ctx::GuardCtx, ::typeof($f), x::MC{N,T}) where {N, T<:RelaxTag} - - m = ctx.metadata - if m.guard_on && (x.Intv.lo <= -1.0 + m.domain_tol) - z = ($f)(MC{N,T}(Interval{Float64}(-1.0 + m.domain_tol, x.Intv.hi))) - else - z = ($f)(x) - end - - return z - end -end - -for f in (acos, asin, atanh) - @eval function Cassette.overdub(ctx::GuardCtx, ::typeof($f), x::MC{N,T}) where {N, T<:RelaxTag} - - m = ctx.metadata - if m.guard_on && ((x.Intv.lo <= m.domain_tol - 1.0) || - (x.Intv.hi >= 1.0 - m.domain_tol)) - z = ($f)(MC{N,T}(Interval{Float64}(max(y.Intv.lo, m.domain_tol - 1.0), - min(1.0 - m.domain_tol, y.Intv.hi)))) - else - z = ($f)(x) - end - - return z - end -end - -for f in (log, log2, log10, sqrt, log1p, acosh, acos, asin, atanh, acosd, asind) - @eval Cassette.overdub(ctx::GuardCtx, ::typeof($f), x::S) where {S<:IntFltIntv} = f(x) -end - -for f in (abs, exp, exp2, exp10, sin, tan, cos, sec, csc, - sech, csch, coth, acsch, acoth, asech, 
step, sign, - asinh, tanh, atan, cosh, sind, cosd, tand, secd, cscd, cotd, - atand, asecd, acscd, acotd, isone, isnan, empty, - convert, in, isempty, one, zero, real, eps, rad2deg, deg2rad) - @eval Cassette.overdub(ctx::GuardCtx, ::typeof($f), x::S) where {S<:IntFltIntv} = f(x) - @eval Cassette.overdub(ctx::GuardCtx, ::typeof($f), x::MC{N,T}) where {N,T<:RelaxTag} = f(x) -end diff --git a/src/eago_optimizer/moi_constraints.jl b/src/eago_optimizer/moi_constraints.jl deleted file mode 100644 index 598b06cd..00000000 --- a/src/eago_optimizer/moi_constraints.jl +++ /dev/null @@ -1,71 +0,0 @@ -# Copyright (c) 2018: Matthew Wilhelm & Matthew Stuber. -# This code is licensed under MIT license (see LICENSE.md for full details) -############################################################################# -# EAGO -# A development environment for robust and global optimization -# See https://github.com/PSORLab/EAGO.jl -############################################################################# -# src/eago_optimizer/constraints.jl -# Defines constraints supported Optimizer and how to store them. 
-############################################################################# - - -##### Supports function and add_constraint for scalar affine functions -MOI.supports_constraint(::Optimizer, ::Type{SAF}, ::Type{S}) where {S <: INEQ_SETS} = true - -macro define_addconstraint_linear(function_type, set_type, array_name, count_name) - quote - function MOI.add_constraint(m::Optimizer, func::$function_type, set::$set_type) - check_inbounds!(m, func) - push!(m._input_problem.$(array_name), (func, set)) - m._input_problem._last_constraint_index += 1 - m._input_problem.$(count_name) += 1 - indx = CI{$function_type, $set_type}(m._input_problem._last_constraint_index) - return indx - end - end -end - -@define_addconstraint_linear SAF LT _linear_leq_constraints _linear_leq_count -@define_addconstraint_linear SAF GT _linear_geq_constraints _linear_geq_count -@define_addconstraint_linear SAF ET _linear_eq_constraints _linear_eq_count - -##### Supports function and add_constraint for scalar quadratic functions -MOI.supports_constraint(::Optimizer, ::Type{SQF}, ::Type{S}) where {S <: INEQ_SETS} = true - -macro define_addconstraint_quadratic(function_type, set_type, array_name, count_name) - quote - function MOI.add_constraint(m::Optimizer, func::$function_type, set::$set_type) - check_inbounds!(m, func) - push!(m._input_problem.$(array_name), (func, set)) - m._input_problem._last_constraint_index += 1 - m._input_problem.$(count_name) += 1 - indx = CI{$function_type, $set_type}(m._input_problem._last_constraint_index) - return indx - end - end -end - -@define_addconstraint_quadratic SQF LT _quadratic_leq_constraints _quadratic_leq_count -@define_addconstraint_quadratic SQF GT _quadratic_geq_constraints _quadratic_geq_count -@define_addconstraint_quadratic SQF ET _quadratic_eq_constraints _quadratic_eq_count - -##### Supports function and add_constraint for conic functions -#= -const CONE_SETS = Union{SOC} -MOI.supports_constraint(::Optimizer, ::Type{VECOFVAR}, ::Type{S}) where {S 
<: CONE_SETS} = true - -function MOI.add_constraint(m::Optimizer, func::VECOFVAR, set::SOC) - - if length(func.variables) !== set.dimension - error("Dimension of $(s) does not match number of terms in $(f)") - end - - check_inbounds!(m, func) - push!(m._input_problem._conic_second_order, (func, set)) - m._input_problem._last_constraint_index += 1 - m._input_problem._conic_second_order_count += 1 - - return CI{VECOFVAR, SOC}(m._input_problem._last_constraint_index) -end -=# diff --git a/src/eago_optimizer/moi_wrapper.jl b/src/eago_optimizer/moi_wrapper.jl new file mode 100644 index 00000000..2f6412dc --- /dev/null +++ b/src/eago_optimizer/moi_wrapper.jl @@ -0,0 +1,215 @@ +# Copyright (c) 2018: Matthew Wilhelm & Matthew Stuber. +# This code is licensed under MIT license (see LICENSE.md for full details) +############################################################################# +# EAGO +# A development environment for robust and global optimization +# See https://github.com/PSORLab/EAGO.jl +############################################################################# +# src/eago_optimizer/constraints.jl +# Defines constraints supported by optimizer and how to store them. +############################################################################# + +const INEQ_SETS = Union{LT, GT, ET} +const VAR_SETS = Union{LT, GT, ET, ZO, MOI.Integer} + +##### Utilities for checking that JuMP model contains variables used in expression +function check_inbounds!(m::Optimizer, vi::VI) + if !(1 <= vi.value <= m._input_problem._variable_count) + error("Invalid variable index $vi. 
($(m._input_problem._variable_count) variables in the model.)") + end + return nothing +end +check_inbounds!(m::Optimizer, f::SAF) = foreach(x -> check_inbounds!(m, x.variable), f.terms) +check_inbounds!(m::Optimizer, f::VECOFVAR) = foreach(x -> check_inbounds!(m, x), f.variables) +function check_inbounds!(m::Optimizer, f::SQF) + foreach(x -> check_inbounds!(m, x.variable), f.affine_terms) + for term in f.quadratic_terms + check_inbounds!(m, term.variable_1) + check_inbounds!(m, term.variable_2) + end + return nothing +end + +MOI.supports_constraint(::Optimizer, ::Type{VI}, ::Type{S}) where S <: VAR_SETS = true +MOI.supports_constraint(::Optimizer,::Type{T},::Type{S}) where {T<:Union{SAF,SQF},S<:INEQ_SETS} = true + +MOI.is_valid(m::Optimizer, v::VI) = (1 <= v.value <= m._input_problem._variable_count) +MOI.is_valid(m::Optimizer, c::CI{VI,S}) where S <: VAR_SETS = (1 <= c.value <= m._input_problem._variable_count) +MOI.is_valid(m::Optimizer, c::CI{F,S}) where {F<:Union{SAF,SQF}, S<:INEQ_SETS} = (1 <= c.value <= length(_constraints(m,F,S))) + +MOI.get(m::Optimizer, ::MOI.NumberOfConstraints{VI,S}) where S<:VAR_SETS = length(_constraints(m,VI,S)) +MOI.get(m::Optimizer, ::MOI.NumberOfConstraints{F,S}) where {F<:Union{SAF,SQF},S<:INEQ_SETS} = length(_constraints(m,F,S)) + +MOI.get(m::Optimizer, ::MOI.ListOfConstraintIndices{VI,S}) where S<:VAR_SETS = collect(keys(_constraints(m,VI,S))) +MOI.get(m::Optimizer, ::MOI.ListOfConstraintIndices{F,S}) where {F<:Union{SAF,SQF},S<:INEQ_SETS} = collect(keys(_constraints(m,F,S))) + +MOI.add_variable(m::Optimizer) = VI(m._input_problem._variable_count += 1) + +function MOI.add_constraint(m::Optimizer, f::F, s::S) where {F<:Union{SAF,SQF},S<:INEQ_SETS} + check_inbounds!(m, f) + ci = CI{F, S}(m._input_problem._constraint_count += 1) + _constraints(m, F, S)[ci] = (f, s) + return ci +end +function MOI.add_constraint(m::Optimizer, f::VI, s::S) where S<:VAR_SETS + check_inbounds!(m, f) + ci = CI{VI,S}(f.value) + _constraints(m,VI,S)[ci] = 
(f, s) + return ci +end + +result_index_1_error(v::T) where T = throw(MOI.ResultIndexBoundsError{T}(v, 1)) +function MOI.get(model::Optimizer, v::MOI.VariablePrimal, vi::MOI.VariableIndex) + check_inbounds!(model, vi) + (v.result_index != 1) && result_index_1_error(v) + return model._global_optimizer._continuous_solution[vi.value] +end +function MOI.get(m::Optimizer{R,S,T}, v::MOI.ConstraintPrimal, c::CI{VI,<:Any}) where {R,S,T} + (v.result_index != 1) && result_index_1_error(v) + return MOI.get(m, MOI.VariablePrimal(), MOI.VariableIndex(c.value)) +end +function MOI.get(m::Optimizer{R,Q,T}, v::MOI.ConstraintPrimal, c::CI{F,S}) where {R,Q,T,F,S} + (v.result_index != 1) && result_index_1_error(v) + return _constraint_primal(m._global_optimizer, F, S)[c] +end + +MOI.get(m::Optimizer, ::MOI.ConstraintFunction, c::CI{F,S}) where {F,S} = _constraints(m,F,S)[c][1] +MOI.get(m::Optimizer, ::MOI.ConstraintSet, c::CI{F,S}) where {F,S} = _constraints(m,F,S)[c][2] + +function MOI.empty!(m::Optimizer{R,S,T}) where {R,S,T} + + MOI.empty!(m.subsolver_block) + MOI.empty!(m._global_optimizer) + m._input_problem = InputProblem() + m._working_problem = ParsedProblem() + + m._termination_status_code = MOI.OPTIMIZE_NOT_CALLED + m._result_status_code = MOI.OTHER_RESULT_STATUS + m._run_time = 0.0 + m._objective_value = -Inf + m._objective_bound = Inf + m. _relative_gap = Inf + m._iteration_count = 0 + m._node_count = 0 + + return nothing +end + +function MOI.is_empty(m::Optimizer{R,S,T}) where {R,S,T} + + flag = true + flag &= MOI.is_empty(m._global_optimizer) + flag &= isempty(m._input_problem) + flag &= isempty(m._working_problem) + flag &= isempty(m.subsolver_block) + flag &= m._termination_status_code == MOI.OPTIMIZE_NOT_CALLED + flag &= m._result_status_code == MOI.OTHER_RESULT_STATUS + + # set constructor reset on empty! and to zero in initial parse! 
in parse.jl + flag &= iszero(m._run_time) + flag &= iszero(m._iteration_count) + flag &= iszero(m._node_count) + flag &= m._objective_value == -Inf + flag &= m._objective_bound == Inf + flag &= m. _relative_gap == Inf + + return flag +end + +MOI.supports_incremental_interface(m::Optimizer) = true +MOI.copy_to(model::Optimizer, src::MOI.ModelLike) = MOIU.default_copy_to(model, src) + +##### +##### Set & get attributes of model +##### +MOI.supports(::Optimizer, ::MOI.Silent) = true + +function MOI.set(m::Optimizer, ::MOI.Silent, value) + if value + m._parameters.verbosity = 0 + m._parameters.log_on = false + else + m._parameters.verbosity = 1 + end + return +end + +MOI.set(m::Optimizer, ::MOI.TimeLimitSec, ::Nothing) = m._parameters.time_limit = Inf +MOI.set(m::Optimizer, ::MOI.TimeLimitSec, v::Float64) = m._parameters.time_limit = v + +function MOI.get(m::Optimizer, ::MOI.ListOfConstraintTypesPresent) + constraint_types = [] + for S in (ZO, MOI.Integer) + if MOI.get(m, MOI.NumberOfConstraints{VI,S}()) > 0 + push!(constraint_types, (VI,S)) + end + end + for S in (LT, GT, ET), T in (VI, SAF, SQF) + if MOI.get(m, MOI.NumberOfConstraints{T,S}()) > 0 + push!(constraint_types, (VI,S)) + end + end + return constraint_types +end + +MOI.get(m::Optimizer, v::MOI.ObjectiveValue) = !isone(v.result_index) ? result_index_1_error(v) : m._objective_value +MOI.get(m::Optimizer, v::MOI.PrimalStatus) = !isone(v.result_index) ? 
MOI.NO_SOLUTION : m._result_status_code +MOI.get(m::Optimizer, ::MOI.DualStatus) = MOI.NO_SOLUTION +MOI.get(m::Optimizer, ::MOI.ObjectiveBound) = m._objective_bound +MOI.get(m::Optimizer, ::MOI.NumberOfVariables) = m._input_problem._variable_count +MOI.get(m::Optimizer, ::MOI.SolverName) = "EAGO: Easy Advanced Global Optimization" +MOI.get(m::Optimizer, ::MOI.SolverVersion) = "0.7.0" +MOI.get(m::Optimizer, ::MOI.TerminationStatus) = m._termination_status_code +MOI.get(m::Optimizer, ::MOI.SolveTimeSec) = m._run_time +MOI.get(m::Optimizer, ::MOI.NodeCount) = m._node_count +MOI.get(m::Optimizer, ::MOI.ResultCount) = (m._result_status_code === MOI.FEASIBLE_POINT) ? 1 : 0 +MOI.get(m::Optimizer, ::MOI.TimeLimitSec) = m._parameters.time_limit +MOI.get(m::Optimizer, ::MOI.Silent) = m._parameters.verbosity == 0 +MOI.get(m::Optimizer, ::MOI.ListOfVariableIndices) = [VI(i) for i = 1:m._input_problem._variable_count] + +function MOI.get(m::Optimizer, ::MOI.RelativeGap) + b = MOI.get(m, MOI.ObjectiveBound()) + v = MOI.get(m, MOI.ObjectiveValue()) + return relative_gap(b,v) +end + +_to_sym(d) = error("EAGO only supports raw parameters with Symbol or String names.") +_to_sym(d::String) = Symbol(d) +_to_sym(d::Symbol) = d +function MOI.get(m::Optimizer, p::MOI.RawOptimizerAttribute) + s = _to_sym(p.name) + s in EAGO_PARAMETERS ? getfield(m._parameters, s) : getfield(m, s) +end +function MOI.set(m::Optimizer, p::MOI.RawOptimizerAttribute, x) + s = _to_sym(p.name) + if (s == :relaxed_optimizer) || (s == :upper_optimizer) + setfield!(m, s, Incremental(x)) + else + s in EAGO_PARAMETERS ? 
setfield!(m._parameters, s, x) : setfield!(m, s, x) + end +end +MOI.get(m::Optimizer, p::MOI.RawStatusString) = string(m._global_optimizer._end_state) + +##### +##### Support, set, and evaluate objective functions +##### +MOI.supports(::Optimizer, ::MOI.TimeLimitSec) = true +MOI.supports(::Optimizer, ::MOI.ObjectiveSense) = true +MOI.supports(::Optimizer, ::MOI.ObjectiveFunction{F}) where {F <: Union{VI, SAF, SQF}} = true + +function MOI.set(m::Optimizer, ::MOI.NLPBlock, nlp_data::MOI.NLPBlockData) + if nlp_data.has_objective + m._input_problem._objective = nothing + end + m._input_problem._nlp_data = nlp_data +end + +function MOI.set(m::Optimizer, ::MOI.ObjectiveFunction{T}, f::T) where T <: Union{VI,SAF,SQF} + check_inbounds!(m, f) + m._input_problem._objective = f +end +MOI.get(m::Optimizer, ::MOI.ObjectiveFunction{T}) where T <: Union{VI,SAF,SQF} = m._input_problem._objective +MOI.get(m::Optimizer, ::MOI.ObjectiveFunctionType) = typeof(m._input_problem._objective) + +MOI.set(m::Optimizer, ::MOI.ObjectiveSense, s::MOI.OptimizationSense) = m._input_problem._optimization_sense = s +MOI.get(m::Optimizer, ::MOI.ObjectiveSense) = m._input_problem._optimization_sense \ No newline at end of file diff --git a/src/eago_optimizer/node_bb.jl b/src/eago_optimizer/node_bb.jl deleted file mode 100644 index 114df32a..00000000 --- a/src/eago_optimizer/node_bb.jl +++ /dev/null @@ -1,93 +0,0 @@ -# Copyright (c) 2018: Matthew Wilhelm & Matthew Stuber. 
-# This code is licensed under MIT license (see LICENSE.md for full details) -############################################################################# -# EAGO -# A development environment for robust and global optimization -# See https://github.com/PSORLab/EAGO.jl -############################################################################# -# src/eago_optimizer/node_bb.jl -# Defines storage for a node in the B&B tree & utilities functions -############################################################################# - -""" -$(TYPEDEF) - -Stores information associated with each node in Branch & Bound tree. - -$(TYPEDFIELDS) -""" -struct NodeBB - "Lower bounds of variable box." - lower_variable_bounds::Vector{Float64} - "Upper bounds of variable box." - upper_variable_bounds::Vector{Float64} - "Lower bound of problem solution on nodeBB" - lower_bound::Float64 - "Upper bound of problem solution on nodeBB" - upper_bound::Float64 - "Depth of node in B&B tree." - depth::Int64 - "Unique id for each node." - id::Int64 -end - -# Constructors -NodeBB() = NodeBB(Float64[], Float64[], -Inf, Inf, 0, 1) -NodeBB(x::NodeBB) = NodeBB(x.lower_variable_bounds, x.upper_variable_bounds, - x.lower_bound, x.upper_bound, x.depth, x.id) - -# Copy utilities -Base.copy(x::NodeBB) = NodeBB(copy(x.lower_variable_bounds), - copy(x.upper_variable_bounds), - x.lower_bound, x.upper_bound, x.depth, x.id) - -# using alternative name as to not interfere with ordering... 
-function uninitialized(x::NodeBB) - flag = isempty(x.lower_variable_bounds) - flag &= isempty(x.upper_variable_bounds) - flag &= x.lower_bound === -Inf - flag &= x.upper_bound === Inf - flag &= x.depth === 0 - flag &= x.id === 1 - return flag -end - -# Access functions for broadcasting data easily -lower_variable_bounds(x::NodeBB) = x.lower_variable_bounds -upper_variable_bounds(x::NodeBB) = x.upper_variable_bounds -lower_variable_bounds(x::NodeBB, id::Int64, nid::Int64) = x.lower_variable_bounds[id:nid] -upper_variable_bounds(x::NodeBB, id::Int64, nid::Int64) = x.upper_variable_bounds[id:nid] -lower_bound(x::NodeBB) = x.lower_bound -upper_bound(x::NodeBB) = x.upper_bound -depth(x::NodeBB) = x.depth - -# Iterations Functions -Base.isless(x::NodeBB, y::NodeBB) = x.lower_bound < y.lower_bound -Base.length(x::NodeBB) = length(x.lower_variable_bounds) -function Base.isempty(x::NodeBB) - for i = 1:length(x) - @inbounds lower = x.lower_variable_bounds[i] - @inbounds upper = x.upper_variable_bounds[i] - (lower > upper) && (return true) - end - return false -end - -""" -$(FUNCTIONNAME) - -Checks that node `x` and `y` have equal domains withing a tolerance of `atol`. 
-""" -function same_box(x::NodeBB, y::NodeBB, r::Float64) - (isempty(x.lower_variable_bounds) ⊻ isempty(y.lower_variable_bounds)) && (return false) - (isempty(x.upper_variable_bounds) ⊻ isempty(y.upper_variable_bounds)) && (return false) - for i = 1:length(x) - ~isapprox(x.lower_variable_bounds[i], y.lower_variable_bounds[i], atol=r) && (return false) - ~isapprox(x.upper_variable_bounds[i], y.upper_variable_bounds[i], atol=r) && (return false) - end - return true -end - -# Compute middle & diameter -diam(x::NodeBB) = x.upper_variable_bounds - x.lower_variable_bounds -mid(x::NodeBB) = 0.5*(x.upper_variable_bounds + x.lower_variable_bounds) diff --git a/src/eago_optimizer/optimize/nonconvex/bound.jl b/src/eago_optimizer/optimize/nonconvex/bound.jl new file mode 100644 index 00000000..240af6a6 --- /dev/null +++ b/src/eago_optimizer/optimize/nonconvex/bound.jl @@ -0,0 +1,188 @@ +# Copyright (c) 2018: Matthew Wilhelm & Matthew Stuber. +# This code is licensed under MIT license (see LICENSE.md for full details) +############################################################################# +# EAGO +# A development environment for robust and global optimization +# See https://github.com/PSORLab/EAGO.jl +############################################################################# +# src/eago_optimizer/bound.jl +# Computes interval bounds of various functions. +############################################################################# + +### +### AFFINE FUNCTIONS +### +function lower_interval_bound(m::GlobalOptimizer, f::AffineFunctionIneq) + fL = f.constant + for (c, j) in f.terms + xL = _lower_bound(FullVar(), m, j) + xU = _upper_bound(FullVar(), m, j) + fL += (c > 0.0) ? 
c*xL : c*xU + end + return fL +end + +function interval_bound(m::GlobalOptimizer, f::Union{AffineFunctionEq,AffineFunctionIneq}) + fL = fU = f.constant + for (c, j) in f.terms + xL = _lower_bound(FullVar(), m, j) + xU = _upper_bound(FullVar(), m, j) + if c > 0.0 + fL += c*xL + fU += c*xU + else + fL += c*xU + fU += c*xL + end + end + return fL, fU +end + +### +### QUADRATIC FUNCTIONS +### + +function lower_interval_bound(m::GlobalOptimizer, f::BufferedQuadraticIneq) + fval = Interval{Float64}(f.func.constant) + for t in f.func.affine_terms + c = t.coefficient + j = t.variable.value + xL = _lower_bound(FullVar(), m, j) + xU = _upper_bound(FullVar(), m, j) + fval += c > 0.0 ? c*xL : c*xU + end + for t in f.func.quadratic_terms + c = t.coefficient + i = t.variable_1.value + j = t.variable_2.value + xL = _lower_bound(FullVar(), m, i) + xU = _upper_bound(FullVar(), m, i) + if i == j + if c > 0.0 + fval += (0.0 < xL) ? 0.5*c*xL*xL : ((xU <= 0.0) ? 0.5*c*xU*xU : 0.0) + else + fval += (xL < xU) ? 
0.5*c*xU*xU : 0.5*c*xL*xL + end + else + yL = _lower_bound(FullVar(), m, j) + yU = _upper_bound(FullVar(), m, j) + fval += c*Interval{Float64}(xL, xU)*Interval{Float64}(yL, yU) + end + end + return fval.lo +end +function interval_bound(m::GlobalOptimizer, f::BufferedQuadraticIneq) + + fval = Interval{Float64}(f.func.constant) + for t in f.func.affine_terms + c = t.coefficient + j = t.variable.value + xL = _lower_bound(FullVar(), m, j) + xU = _upper_bound(FullVar(), m, j) + fval += c*Interval(xL, xU) + end + for t in f.func.quadratic_terms + c = t.coefficient + i = t.variable_1.value + j = t.variable_2.value + xL = _lower_bound(FullVar(), m, i) + xU = _upper_bound(FullVar(), m, i) + if i == j + fval += 0.5*c*pow(Interval(xL, xU), 2) + else + yL = _lower_bound(FullVar(), m, j) + yU = _upper_bound(FullVar(), m, j) + fval += c*Interval(xL, xU)*Interval(yL, yU) + end + end + return fval.lo, fval.hi +end + +function interval_bound(m::GlobalOptimizer, f::BufferedQuadraticEq) + + fval = Interval{Float64}(f.func.constant) + for t in f.func.affine_terms + c = t.coefficient + j = t.variable.value + xL = _lower_bound(FullVar(), m, j) + xU = _upper_bound(FullVar(), m, j) + fval += c*Interval(xL, xU) + end + for t in f.func.quadratic_terms + c = t.coefficient + i = t.variable_1.value + j = t.variable_2.value + xL = _lower_bound(FullVar(), m, i) + xU = _upper_bound(FullVar(), m, i) + if i == j + fval += 0.5*c*pow(Interval(xL, xU), 2) + else + yL = _lower_bound(FullVar(), m, j) + yU = _upper_bound(FullVar(), m, j) + fval += c*Interval(xL, xU)*Interval(yL, yU) + end + end + return fval.lo, fval.hi +end + +### +### SECOND-ORDER CONE +### +function lower_interval_bound(m::GlobalOptimizer, d::BufferedSOC) + + sol_branch_map = m._sol_to_branch_map + lo_bnds = n.lower_variable_bounds + up_bnds = n.upper_variable_bounds + vec_of_vi = d.variables.variables + + norm_bound = Interval(0.0) + for i = 2:length(vec_of_vi) + mapped_vi = @inbounds sol_branch_map[vec_of_vi[i].value] + x = 
Interval{Float64}(lo_bnds[mapped_vi], up_bnds[mapped_vi]) + norm_bound += pow(x, 2) + end + norm_bound = sqrt(norm_bound) + + mapped_vi = @inbounds sol_branch_map[vec_of_vi[1].value] + lower_bound = norm_bound.lo -(@inbounds up_bnds[mapped_vi]) + + return lower_bound +end + + +### +### NONLINEAR FUNCTIONS +### +function lower_interval_bound(m::GlobalOptimizer, d::BufferedNonlinearFunction{V,N,T}) where {V,N,T} + !has_value(d) && forward_pass!(m._working_problem._relaxed_evaluator, d) + is_num(d) ? num(d) : interval(d).lo +end +function interval_bound(m::GlobalOptimizer, d::BufferedNonlinearFunction{V,N,T}) where {V,N,T} + !has_value(d) && forward_pass!(m._working_problem._relaxed_evaluator, d) + v = is_num(d) ? Interval{Float64}(num(d)) : interval(d) + return v.lo, v.hi +end + +is_feasible(m::GlobalOptimizer, f::Union{AFI,BQI}) = lower_interval_bound(m, f) <= 0.0 +function is_feasible(m::GlobalOptimizer, f::Union{AFE,BQE}) + l, u = interval_bound(m, f) + l <= 0.0 <= u +end +function is_feasible(m::GlobalOptimizer, f::BufferedNonlinearFunction{V,N,T}) where {V,N,T} + l, u = interval_bound(m, f) + feasible_flag = (u >= lower_bound(f)) + feasible_flag && (l <= upper_bound(f)) +end + +bound_objective(m::GlobalOptimizer, f::BufferedNonlinearFunction) = interval_bound(m, f) +bound_objective(m::GlobalOptimizer, f::AffineFunctionIneq) = interval_bound(m, f) +bound_objective(m::GlobalOptimizer, f::BufferedQuadraticIneq) = interval_bound(m, f) +function bound_objective(m::GlobalOptimizer, f::VI) + vval = f.value + l = lower_bound(FullVar(), m, vval) + u = upper_bound(FullVar(), m, vval) + return l, u +end + +bound_objective(t::ExtensionType, m::GlobalOptimizer) = bound_objective(m, m._working_problem._objective) +bound_objective(m::GlobalOptimizer{R,Q,S}) where {R,Q,S<:ExtensionType} = bound_objective(_ext(m), m) diff --git a/src/eago_optimizer/optimize/nonconvex/configure_subsolver.jl b/src/eago_optimizer/optimize/nonconvex/configure_subsolver.jl new file mode 100644 index 
00000000..daeb943a --- /dev/null +++ b/src/eago_optimizer/optimize/nonconvex/configure_subsolver.jl @@ -0,0 +1,52 @@ +# Copyright (c) 2018: Matthew Wilhelm & Matthew Stuber. +# This code is licensed under MIT license (see LICENSE.md for full details) +############################################################################# +# EAGO +# A development environment for robust and global optimization +# See https://github.com/PSORLab/EAGO.jl +############################################################################# +# src/eago_optimizer/subsolver_config/config.jl +# Contains subroutines used to set default configuration for select supported +# solvers along with routines needed to adjust tolerances to mirror tolerance +# adjustments in the global solve. +############################################################################# + +function set_default_config_udf!(s, m::MOI.AbstractOptimizer, verbosity::Int) + if verbosity > 0 + println("EAGO lacks a specialized configuration routine for the subsolver ($(MOI.get(m, MOI.SolverName())))") + println("you selected. As a result, EAGO cannot set the subsolver tolerances based on the") + println("absolute_tolerance, relative tolerance, and absolute_constraint_feas_tolerance") + println("parameters passed to the EAGO optimizer. Consequently, need to ensure that the tolerances") + println("set in the provided subsolver are appropriate (for instance if the absolute_tolerance = 1E-3") + println("then the absolute tolerance for a subsolver should be < 1E-4 and any feasibility tolerances") + println(" should be as conservative as the absolute_constraint_feas_tolerance). 
If you see this message") + println("please submit an issue at https://github.com/PSORLab/EAGO.jl/issues/new/choose requesting") + println("that a configuration routine be added for this subsolver.") + end + return +end + +function set_default_config!(ext, d::GlobalOptimizer, m::MOI.AbstractOptimizer, local_solver) + set_default_config_udf!(MOI.get(m, MOI.SolverName()), m, _verbosity(d)) +end + +function set_default_subsolver_config!(ext::DefaultExt, d::GlobalOptimizer, m::T, local_solver::Bool) where T + !_user_solver_config(d) && set_default_config!(ext, d, m, local_solver) + MOI.set(m, MOI.Silent(), true) + nothing +end + +function set_default_config!(ext::DefaultExt, m::GlobalOptimizer{R,S,Q}) where {R,S,Q<:ExtensionType} + set_default_subsolver_config!(DefaultExt(), m, m._subsolvers.relaxed_optimizer, false) + set_default_subsolver_config!(DefaultExt(), m, m._subsolvers.upper_optimizer, true) +end + +""" + set_default_config! + +Configures subsolver tolerances based on tolerance parameters provided to +EAGO (provided that a specialized subsolver configuration routine has been +provided and `m.user_solver_config = false`). +""" +set_default_config!(ext::ExtensionType, m::GlobalOptimizer) = set_default_config!(DefaultExt(), m) +set_default_config!(m::GlobalOptimizer{R,S,Q}) where {R,S,Q<:ExtensionType} = set_default_config!(_ext(m), m) \ No newline at end of file diff --git a/src/eago_optimizer/display.jl b/src/eago_optimizer/optimize/nonconvex/display.jl similarity index 55% rename from src/eago_optimizer/display.jl rename to src/eago_optimizer/optimize/nonconvex/display.jl index 965902c8..c762bbfa 100644 --- a/src/eago_optimizer/display.jl +++ b/src/eago_optimizer/optimize/nonconvex/display.jl @@ -11,25 +11,38 @@ # ambiguity. ############################################################################# -const PRINTING_IOFORMAT = :SCI -const PRINTING_CHARSET = :ASCII - """ $(FUNCTIONNAME) Prints solution information for the B&B problem. 
Displays first node found, solution value, solution, and time spent solving subproblems. """ -function print_solution!(m::Optimizer) - if m._parameters.verbosity > 0 +function print_solution!(m::GlobalOptimizer) + if _verbosity(m) > 0 println(" ") + if m._end_state == GS_OPTIMAL + println("Empty Stack: Exhaustive Search Finished") + elseif m._end_state == GS_INFEASIBLE + println("Empty Stack: Infeasible") + elseif m._end_state == GS_NODE_LIMIT + println("Node Limit Exceeded") + elseif m._end_state == GS_ITERATION_LIMIT + println("Maximum Iteration Exceeded") + elseif m._end_state == GS_RELATIVE_TOL + println("Relative Tolerance Achieved") + elseif m._end_state == GS_ABSOLUTE_TOL + println("Absolute Tolerance Achieved") + elseif m._end_state == GS_TIME_LIMIT + println("Time Limit Exceeded") + end println("First Solution Found at Node $(m._first_solution_node)") - lower_bound = m._min_converged_value - if (m._input_problem._optimization_sense !== MOI.MIN_SENSE) - lower_bound *= -1.0 + if !_is_input_min(m) + println("LBD = $(MOI.get(m, MOI.ObjectiveBound()))") + println("UBD = $(MOI.get(m, MOI.ObjectiveValue()))") + else + println("LBD = $(MOI.get(m, MOI.ObjectiveBound()))") + println("UBD = $(MOI.get(m, MOI.ObjectiveValue()))") end - println("LBD = $(lower_bound)") - println("UBD = $(MOI.get(m, MOI.ObjectiveValue()))") println("Solution is :") if m._feasible_solution_found for i = 1:m._input_problem._variable_count @@ -46,13 +59,17 @@ $(FUNCTIONNAME) Prints node information for the B&B problem. Node id, bound, and interval box. """ -function print_node!(m::Optimizer) - n = m._current_node - bound = (m._input_problem._optimization_sense === MOI.MIN_SENSE) ? 
n.lower_bound : -n.lower_bound - println(" ") - println("Node ID: $(n.id), Lower Bound: $(bound), Lower Variable Bounds: - $(n.lower_variable_bounds), Upper Variable Bounds: $(n.upper_variable_bounds)") - println(" ") +function print_node!(m::GlobalOptimizer) + if _verbosity(m) >= 3 + n = m._current_node + bound = _is_input_min(m) ? n.lower_bound : -n.lower_bound + k = length(n) - (_obj_var_slack_added(m) ? 1 : 0) + println(" ") + println("Node ID: $(n.id), Lower Bound: $(bound)") + println("Lower Variable Bounds: $(n.lower_variable_bounds[1:k])") + println("Upper Variable Bounds: $(n.upper_variable_bounds[1:k])") + println(" ") + end return end @@ -62,9 +79,9 @@ $(FUNCTIONNAME) Prints the iteration information based on verbosity. The header is displayed every `header_interval`, the iteration info is displayed every `iteration_interval`. """ -function print_iteration!(m::Optimizer) +function print_iteration!(m::GlobalOptimizer) - if m._parameters.verbosity > 0 + if _verbosity(m) > 0 # prints header line every B.hdr_intv times if mod(m._iteration_count, m._parameters.header_iterations) === 0 || m._iteration_count === 1 @@ -89,48 +106,48 @@ function print_iteration!(m::Optimizer) print_str *= (" "^(max_len - len_str))*temp_str*" | " max_len = 12 - if m._input_problem._optimization_sense === MOI.MIN_SENSE + if _is_input_min(m) lower = m._global_lower_bound upper = m._global_upper_bound else - lower = -m._global_upper_bound - upper = -m._global_lower_bound + lower = m._global_lower_bound + upper = m._global_upper_bound end #temp_str = string(round(lower, sigdigits = 5)) #temp_str = string(lower, sigdigits = 3)) - temp_str = formatted(lower, PRINTING_IOFORMAT, ndigits=4, charset=PRINTING_CHARSET) + temp_str = @sprintf "%.3E" lower len_str = length(temp_str) print_str *= (" "^(max_len - len_str))*temp_str*" | " #temp_str = formatted(upper, PRINTING_IOFORMAT, ndigits=4, charset=PRINTING_CHARSET) #temp_str = string(upper, sigdigits = 3)) - temp_str = formatted(upper, 
PRINTING_IOFORMAT, ndigits=4, charset=PRINTING_CHARSET) + temp_str = @sprintf "%.3E" upper len_str = length(temp_str) - print_str *= (" "^(max_len - len_str))*temp_str*" |" + print_str *= (" "^(max_len - len_str))*temp_str*" |" max_len = 12 #temp_str = string(round(abs(x._global_upper_bound - x._global_lower_bound), sigdigits = 3)) - temp_str = formatted(abs(m._global_upper_bound - m._global_lower_bound), PRINTING_IOFORMAT, ndigits=4, charset=PRINTING_CHARSET) + temp_str = @sprintf "%.3E" abs(m._global_upper_bound - m._global_lower_bound) len_str = length(temp_str) - print_str *= (" "^(max_len - len_str))*temp_str*" | " + print_str *= (" "^(max_len - len_str))*temp_str*" | " max_len = 12 #temp_str = string(round(relative_gap(x._global_lower_bound, x._global_upper_bound), sigdigits = 3)) - temp_str = formatted(relative_gap(m._global_lower_bound, m._global_upper_bound), PRINTING_IOFORMAT, ndigits=4, charset=PRINTING_CHARSET) + temp_str = @sprintf "%.3E" relative_gap(m._global_lower_bound, m._global_upper_bound) len_str = length(temp_str) print_str *= (" "^(max_len - len_str))*temp_str*" | " max_len = 12 #temp_str = string(round(x._run_time, sigdigits = 3)) - temp_str = formatted(m._run_time, PRINTING_IOFORMAT, ndigits=4, charset=PRINTING_CHARSET) + temp_str = @sprintf "%.3E" m._run_time len_str = length(temp_str) - print_str *= (" "^(max_len - len_str))*temp_str*" | " + print_str *= (" "^(max_len - len_str))*temp_str*" | " max_len = 12 #temp_str = string(round(x._time_left, sigdigits = 4)) - temp_str = formatted(m._time_left, PRINTING_IOFORMAT, ndigits=4, charset=PRINTING_CHARSET) + temp_str = @sprintf "%.3E" m._time_left len_str = length(temp_str) - print_str *= (" "^(max_len - len_str))*temp_str*" |" + print_str *= (" "^(max_len - len_str))*temp_str*" |" println(print_str) end @@ -144,31 +161,29 @@ $(FUNCTIONNAME) Prints the results of a single bounding problem. 
""" -function print_results!(m::Optimizer, flag::Bool) - if m._parameters.verbosity > 1 +function print_results!(m::GlobalOptimizer, flag::Bool) + if _verbosity(m) > 1 + k = length(m._lower_solution) - (_obj_var_slack_added(m) ? 1 : 0) println(" ") if flag - obj_val = m._lower_objective_value - if m._input_problem._optimization_sense === MOI.MIN_SENSE - print("Lower Bound (First Iteration): $(obj_val),") + if _is_input_min(m) + print("Lower Bound (First Iteration): $(m._lower_objective_value),") else - print("Upper Bound (First Iteration): $(-obj_val),") + print("Upper Bound (First Iteration): $(m._lower_objective_value),") end - print(" Solution: $(m._lower_solution), Feasibility: $(m._lower_feasibility)\n") + print(" Solution: $(m._lower_solution[1:k]), Feasibility: $(m._lower_feasibility)\n") println("Termination Status Code: $(m._lower_termination_status)") - println("Result Code: $(m._lower_result_status)") + println("Result Code: $(m._lower_primal_status)") else - obj_val = m._upper_objective_value - if m._input_problem._optimization_sense === MOI.MIN_SENSE - print("Upper Bound: $(obj_val), ") + if _is_input_min(m) + print("Upper Bound: $(m._upper_objective_value), ") else - print("Lower Bound: $(-obj_val), ") + print("Lower Bound: $(m._upper_objective_value), ") end - print(" Solution: $(m._upper_solution), Feasibility: $(m._upper_feasibility)\n") + print(" Solution: $(m._upper_solution[1:k]), Feasibility: $(m._upper_feasibility)\n") println("Termination Status Code: $(m._upper_termination_status)") println("Result Code: $(m._upper_result_status)") end - println(" ") end return end @@ -176,18 +191,18 @@ end """ $(FUNCTIONNAME) -Prints the results after performing various cuts. +Prints the iteration information based on verbosity. The header is displayed +every `header_interval`, the iteration info is displayed every `iteration_interval`. 
""" -function print_results_post_cut!(m::Optimizer) - if m._parameters.verbosity > 1 - println(" ") - if m._input_problem._optimization_sense === MOI.MIN_SENSE - print("Lower Bound (Last Iteration): $(m._lower_objective_value)") - else - print("Upper Bound (Last Iteration): $(-m._lower_objective_value)") +function print_preamble!(m::GlobalOptimizer) + if _verbosity(m) >= 3 + if !_is_input_min(m) && isone(m._iteration_count) + println(" ") + println("For maximization problems a max(f) = -min(-f) transformation is applied.") + println("Objectives values for each subproblem are the negative value of the objective") + println("in the original problem and reconciled after branch and bound terminates.") + println(" ") end - print(", Solution: $(m._lower_solution), Feasibility: $(m._lower_feasibility)\n") - println(" ") end return end diff --git a/src/eago_optimizer/logging/log_iteration.jl b/src/eago_optimizer/optimize/nonconvex/log_iteration.jl similarity index 90% rename from src/eago_optimizer/logging/log_iteration.jl rename to src/eago_optimizer/optimize/nonconvex/log_iteration.jl index 78fe926b..952c0035 100644 --- a/src/eago_optimizer/logging/log_iteration.jl +++ b/src/eago_optimizer/optimize/nonconvex/log_iteration.jl @@ -10,20 +10,20 @@ ############################################################################# """ - log_iteration!(x::Optimizer) + log_iteration!(x::GlobalOptimizer) If 'logging_on' is true, the 'global_lower_bound', 'global_upper_bound', 'run_time', and 'node_count' are stored every 'log_interval'. If 'log_subproblem_info' then the lower bound, feasibility and run times of the subproblems are logged every 'log_interval'. 
""" -function log_iteration!(x::Optimizer) +function log_iteration!(x::GlobalOptimizer) if x._parameters.log_on log = x._log if (mod(x._iteration_count, x._parameters.log_interval) == 0 || x._iteration_count == 1) if x._parameters.log_subproblem_info - if x._input_problem._optimization_sense === MOI.MIN_SENSE + if _is_input_min(m) push!(log.current_lower_bound, x._lower_objective_value) push!(log.current_upper_bound, x._upper_objective_value) else @@ -42,7 +42,7 @@ function log_iteration!(x::Optimizer) push!(log.postprocess_feasibility, x._postprocess_feasibility) end - if x._input_problem._optimization_sense === MOI.MIN_SENSE + if _is_input_min(x) push!(log.global_lower_bound, x._global_lower_bound) push!(log.global_upper_bound, x._global_upper_bound) else diff --git a/src/eago_optimizer/optimize/nonconvex/lower_problem.jl b/src/eago_optimizer/optimize/nonconvex/lower_problem.jl new file mode 100644 index 00000000..7a7d0fa9 --- /dev/null +++ b/src/eago_optimizer/optimize/nonconvex/lower_problem.jl @@ -0,0 +1,431 @@ +""" +$(FUNCTIONNAME) + +Adds linear objective cut constraint to the `x.relaxed_optimizer`. +""" +function objective_cut!(m::GlobalOptimizer, check_safe::Bool) + f = m._working_problem._objective_saf + u = m._global_upper_bound + if u < Inf + b = f.constant + f.constant = 0.0 + if check_safe && is_safe_cut!(m, f) + s = LT(u - b + _constraint_tol(m)) + m._affine_objective_cut_ci = MOI.add_constraint(_relaxed_optimizer(m), f, s) + end + f.constant = b + m._new_eval_objective = false + end + return +end + +""" + RelaxResultStatus + +Status code used internally to determine how to interpret the results from the +solution of a relaxed problem. +""" +@enum(RelaxResultStatus, RRS_OPTIMAL, RRS_DUAL_FEASIBLE, RRS_INFEASIBLE, RRS_INVALID) + + +""" +$(SIGNATURES) + +Takes an `MOI.TerminationStatusCode` and a `MOI.ResultStatusCode` and returns +the tuple `(valid_result::Bool, feasible::Bool)`. 
The value `valid_result` is +`true` if the pair of codes prove that either the subproblem solution was solved +to global optimality or the subproblem solution is infeasible. The value of +`feasible` is true if the problem is feasible and false if the problem is infeasible. +""" +function relaxed_problem_status(t::MOI.TerminationStatusCode, + p::MOI.ResultStatusCode, + d::MOI.ResultStatusCode) + + if (t == MOI.OPTIMAL) && (p == MOI.FEASIBLE_POINT) + return RRS_OPTIMAL + elseif t == MOI.INFEASIBLE + if (p == MOI.INFEASIBILITY_CERTIFICATE) || + (p == MOI.NO_SOLUTION) || (p == MOI.UNKNOWN_RESULT_STATUS) + return RRS_INFEASIBLE + end + elseif (t == MOI.INFEASIBLE_OR_UNBOUNDED && p == MOI.NO_SOLUTION) + return RRS_INFEASIBLE + end + (d == MOI.FEASIBLE_POINT) && return RRS_DUAL_FEASIBLE + return RRS_INVALID +end + +""" +$(SIGNATURES) + +Updates the relaxed constraint by setting the constraint set of `v == x*`` , +`xL_i <= x_i`, and `x_i <= xU_i` for each such constraint added to the relaxed +optimizer. Resets integral valued constraints to either `EqualTo` or `Interval` +constraints. 
+""" +function update_relaxed_problem_box!(m::GlobalOptimizer{R,S,Q}) where {R,S,Q<:ExtensionType} + d = _relaxed_optimizer(m) + for i = 1:_variable_num(BranchVar(), m) + l = _lower_bound(BranchVar(), m, i) + u = _upper_bound(BranchVar(), m, i) + v = VI(_bvi(m, i)) + if l == u + ci_vi_et = MOI.add_constraint(d, v, ET(l)) + push!(m._relaxed_variable_et, (ci_vi_et,i)) + else + ci_vi_lt = MOI.add_constraint(d, v, LT(u)) + ci_vi_gt = MOI.add_constraint(d, v, GT(l)) + m._node_to_sv_leq_ci[i] = ci_vi_lt + m._node_to_sv_geq_ci[i] = ci_vi_gt + push!(m._relaxed_variable_lt, (ci_vi_lt,i)) + push!(m._relaxed_variable_gt, (ci_vi_gt,i)) + end + end + return +end + +const SOLUTION_EPS = 0.05 +function store_lower_solution!(m::GlobalOptimizer{R,S,Q}, d::T) where {R,S,Q<:ExtensionType,T} + for i = 1:_variable_num(FullVar(), m) + l = _lower_bound(FullVar(), m, i) + u = _upper_bound(FullVar(), m, i) + ladj = l + SOLUTION_EPS*(u - l) + uadj = u - SOLUTION_EPS*(u - l) + x = MOI.get(d, MOI.VariablePrimal(), m._relaxed_variable_index[i]) + (x < ladj) && (x = ladj) + (x > uadj) && (x = uadj) + m._lower_solution[i] = x + end + return +end + +function reset_relaxation!(m::GlobalOptimizer{R,S,Q}) where {R,S,Q<:ExtensionType} + d = _relaxed_optimizer(m) + m._cut_iterations = 1 + m._obbt_performed_flag = false + m._working_problem._relaxed_evaluator.is_first_eval = true + fill!(m._working_problem._relaxed_evaluator.subexpressions_eval, false) + + m._new_eval_objective = true + m._new_eval_constraint = true + + # delete added affine constraints + foreach(c -> MOI.delete(d, c), m._affine_relax_ci) + empty!(m._affine_relax_ci) + + # delete variable + foreach(c -> MOI.delete(d, c[1]), m._relaxed_variable_et) + foreach(c -> MOI.delete(d, c[1]), m._relaxed_variable_lt) + foreach(c -> MOI.delete(d, c[1]), m._relaxed_variable_gt) + foreach(c -> MOI.delete(d, c), m._relaxed_variable_integer) + empty!(m._relaxed_variable_et) + empty!(m._relaxed_variable_lt) + empty!(m._relaxed_variable_gt) + 
empty!(m._relaxed_variable_integer) + + # delete objective cut + !isnothing(m._affine_objective_cut_ci) && MOI.delete(d, m._affine_objective_cut_ci) + return +end + +""" +$(FUNCTIONNAME) + +""" +function set_first_relax_point!(m::GlobalOptimizer) + if m._cut_iterations == 1 + m._working_problem._relaxed_evaluator.is_first_eval = true + m._new_eval_constraint = true + m._new_eval_objective = true + for i = 1:_variable_num(FullVar(), m) + l = _lower_bound(FullVar(), m, i) + u = _upper_bound(FullVar(), m, i) + if isfinite(l) && isfinite(u) + x = 0.5*(l + u) + elseif isfinite(l) + x = min(0.0, u) + elseif isfinite(u) + x = max(0.0, l) + else + x = 0.0 + end + _set_lower_solution!(FullVar(), m, x, i) + end + end + return +end + +""" +$(TYPEDSIGNATURES) + +A routine that adds relaxations for all nonlinear constraints and quadratic constraints +corresponding to the current node to the relaxed problem. This adds an objective cut +(if specified by `objective_cut_on`) and then sets the `_new_eval_constraint` flag +to false indicating that an initial evaluation of the constraints has occurred. If +the `objective_cut_on` flag is `true` then the `_new_eval_objective` flag is also +set to `false` indicating that the objective expression was evaluated. +""" +function relax_all_constraints!(t::ExtensionType, m::GlobalOptimizer, k::Int) + check_safe = (k == 1) ? 
false : m._parameters.cut_safe_on + wp = m._working_problem + wp._relaxed_evaluator.is_first_eval = m._new_eval_constraint + foreach(f -> relax!(m, f, k, check_safe), wp._sqf_leq) + foreach(f -> relax!(m, f, k, check_safe), wp._sqf_eq) + valid_relax_flag = true + num_feasible_relax_flag = true + if valid_relax_flag + for nl in wp._nonlinear_constr + valid_cut, feas_cut = relax!(m, nl, k, check_safe) + valid_relax_flag &= valid_cut + num_feasible_relax_flag &= feas_cut + end + end + m._new_eval_constraint = false + (k == 1) && objective_cut!(m, check_safe) + return valid_relax_flag, num_feasible_relax_flag +end +relax_constraints!(t::ExtensionType, m::GlobalOptimizer, k::Int) = relax_all_constraints!(t, m, k) +relax_constraints!(m::GlobalOptimizer{R,S,Q}, k::Int) where {R,S,Q<:ExtensionType} = relax_constraints!(_ext(m), m, k) + +function relax_problem!(m::GlobalOptimizer{R,S,Q}) where {R,S,Q<:ExtensionType} + wp = m._working_problem + if m._cut_iterations == 1 + reset_relaxation!(m) + if m._nonlinear_evaluator_created + set_node!(wp._relaxed_evaluator, m._current_node) + set_reference_point!(m) + fill!(wp._relaxed_evaluator.subexpressions_eval, false) + end + wp._relaxed_evaluator.is_post = m._parameters.subgrad_tighten + _set_has_value!(wp._objective, false) + wp._relaxed_evaluator.interval_intersect = false + update_relaxed_problem_box!(m) + set_first_relax_point!(m) + else + set_reference_point!(m) + end + valid_relax_flag, num_feasible_relax_flag = relax_constraints!(m, m._cut_iterations) + MOI.set(_relaxed_optimizer(m), MOI.ObjectiveSense(), MOI.MIN_SENSE) + return valid_relax_flag, num_feasible_relax_flag +end + +""" +$(SIGNATURES) + +Retrieves the lower and upper duals for variable bounds from the +`relaxed_optimizer` and sets the appropriate values in the +`_lower_lvd` and `_lower_uvd` storage fields. 
+""" +function set_dual!(m::GlobalOptimizer{R,S,Q}) where {R,S,Q<:ExtensionType} + d = _relaxed_optimizer(m) + if MOI.get(d, MOI.DualStatus()) == MOI.FEASIBLE_POINT + for (c, i) in m._relaxed_variable_lt + m._lower_uvd[i] = MOI.get(d, MOI.ConstraintDual(), c) + end + for (c, i) in m._relaxed_variable_gt + m._lower_lvd[i] = MOI.get(d, MOI.ConstraintDual(), c) + end + else + fill!(m._lower_lvd, 0.0) + fill!(m._lower_uvd, 0.0) + end + return +end + +interval_objective_bound!(m::GlobalOptimizer, f::Nothing, is_first_eval) = nothing +function interval_objective_bound!(m::GlobalOptimizer, f::AffineFunctionIneq, is_first_eval) + m._working_problem._relaxed_evaluator.is_first_eval = is_first_eval + fL, fU = bound_objective(m) + if fL > m._lower_objective_value + m._lower_objective_value = fL + fill!(m._lower_lvd, 0.0) + fill!(m._lower_uvd, 0.0) + m._cut_add_flag = false + end +end +function interval_objective_bound!(m::GlobalOptimizer, f, is_first_eval) + m._working_problem._relaxed_evaluator.is_first_eval = is_first_eval + if is_first_eval + m._working_problem._relaxed_evaluator.pass_number = 1 + end + fL, fU = bound_objective(m) + fv = _is_input_min(m) ? fL : -fU + if fv > m._lower_objective_value + m._lower_objective_value = fv + fill!(m._lower_lvd, 0.0) + fill!(m._lower_uvd, 0.0) + m._cut_add_flag = false + end + return +end +interval_objective_bound!(m::GlobalOptimizer, is_first_eval = true) = interval_objective_bound!(m, m._working_problem._objective, is_first_eval) + +""" +$(SIGNATURES) + +Runs interval, linear, quadratic contractor methods followed by obbt and a +constraint programming walk up to tolerances specified in +`EAGO.Optimizer` object. 
+""" +function preprocess!(t::ExtensionType, m::GlobalOptimizer{R,S,Q}) where {R,S,Q<:ExtensionType} + + feasible_flag = true + reset_relaxation!(m) + if _fbbt_lp_depth(m) >= _iteration_count(m) + load_fbbt_buffer!(m) + for _ = 1:_fbbt_lp_repetitions(m) + ns = NodeBB(_current_node(m)) + for f in m._working_problem._saf_leq + !(feasible_flag = feasible_flag && fbbt!(m, f)) && break + end + !feasible_flag && break + for f in m._working_problem._saf_eq + !(feasible_flag = feasible_flag && fbbt!(m, f)) && break + end + (same_box(ns,_current_node(m),0.0) || !feasible_flag) && break + end + unpack_fbbt_buffer!(m) + end + + # done after cp to prevent using cp specific flags in cut generation + set_first_relax_point!(m) + # nonlinear CP can detect infeasibility and bound objective even if + # the relaxation is ill-posed, so one is always used to mitigate numerical issues + cp_reps = _cp_depth(m) >= _iteration_count(m) ? _cp_repetitions(m) : 1 + for _ = 1:_cp_repetitions(m) + ns = NodeBB(_current_node(m)) + feasible_flag = feasible_flag && set_constraint_propagation_fbbt!(m) + (same_box(ns,_current_node(m),0.0) || !feasible_flag) && break + end + + if _obbt_depth(m) >= _iteration_count(m) + for k = 1:_obbt_repetitions(m) + ns = NodeBB(_current_node(m)) + feasible_flag = feasible_flag && obbt!(m) + m._obbt_performed_flag = true + (same_box(ns,_current_node(m),0.0) || !feasible_flag) && break + end + end + m._preprocess_feasibility = feasible_flag + return +end +preprocess!(m::GlobalOptimizer{R,S,Q}) where {R,S,Q<:ExtensionType} = preprocess!(_ext(m), m) + +""" +$(SIGNATURES) + +Checks if a cut should be added and computes a new reference point to add the +cut at. If no cut should be added the constraints not modified in place are +deleted from the relaxed optimizer and the solution is compared with the +interval lower bound. The best lower bound is then used. 
+""" +function cut_condition(t::ExtensionType, m::GlobalOptimizer) + obj_old = m._last_cut_objective + obj_new = m._lower_objective_value + flag = m._cut_iterations < _cut_max_iterations(m) + flag &= obj_new - obj_old > _cut_ϵ_abs(m) + flag &= obj_new - obj_old > _cut_ϵ_rel(m)*abs(obj_new) + return flag +end +cut_condition(m::GlobalOptimizer{R,S,Q}) where {R,S,Q<:ExtensionType} = cut_condition(_ext(m), m) + +is_integer_subproblem(m) = !continuous(_current_node(m)) +""" +$(SIGNATURES) + +Constructs and solves the relaxation using the default EAGO relaxation scheme +and optimizer on node `y`. +""" +function lower_problem!(t::ExtensionType, m::GlobalOptimizer{R,S,Q}) where {R,S,Q<:ExtensionType} + + num_feasible_relax_flag = true + + d = _relaxed_optimizer(m) + m._last_cut_objective = typemin(Float64) + m._lower_objective_value = typemin(Float64) + + t_status = MOI.OPTIMIZE_NOT_CALLED + p_status = MOI.OTHER_RESULT_STATUS + d_status = MOI.OTHER_RESULT_STATUS + status = RRS_INVALID + + set_first_relax_point!(m) + MOI.set(d, MOI.ObjectiveFunction{SAF}(), m._working_problem._objective_saf) + + while true + valid_prob, feas_flag = relax_problem!(m) + if !feas_flag + num_feasible_relax_flag = false + break + end + m._last_cut_objective = m._lower_objective_value + MOI.optimize!(d) + t_status = MOI.get(d, MOI.TerminationStatus()) + p_status = MOI.get(d, MOI.PrimalStatus()) + d_status = MOI.get(d, MOI.DualStatus()) + status = relaxed_problem_status(t_status, p_status, d_status) + if status != RRS_OPTIMAL + break + end + m._lower_objective_value = MOI.get(d, MOI.ObjectiveValue()) + if cut_condition(m) + store_lower_solution!(m, d) + m._cut_iterations += 1 + else + break + end + end + if !num_feasible_relax_flag + status = RRS_INFEASIBLE + end + + # activate integrality conditions for MIP & solve MIP subproblem + if is_integer_subproblem(m) && (status !== RRS_INFEASIBLE) + m._last_cut_objective = m._lower_objective_value + for i = 1:_variable_num(BranchVar(), m) + l = 
_lower_bound(BranchVar(), m, i) + u = _upper_bound(BranchVar(), m, i) + if is_integer(BranchVar(), m, i) && (l != u) + c_integer = MOI.add_constraint(d, VI(_bvi(m, i)), MOI.Integer()) + push!(m._relaxed_variable_integer, c_integer) + end + end + MOI.optimize!(d) + t_status = MOI.get(d, MOI.TerminationStatus()) + p_status = MOI.get(d, MOI.PrimalStatus()) + d_status = MOI.get(d, MOI.DualStatus()) + status = relaxed_problem_status(t_status, p_status, d_status) + if status == RRS_OPTIMAL + m._lower_objective_value = MOI.get(d, MOI.ObjectiveValue()) + end + end + + # check status -- if not feasible/infeasible then fallback to interval bounds + if status == RRS_OPTIMAL + m._lower_objective_value = MOI.get(d, MOI.ObjectiveValue()) + end + + m._lower_termination_status = t_status + m._lower_primal_status = p_status + m._lower_dual_status = d_status + status = relaxed_problem_status(t_status, p_status, d_status) + if !num_feasible_relax_flag + status = RRS_INFEASIBLE + end + if status == RRS_INFEASIBLE + m._lower_feasibility = false + m._lower_objective_value = -Inf + return + end + + # set dual values + set_dual!(m) + m._lower_feasibility = true + store_lower_solution!(m, d) + if status == RRS_DUAL_FEASIBLE + m._lower_objective_value = MOI.get(d, MOI.DualObjectiveValue()) + end + interval_objective_bound!(m, true) + return +end +lower_problem!(m::GlobalOptimizer{R,S,Q}) where {R,S,Q<:ExtensionType} = lower_problem!(_ext(m), m) \ No newline at end of file diff --git a/src/eago_optimizer/optimize/nonconvex/postprocess.jl b/src/eago_optimizer/optimize/nonconvex/postprocess.jl new file mode 100644 index 00000000..862eaf97 --- /dev/null +++ b/src/eago_optimizer/optimize/nonconvex/postprocess.jl @@ -0,0 +1,23 @@ + +""" +$(SIGNATURES) + +Default postprocess perfoms duality-based bound tightening on the `y`. 
+""" +function postprocess!(t::ExtensionType, m::GlobalOptimizer) + if m._parameters.dbbt_depth > m._iteration_count + variable_dbbt!(m._current_node, m._lower_lvd, m._lower_uvd, + m._lower_objective_value, m._global_upper_bound, + m._branch_variable_count) + end + return +end +postprocess!(m::GlobalOptimizer{R,S,Q}) where {R,S,Q<:ExtensionType} = postprocess!(_ext(m), m) + +""" +$(SIGNATURES) + +Checks to see if current node should be reprocessed. +""" +repeat_check(t::ExtensionType, m::GlobalOptimizer) = false +repeat_check(m::GlobalOptimizer{R,S,Q}) where {R,S,Q<:ExtensionType} = repeat_check(_ext(m), m) diff --git a/src/eago_optimizer/optimize/nonconvex/relax.jl b/src/eago_optimizer/optimize/nonconvex/relax.jl new file mode 100644 index 00000000..d4a5050f --- /dev/null +++ b/src/eago_optimizer/optimize/nonconvex/relax.jl @@ -0,0 +1,237 @@ +# Copyright (c) 2018: Matthew Wilhelm & Matthew Stuber. +# This code is licensed under MIT license (see LICENSE.md for full details) +############################################################################# +# EAGO +# A development environment for robust and global optimization +# See https://github.com/PSORLab/EAGO.jl +############################################################################# +# src/eago_optimizer/relax.jl +# Defines routines used construct the relaxed subproblem. +############################################################################# + +""" +$(FUNCTIONNAME) + +Applies the safe cut checks detailed in Khajavirad, 2018 [Khajavirad, Aida, +and Nikolaos V. Sahinidis. "A hybrid LP/NLP paradigm for global optimization +relaxations." Mathematical Programming Computation 10.3 (2018): 383-421] to +ensure that only numerically safe affine relaxations are added. Checks that +i) ``|b| <= safe b`, ii) `safe_l <= abs(ai) <= safe u`, and iii) violates +`safe_l <= abs(ai/aj) <= safe_u`. 
+""" +function is_safe_cut!(m::GlobalOptimizer, f::SAF) + + safe_l = m._parameters.cut_safe_l + safe_u = m._parameters.cut_safe_u + safe_b = m._parameters.cut_safe_b + + (abs(f.constant) > safe_b) && return false # violates |b| <= safe_b + term_count = length(f.terms) + @inbounds for i = 1:term_count + ai = f.terms[i].coefficient + if ai !== 0.0 + if !(safe_l <= abs(ai) <= safe_u) # violates safe_l <= abs(ai) <= safe_u + return false + end + @inbounds for j = i:term_count # violates safe_l <= abs(ai/aj) <= safe_u + aj = f.terms[j].coefficient + if aj !== 0.0 + if !(safe_l <= abs(ai/aj) <= safe_u) + return false + end + end + end + end + end + return true +end + +function add_affine_relaxation!(m::GlobalOptimizer{R,S,Q}, f::SAF, check_safe::Bool) where {R,S,Q<:ExtensionType} + valid_cut_flag = !check_safe || is_safe_cut!(m, f) + if valid_cut_flag + s = LT(-f.constant + _constraint_tol(m)) + f.constant = 0.0 + ci = MOI.add_constraint(_relaxed_optimizer(m), f, s)::CI{SAF,LT} + push!(m._affine_relax_ci, ci) + end + return valid_cut_flag +end + +""" +$(FUNCTIONNAME) + +Relaxs the constraint by adding an affine constraint to the model. +""" +function relax! end + +""" +$(FUNCTIONNAME) + +Default routine for relaxing quadratic constraint `func < 0.0` on node `n`. +Takes affine bounds of convex part at point `x0` and secant line bounds on +concave parts. +""" +function affine_relax_quadratic!(m::GlobalOptimizer, func::SQF, buffer::Dict{Int,Float64}, saf::SAF) where {R,S,Q<:ExtensionType} + + quadratic_constant = func.constant + + # Affine terms only contribute coefficients, so the respective + # values do not contribute to the cut. Since all quadratic terms + # are considered to be branch variables we exclude any potential + # need to retrieve variable bounds from locations other than + # the node. 
+ for term in func.quadratic_terms + a = term.coefficient + i = term.variable_1.value + j = term.variable_2.value + x0 = _lower_solution(FullVar(), m, i) + xL = _lower_bound(FullVar(), m, i) + xU = _upper_bound(FullVar(), m, i) + if i == j + if a > 0.0 + buffer[i] += a*x0 + quadratic_constant -= 0.5*a*x0*x0 + else + if !isinf(xL) && !isinf(xU) + buffer[i] += 0.5*a*(xL + xU) + quadratic_constant -= 0.5*a*xL*xU + else + return false + end + end + else + y0 = _lower_solution(FullVar(), m, j) + yL = _lower_bound(FullVar(), m, j) + yU = _upper_bound(FullVar(), m, j) + if a > 0.0 + if (!isinf(xL) && !isinf(yL)) && ((xU - xL)*y0 + (yU - yL)*x0 <= xU*yU - xL*yL) + buffer[i] += a*yL + buffer[j] += a*xL + quadratic_constant -= a*xL*yL + + elseif !isinf(xU) && !isinf(yU) + buffer[i] += a*yU + buffer[j] += a*xU + quadratic_constant -= a*xU*yU + else + return false + end + else + if (!isinf(xU) && !isinf(yL)) && ((xU - xL)*y0 - (yU - yL)*x0 <= xU*yL - xL*yU) + buffer[i] += a*yL + buffer[j] += a*xU + quadratic_constant -= a*xU*yL + elseif !isinf(xL) && !isinf(yU) + buffer[i] += a*yU + buffer[j] += a*xL + quadratic_constant -= a*xL*yU + else + return false + end + end + end + end + + for t in func.affine_terms + buffer[t.variable.value] += t.coefficient + end + count = 1 + for (key, value) in buffer + saf.terms[count] = SAT(value, VI(key)) + buffer[key] = 0.0 + count += 1 + end + saf.constant = quadratic_constant + return +end + +""" +$(TYPEDSIGNATURES) +""" +function relax!(m::GlobalOptimizer, f::BQI, k::Int, check_safe::Bool) + affine_relax_quadratic!(m, f.func, f.buffer, f.saf) + valid_cut_flag = add_affine_relaxation!(m, f.saf, check_safe) + return valid_cut_flag +end + +""" +$(TYPEDSIGNATURES) +""" +function relax!(m::GlobalOptimizer, f::BQE, i::Int, check_safe::Bool) + affine_relax_quadratic!(m, f.func, f.buffer, f.saf) + valid_cut_flag = add_affine_relaxation!(m, f.saf, check_safe) + affine_relax_quadratic!(m, f.minus_func, f.buffer, f.saf) + valid_cut_flag &= 
add_affine_relaxation!(m, f.saf, check_safe) + return valid_cut_flag +end + +""" +$(TYPEDSIGNATURES) +""" +function check_set_affine_nl!(m::GlobalOptimizer{R,S,Q}, f::BufferedNonlinearFunction{V,N,T}, finite_cut::Bool, check_safe::Bool) where {V,R,S,N,T<:RelaxTag,Q<:ExtensionType} + valid_cut_flag = finite_cut && (!check_safe || is_safe_cut!(m, f.saf)) + if valid_cut_flag + lt = LT(_constraint_tol(m) - f.saf.constant) + f.saf.constant = 0.0 + push!(m._affine_relax_ci, MOI.add_constraint(_relaxed_optimizer(m), f.saf, lt)) + end + return valid_cut_flag +end + +""" +$(TYPEDSIGNATURES) +""" +function relax!(m::GlobalOptimizer{R,S,Q}, f::BufferedNonlinearFunction{V,N,T}, k::Int, check_safe::Bool) where {V,R,S,N,T<:RelaxTag,Q<:ExtensionType} + d = m._working_problem._relaxed_evaluator + x = d.variable_values.x + d.pass_number = k + forward_pass!(d, f) + valid_cut_flag = true + num_feasible_cut = true + + grad_sparsity = sparsity(f) + if is_num(f) + f.saf.constant = num(f) + for i = 1:length(grad_sparsity) + f.saf.terms[i] = SAT(0.0, VI(grad_sparsity[i])) + end + if !(lower_bound(f) <= num(f) <= upper_bound(f)) + num_feasible_cut = false + end + else + v = set(f) + if !isempty(v) + # if has less than or equal to bound (<=) + if isfinite(upper_bound(f)) + lower_cut_valid = !isnan(v.cv) && isfinite(v.cv) + if lower_cut_valid + f.saf.constant = v.cv - upper_bound(f) + for (i, k) in enumerate(grad_sparsity) + c = v.cv_grad[i] + f.saf.terms[i] = SAT(c, VI(k)) + f.saf.constant -= c*x[k] + end + valid_cut_flag = check_set_affine_nl!(m, f, lower_cut_valid, check_safe) + end + end + # if has greater than or equal to bound (>=) + if isfinite(lower_bound(f)) + upper_cut_valid = !isnan(v.cc) && isfinite(v.cc) + if upper_cut_valid + f.saf.constant = -v.cc + lower_bound(f) + for (i, k) in enumerate(grad_sparsity) + c = -v.cc_grad[i] + f.saf.terms[i] = SAT(c, VI(k)) + f.saf.constant -= c*x[k] + end + valid_cut_flag &= check_set_affine_nl!(m, f, upper_cut_valid, check_safe) + end + end + 
else + num_feasible_cut = false + end + end + return valid_cut_flag, num_feasible_cut +end + +relax!(m::GlobalOptimizer, f::Union{Nothing, VI, AFI}, k::Int, b::Bool) = true diff --git a/src/eago_optimizer/optimize/nonconvex/stack_management.jl b/src/eago_optimizer/optimize/nonconvex/stack_management.jl new file mode 100644 index 00000000..bf3f9dfd --- /dev/null +++ b/src/eago_optimizer/optimize/nonconvex/stack_management.jl @@ -0,0 +1,232 @@ +# Copyright (c) 2018: Matthew Wilhelm & Matthew Stuber. +# This code is licensed under MIT license (see LICENSE.md for full details) +############################################################################# +# EAGO +# A development environment for robust and global optimization +# See https://github.com/PSORLab/EAGO.jl +############################################################################# +# src/eago_optimizer/optimize/nonconvex_branch/stack_management.jl +# Contains the subroutines used for stack management. Namely, node_selection!, +# single_storage!, branch_node!, and fathom!. 
+############################################################################# + +function _variable_infeasibility(m::GlobalOptimizer, i::Int) + tsum = zero(Float64); tmin = typemax(Float64); tmax = typemin(Float64) + d = m._branch_cost + for j in _sparsity(BranchVar(), m, i) + v = m._constraint_infeasiblity[j] + tsum += v + (v > tmax) && (tmax = v) + (v < tmin) && (tmin = v) + end + return d.μ1*tsum + d.μ2*tmin + d.μ3*tmax +end + +function _store_pseudocosts!(m::GlobalOptimizer, n::NodeBB) + k = n.last_branch + d = m._branch_cost + if n.branch_direction == BD_POS + d.ηp[k] += 1 + d.𝛹p[k] = n.lower_bound - m._lower_objective_value + d.δp[k] = n.branch_extent + elseif n.branch_direction == BD_NEG + d.ηn[k] += 1 + d.𝛹n[k] = n.lower_bound - m._lower_objective_value + d.δn[k] = n.branch_extent + end + return +end + +function _lo_extent(m::GlobalOptimizer, xb::Float64, k::Int) + c = _branch_cost(m) + (c == BC_INFEASIBLE) && return _variable_infeasibility(m, k) + + l = _lower_bound(BranchVar(), m, k) + u = _upper_bound(BranchVar(), m, k) + (c == BC_INTERVAL) && return xb - l + (c == BC_INTERVAL_REV) && return u - xb + + xlp = _lower_solution(BranchVar(), m, k) + ρ = _cost_offset_β(m)*(u - l) + y = max(min(xlp, u - ρ), l + ρ) + return (c == BC_INTERVAL_LP) ? (y - l) : (u - y) +end + +function _hi_extent(m::GlobalOptimizer, xb::Float64, k::Int) + c = _branch_cost(m) + (c == BC_INFEASIBLE) && return _variable_infeasibility(m, k) + + l = _lower_bound(BranchVar(), m, k) + u = _upper_bound(BranchVar(), m, k) + (c == BC_INTERVAL) && return u - xb + (c == BC_INTERVAL_REV) && return xb - l + + xlp = _lower_solution(BranchVar(), m, k) + ρ = _cost_offset_β(m)*(u - l) + y = max(min(xlp, u - ρ), l + ρ) + return (c == BC_INTERVAL_LP) ? 
(u - y) : (y - l) +end + +@inline _score(x::T, y::T, μ::T) where T<:Real = (one(T) - μ)*min(x, y) + max(x, y) +@inline function _score(d::BranchCostStorage{T}, i::Int) where T<:Real + _score(d.𝛹n[i]*d.δn[i], d.𝛹p[i]*d.δp[i], d.μ_score) +end + +function _select_branch_variable_cost(m::GlobalOptimizer) + return map_argmax(i -> score(m.branch_cost, i), 1:_variable_num(BranchVar(),m)) +end + +# In the case of diam(X) = Inf variables rel_diam may equal NaN, this should be set to +# a zero relative diameter to prevent "branching" on these variables for a functionally infinite +# amount of time +function rel_diam(m::GlobalOptimizer, i::Int) + rd = _diam(BranchVar(), m, i)/diam(_working_variable_info(m, _bvi(m, i))) + return isnan(rd) ? 0.0 : rd +end +function _select_branch_variable_width(m::GlobalOptimizer) + map_argmax(i -> rel_diam(m,i), 1:_variable_num(BranchVar(), m)) +end + +""" +$(SIGNATURES) + +Selects the variable to branch on psuedocost branching is used if +(parameter: `branch_pseudocost_on` = true). +""" +function select_branch_variable(t::ExtensionType, m::GlobalOptimizer) + _branch_pseudocost_on(m) && return _select_branch_variable_cost(m) + return _select_branch_variable_width(m) +end + +""" +$(SIGNATURES) + +Selects a point `xb` which is a convex combination (parameter: +`branch_cvx_factor`) of the solution to the relaxation and the midpoint of the +node. If this solution lies within (parameter: `branch_offset`) of a bound then +the branch point is moved to a distance of `branch_offset` from the bound. +""" +function select_branch_point(t::ExtensionType, m::GlobalOptimizer, i) + l = _lower_bound(BranchVar(), m, i) + u = _upper_bound(BranchVar(), m, i) + s = _lower_solution(BranchVar(), m, i) + α = _branch_cvx_α(m) + b = _branch_offset_β(m)*(u - l) + return max(l + b, min(u - b, α*s + (one(Float64) - α)*_mid(BranchVar(), m, i))) +end + +""" +$(SIGNATURES) + +Creates two nodes from `current_node` and stores them to the stack. 
Calls +`select_branch_variable(t, m)` and `select_branch_point(t, m, k)`. +""" +function branch_node!(t::ExtensionType, m::GlobalOptimizer) + + k = select_branch_variable(t, m) + x = select_branch_point(t, m, k) + n = m._current_node + + isfinite(n.last_branch) && _store_pseudocosts!(m, n) + + l_bound = max(n.lower_bound, m._lower_objective_value) + u_bound = min(n.upper_bound, m._upper_objective_value) + + l_lbd = copy(n.lower_variable_bounds); u_lbd = copy(n.lower_variable_bounds) + l_ubd = copy(n.upper_variable_bounds); u_ubd = copy(n.upper_variable_bounds) + l_int = copy(n.is_integer); u_int = copy(n.is_integer) + + flag = is_integer(BranchVar(), m, k) + if flag + l_int[k] = floor(x) != n.lower_variable_bounds[k] + u_int[k] = ceil(x) != n.upper_variable_bounds[k] + end + l_cont = flag ? !any(l_int) : true + u_cont = flag ? !any(u_int) : true + lx = flag ? floor(x) : x + ux = flag ? ceil(x) : x + l_ubd[k] = lx + u_lbd[k] = ux + + psuedo_cost_flag = _branch_pseudocost_on(m) + l_ext = psuedo_cost_flag ? _lo_extent(m, lx, k) : zero(Float64) + u_ext = psuedo_cost_flag ? _hi_extent(m, ux, k) : zero(Float64) + + l_idepth = (flag && l_cont) ? n.depth + 1 : n.cont_depth + u_idepth = (flag && u_cont) ? n.depth + 1 : n.cont_depth + + n1 = NodeBB(l_lbd, l_ubd, l_int, l_cont, l_bound, u_bound, n.depth + 1, l_idepth, n.id + 1, BD_NEG, k, l_ext) + n2 = NodeBB(u_lbd, u_ubd, u_int, u_cont, l_bound, u_bound, n.depth + 1, u_idepth, n.id + 2, BD_POS, k, u_ext) + push!(m._stack, n1) + push!(m._stack, n2) + + m._node_repetitions = 1 + m._maximum_node_id += 2 + m._node_count += 2 + return +end +branch_node!(m::GlobalOptimizer{R,S,Q}) where {R,S,Q<:ExtensionType} = branch_node!(_ext(m), m) + +""" +$(SIGNATURES) + +Stores the current node to the stack after updating lower/upper bounds. 
+""" +function single_storage!(t::ExtensionType, m::GlobalOptimizer) + y = m._current_node + m._node_repetitions += 1 + m._node_count += 1 + lower_bound = max(y.lower_bound, m._lower_objective_value) + upper_bound = min(y.upper_bound, m._upper_objective_value) + push!(m._stack, NodeBB(y.lower_variable_bounds, y.upper_variable_bounds, y.is_integer, y.continuous, + lower_bound, upper_bound, y.depth, y.cont_depth, y.id, y.branch_direction, y.last_branch, y.branch_extent)) + return +end +single_storage!(m::GlobalOptimizer{R,S,Q}) where {R,S,Q<:ExtensionType} = single_storage!(_ext(m), m) + +""" +$(SIGNATURES) + +Selects node with the lowest lower bound in stack. +""" +function node_selection!(t::ExtensionType, m::GlobalOptimizer) + m._node_count -= 1 + m._current_node = popmin!(m._stack) + return +end +node_selection!(m::GlobalOptimizer{R,S,Q}) where {R,S,Q<:ExtensionType} = node_selection!(_ext(m), m) + +""" +$(SIGNATURES) + +Selects and deletes nodes from stack with lower bounds greater than global +upper bound. +""" +function fathom!(t::ExtensionType, m::GlobalOptimizer) + u = m._global_upper_bound + continue_flag = !isempty(m._stack) + while continue_flag + n = maximum(m._stack) + max_check = n.lower_bound > u + if max_check + popmax!(m._stack) + m._node_count -= 1 + end + continue_flag = !isempty(m._stack) && max_check + end + return +end +fathom!(m::GlobalOptimizer{R,S,Q}) where {R,S,Q<:ExtensionType} = fathom!(_ext(m), m) + +""" +$(TYPEDSIGNATURES) + +Creates an initial node with initial box constraints and adds it to the stack. 
+""" +function initialize_stack!(m::GlobalOptimizer) + d = _working_variable_info.(m, m._branch_to_sol_map) + push!(m._stack, NodeBB(lower_bound.(d), upper_bound.(d), is_integer.(d))) + m._node_count = 1 + m._maximum_node_id += 1 + return +end diff --git a/src/eago_optimizer/optimize/nonconvex/upper_problem.jl b/src/eago_optimizer/optimize/nonconvex/upper_problem.jl new file mode 100644 index 00000000..1b08e290 --- /dev/null +++ b/src/eago_optimizer/optimize/nonconvex/upper_problem.jl @@ -0,0 +1,67 @@ + +""" +$(SIGNATURES) + +Checks thats the integer +""" +function is_integer_feasible_relaxed(m::GlobalOptimizer) + bool = true + atol = _integer_abs_tol(m) + rtol = _integer_rel_tol(m) + for i = 1:_variable_num(BranchVar(), m) + if is_integer(BranchVar(), m, i) + xsol = _lower_solution(BranchVar(), m, i) + if isapprox(floor(xsol), xsol; atol = atol, rtol = rtol) + continue + elseif isapprox(ceil(xsol), xsol; atol = atol, rtol = rtol) + continue + else + bool &= false + break + end + end + end + return bool +end + +""" +$(SIGNATURES) + +Default check to see if the upper bounding problem should be run. By default, +The upper bounding problem is run on every node up to depth `upper_bounding_depth` +and is triggered with a probability of `0.5^(depth - upper_bounding_depth)` +afterwards for continuous problems. For integral problems, the `upper_bounding_depth` +the above approach is used as well as running on every node up to depth +`upper_bounding_depth + cont_depth` and is triggered with a probability of +`0.5^(depth - upper_bounding_depth - cont_depth)`. 
+""" +function default_nlp_heurestic(m::GlobalOptimizer) + bool = false + ubd_limit = m._parameters.upper_bounding_depth + n = _current_node(m) + if is_integer_feasible_relaxed(m) + Δdepth = n.depth - n.cont_depth + bool |= (Δdepth <= ubd_limit) + bool |= (rand() < 0.5^(Δdepth - ubd_limit)) + end + bool |= (n.depth <= ubd_limit) + bool |= (rand() < 0.5^(n.depth - ubd_limit)) + return bool +end + +""" +$(SIGNATURES) + +Default upper bounding problem which simply calls `solve_local_nlp!` to solve +the nlp locally. +""" +function upper_problem!(t::ExtensionType, m::GlobalOptimizer) + if !default_nlp_heurestic(m) + m._upper_feasibility = false + m._upper_objective_value = Inf + else + solve_local_nlp!(m) + end + return +end +upper_problem!(m::GlobalOptimizer{R,S,Q}) where {R,S,Q<:ExtensionType} = upper_problem!(_ext(m), m) diff --git a/src/eago_optimizer/optimize/optimize.jl b/src/eago_optimizer/optimize/optimize.jl index d71c7674..23ef5bc8 100644 --- a/src/eago_optimizer/optimize/optimize.jl +++ b/src/eago_optimizer/optimize/optimize.jl @@ -10,16 +10,18 @@ # throw_optimize_hook!. 
############################################################################# -include("optimize_lp_cone.jl") +include("optimize_lp.jl") +include("optimize_conic.jl") include("optimize_convex.jl") include("optimize_nonconvex.jl") -throw_optimize_hook!(m::Optimizer) = optimize_hook!(m.ext_type, m) - -function MOI.optimize!(m::Optimizer) +function throw_optimize_hook!(m::Optimizer{Q,S,T}) where {Q,S,T} + optimize_hook!(_ext(m), m) +end - m._start_time = time() +function MOI.optimize!(m::Optimizer{Q,S,T}) where {Q,S,T} + m._global_optimizer._start_time = time() # Runs the branch and bound routine if !m.enable_optimize_hook @@ -29,18 +31,17 @@ function MOI.optimize!(m::Optimizer) # Determines if the problem is an LP, MILP, SOCP, MISCOP, # CONVEX, OF MINCVX PROBLEM TYPE - parse_classify_problem!(m) + parse_classify_problem!(m._global_optimizer) - m._parse_time = m._start_time - time() + m._global_optimizer._parse_time = m._global_optimizer._start_time - time() # Throws the problem to the appropriate solution routine - optimize!(Val{m._working_problem._problem_type}(), m) + optimize!(m._working_problem._problem_type, m) else # throws to user-defined optimization hook throw_optimize_hook!(m) - end - return nothing + return end diff --git a/src/eago_optimizer/optimize/optimize_conic.jl b/src/eago_optimizer/optimize/optimize_conic.jl new file mode 100644 index 00000000..85711d71 --- /dev/null +++ b/src/eago_optimizer/optimize/optimize_conic.jl @@ -0,0 +1,32 @@ +### LP and MILP routines +function add_soc_constraints!(m::GlobalOptimizer, opt::T) where T + for (func, set) in m._input_problem._conic_second_order + MOI.add_constraint(opt, func, set) + end + return nothing +end + +function optimize!(::SOCP, m::GlobalOptimizer{R,S,Q}) where {R,S,Q<:ExtensionType} + + relaxed_optimizer = m.relaxed_optimizer + MOI.empty!(relaxed_optimizer) + + m._relaxed_variable_index = add_variables(m, relaxed_optimizer, m._input_problem._variable_count) + add_linear_constraints!(m, 
relaxed_optimizer) + add_soc_constraints!(m, relaxed_optimizer) + add_sv_or_aff_obj!(m, relaxed_optimizer, m._input_problem._objective) + MOI.set(relaxed_optimizer, MOI.ObjectiveSense(), m._input_problem._optimization_sense) + + if m._parameters.verbosity < 5 + MOI.set(relaxed_optimizer, MOI.Silent(), true) + end + m._parse_time = time() - m._start_time + + MOI.optimize!(relaxed_optimizer) + + #unpack_local_solve!(m, relaxed_optimizer) + + return +end + +optimize!(::MISOCP, m::GlobalOptimizer) = optimize!(SOCP(), m) \ No newline at end of file diff --git a/src/eago_optimizer/optimize/optimize_convex.jl b/src/eago_optimizer/optimize/optimize_convex.jl index 07c9f5e5..fe681b22 100644 --- a/src/eago_optimizer/optimize/optimize_convex.jl +++ b/src/eago_optimizer/optimize/optimize_convex.jl @@ -13,12 +13,37 @@ ############################################################################# """ +$(SIGNATURES) + +""" +function is_integer_feasible_local(m::GlobalOptimizer, d) + bool = true + atol = _integer_abs_tol(m) + rtol = _integer_rel_tol(m) + for i = 1:_variable_num(BranchVar(), m) + if is_integer(BranchVar(), m, i) + xsol = MOI.get(d, MOI.VariablePrimal(), m._upper_variables[i]) + if isapprox(floor(xsol), xsol; atol = atol, rtol = rtol) + continue + elseif isapprox(ceil(xsol), xsol; atol = atol, rtol = rtol) + continue + else + bool &= false + break + end + end + end + return bool +end + +""" +$(SIGNATURES) Shifts the resulting local nlp objective value `f*` by `(1.0 + relative_tolerance/100.0)*f* + absolute_tolerance/100.0`. This assumes that the local solvers relative tolerance and absolute tolerance is significantly lower than the global tolerance (local problem is minimum). 
""" -function stored_adjusted_upper_bound!(d::Optimizer, v::Float64) +function stored_adjusted_upper_bound!(d::GlobalOptimizer, v::Float64) adj_atol = d._parameters.absolute_tolerance/100.0 adj_rtol = d._parameters.relative_tolerance/100.0 if v > 0.0 @@ -26,41 +51,104 @@ function stored_adjusted_upper_bound!(d::Optimizer, v::Float64) else d._upper_objective_value = v*(1.0 - adj_rtol) + adj_atol end - return nothing end +function _update_upper_variables!(d, m::GlobalOptimizer{R,S,Q}) where {R,S,Q<:ExtensionType} + for i = 1:_variable_num(FullVar(), m) + v = m._upper_variables[i] + l = _lower_bound(FullVar(), m, i) + u = _upper_bound(FullVar(), m, i) + if is_integer(FullVar(), m, i) + l = ceil(l) + u = floor(u) + end + is_fixed_int = l == u + vi = _working_variable_info(m,i) + if is_fixed(vi) || is_fixed_int + MOI.add_constraint(d, v, ET(l)) + elseif is_less_than(vi) + MOI.add_constraint(d, v, LT(u)) + elseif is_greater_than(vi) + MOI.add_constraint(d, v, GT(l)) + elseif is_real_interval(vi) + MOI.add_constraint(d, v, LT(u)) + MOI.add_constraint(d, v, GT(l)) + end + end + return +end -revert_adjusted_upper_bound!(t::ExtensionType, d::Optimizer) = nothing +function _finite_mid(l::T, u::T) where T + (isfinite(l) && isfinite(u)) && return 0.5*(l + u) + isfinite(l) ? l : (isfinite(u) ? u : zero(T)) +end +function _set_starting_point!(d, m::GlobalOptimizer{R,S,Q}) where {R,S,Q<:ExtensionType} + for i = 1:_variable_num(FullVar(), m) + l = _lower_bound(FullVar(), m, i) + u = _upper_bound(FullVar(), m, i) + v = m._upper_variables[i] + MOI.set(d, MOI.VariablePrimalStart(), v, _finite_mid(l, u)) + end + return +end -function revert_adjusted_upper_bound!(t::DefaultExt, d::Optimizer) +""" + LocalResultStatus - adj_atol = d._parameters.absolute_tolerance/100.0 - adj_rtol = d._parameters.relative_tolerance/100.0 +Status code used internally to determine how to interpret the results from the +solution of a local problem solve. 
+""" +@enum(LocalResultStatus, LRS_FEASIBLE, LRS_OTHER) - adj_objective_value = d._global_upper_bound - adj_objective_value -= adj_atol - if adj_objective_value > 0.0 - adj_objective_value /= (1.0 + adj_rtol) - else - adj_objective_value /= (1.0 - adj_rtol) - end - d._global_upper_bound = adj_objective_value +""" +$(SIGNATURES) - return nothing +Takes an `MOI.TerminationStatusCode` and a `MOI.ResultStatusCode` and returns `true` +if this corresponds to a solution that is proven to be feasible. +Returns `false` otherwise. +""" +function local_problem_status(t::MOI.TerminationStatusCode, r::MOI.ResultStatusCode) + + if (t == MOI.OPTIMAL) && (r == MOI.FEASIBLE_POINT) + return LRS_FEASIBLE + elseif (t == MOI.LOCALLY_SOLVED) && (r == MOI.FEASIBLE_POINT) + return LRS_FEASIBLE + # This is default solver specific... the acceptable constraint tolerances + # are set to the same values as the basic tolerance. As a result, an + # acceptably solved solution is feasible but non necessarily optimal + # so it should be treated as a feasible point + elseif (t == MOI.ALMOST_LOCALLY_SOLVED) && (r == MOI.NEARLY_FEASIBLE_POINT) + return LRS_FEASIBLE + end + return LRS_OTHER end -# translates quadratic cone -function add_soc_constraints_as_quad!(m::Optimizer, opt::T) where T - - for (func, set) in m._input_problem._conic_second_order - # quadratic cone implies variable[1] >= 0.0, bounds contracted accordingly in initial_parse! 
- quad_terms = SQT[SQT((), func.variables[i], func.variables[i]) for i = 1:length(func.variables)] - sqf = SQF(SQT[], SAF[], 0.0) - MOI.add_constraint(opt, sqf, LT_ZERO) +function _unpack_local_nlp_solve!(m::GlobalOptimizer, d::T) where T + + tstatus = MOI.get(d, MOI.TerminationStatus()) + pstatus = MOI.get(d, MOI.PrimalStatus()) + m._upper_termination_status = tstatus + m._upper_result_status = pstatus + + if local_problem_status(tstatus, pstatus) == LRS_FEASIBLE + if is_integer_feasible_local(m, d) + + m._upper_feasibility = true + obj_val = MOI.get(d, MOI.ObjectiveValue()) + stored_adjusted_upper_bound!(m, obj_val) + m._best_upper_value = min(obj_val, m._best_upper_value) + m._upper_solution .= MOI.get(d, MOI.VariablePrimal(), m._upper_variables) + + ip = m._input_problem + _extract_primal_linear!(d, ip) + _extract_primal_quadratic!(d, ip) + end + else + m._upper_feasibility = false + m._upper_objective_value = Inf end - - return nothing + return end """ @@ -68,140 +156,31 @@ end Constructs and solves the problem locally on on node `y` updated the upper solution informaton in the optimizer. 
""" -function solve_local_nlp!(m::Optimizer) +function solve_local_nlp!(m::GlobalOptimizer{R,S,Q}) where {R,S,Q<:ExtensionType} - upper_optimizer = m.upper_optimizer + upper_optimizer = _upper_optimizer(m) MOI.empty!(upper_optimizer) - upper_variables = m._upper_variables for i = 1:m._working_problem._variable_count - @inbounds upper_variables[i] = MOI.add_variable(upper_optimizer) - end - - n = m._current_node - sol_to_branch_map = m._sol_to_branch_map - lower_variable_bounds = n.lower_variable_bounds - upper_variable_bounds = n.upper_variable_bounds - variable_info = m._input_problem._variable_info - - lvb = 0.0 - uvb = 0.0 - x0 = 0.0 - - for i = 1:m._input_problem._variable_count - vinfo = @inbounds variable_info[i] - single_variable = MOI.SingleVariable(@inbounds upper_variables[i]) - - if vinfo.branch_on === BRANCH - if vinfo.is_integer - else - indx = @inbounds sol_to_branch_map[i] - lvb = @inbounds lower_variable_bounds[indx] - uvb = @inbounds upper_variable_bounds[indx] - if vinfo.is_fixed - MOI.add_constraint(upper_optimizer, single_variable, ET(lvb)) - - elseif vinfo.has_lower_bound - if vinfo.has_upper_bound - MOI.add_constraint(upper_optimizer, single_variable, LT(uvb)) - MOI.add_constraint(upper_optimizer, single_variable, GT(lvb)) - - else - MOI.add_constraint(upper_optimizer, single_variable, GT(lvb)) - - end - elseif vinfo.has_upper_bound - MOI.add_constraint(upper_optimizer, single_variable, LT(uvb)) - - end - end - x0 = 0.5*(lvb + uvb) - upper_variable_index = @inbounds upper_variables[i] - MOI.set(upper_optimizer, MOI.VariablePrimalStart(), upper_variable_index, x0) - - else - # not branch variable - if vinfo.is_integer - else - lvb = vinfo.lower_bound - uvb = vinfo.upper_bound - if vinfo.is_fixed - MOI.add_constraint(upper_optimizer, single_variable, ET(lvb)) - - elseif vinfo.has_lower_bound - if vinfo.has_upper_bound - MOI.add_constraint(upper_optimizer, single_variable, LT(uvb)) - MOI.add_constraint(upper_optimizer, single_variable, GT(lvb)) - 
- else - MOI.add_constraint(upper_optimizer, single_variable, GT(lvb)) - - end - elseif vinfo.has_upper_bound - MOI.add_constraint(upper_optimizer, single_variable, LT(uvb)) - end - x0 = 0.5*(lvb + uvb) - upper_variable_index = @inbounds upper_variables[i] - MOI.set(upper_optimizer, MOI.VariablePrimalStart(), upper_variable_index, x0) - end - end - end - - # Add linear and quadratic constraints to model - add_linear_constraints!(m, upper_optimizer) - - for (func, set) in m._input_problem._quadratic_leq_constraints - MOI.add_constraint(upper_optimizer, func, set) + m._upper_variables[i] = MOI.add_variable(upper_optimizer) end - for (func, set) in m._input_problem._quadratic_geq_constraints - MOI.add_constraint(upper_optimizer, func, set) - end - for (func, set) in m._input_problem._quadratic_eq_constraints - MOI.add_constraint(upper_optimizer, func, set) - end - - if MOI.supports_constraint(upper_optimizer, VECOFVAR, SOC) - add_soc_constraints!(m, upper_optimizer) - else - add_soc_constraints_as_quad!(m, upper_optimizer) - end - + _update_upper_variables!(upper_optimizer, m) + _set_starting_point!(upper_optimizer, m) + + # add constraints + ip = m._input_problem + _add_constraint_store_ci_linear!(upper_optimizer, ip) + _add_constraint_store_ci_quadratic!(upper_optimizer, ip) + #add_soc_constraints!(m, upper_optimizer) + # Add nonlinear evaluation block MOI.set(upper_optimizer, MOI.NLPBlock(), m._working_problem._nlp_data) MOI.set(upper_optimizer, MOI.ObjectiveSense(), MOI.MIN_SENSE) - - # set objective as NECESSARY - add_sv_or_aff_obj!(m, upper_optimizer) - if m._input_problem._objective_type === SCALAR_QUADRATIC - MOI.set(upper_optimizer, MOI.ObjectiveFunction{SQF}(), m._input_problem._objective_sqf) - end + MOI.set(upper_optimizer, MOI.ObjectiveFunction{SAF}(), m._working_problem._objective_saf) # Optimizes the object MOI.optimize!(upper_optimizer) - - # Process output info and save to CurrentUpperInfo object - m._upper_termination_status = 
MOI.get(upper_optimizer, MOI.TerminationStatus()) - m._upper_result_status = MOI.get(upper_optimizer, MOI.PrimalStatus()) - - if is_feasible_solution(m._upper_termination_status, m._upper_result_status) - m._upper_feasibility = true - value = MOI.get(upper_optimizer, MOI.ObjectiveValue()) - stored_adjusted_upper_bound!(m, value) - m._best_upper_value = min(value, m._best_upper_value) - m._upper_solution .= MOI.get(upper_optimizer, MOI.VariablePrimal(), upper_variables) - - else - m._upper_feasibility = false - m._upper_objective_value = Inf - - end - - return nothing + _unpack_local_nlp_solve!(m, upper_optimizer) end -function optimize!(::Val{DIFF_CVX}, m::Optimizer) - - solve_local_nlp!(m) - - return nothing -end +optimize!(::DIFF_CVX, m::GlobalOptimizer) = solve_local_nlp!(m) \ No newline at end of file diff --git a/src/eago_optimizer/optimize/optimize_lp.jl b/src/eago_optimizer/optimize/optimize_lp.jl new file mode 100644 index 00000000..d9ec9ed1 --- /dev/null +++ b/src/eago_optimizer/optimize/optimize_lp.jl @@ -0,0 +1,104 @@ +# Copyright (c) 2018: Matthew Wilhelm & Matthew Stuber. +# This code is licensed under MIT license (see LICENSE.md for full details) +############################################################################# +# EAGO +# A development environment for robust and global optimization +# See https://github.com/PSORLab/EAGO.jl +############################################################################# +# src/eeago_optimizer/optimize/optimize_lp_cone.jl +# Contains the optimize! routines for LP, SOCP, (and in the future MILP and +# MISOCP) type problems. This also includes functions to add variables, +# linear constraints, soc constraints, and unpack solutions. 
+############################################################################# + +function add_variables(m::GlobalOptimizer, d) + n = m._input_problem._variable_count + z = fill(VI(1), n) + for i = 1:n + z[i] = MOI.add_variable(d) + vi = m._working_problem._variable_info[i] + if is_fixed(vi) + MOI.add_constraint(d, z[i], ET(vi)) + elseif is_interval(vi) + MOI.add_constraint(d, z[i], IT(vi)) + elseif is_greater_than(vi) + MOI.add_constraint(d, z[i], GT(vi)) + elseif is_less_than(vi) + MOI.add_constraint(d, z[i], LT(vi)) + end + if is_integer(vi) + MOI.add_constraint(d, z[i], MOI.Integer()) + end + end + return z +end + +lp_obj!(m::GlobalOptimizer, d, f::Nothing) = false +function lp_obj!(m::GlobalOptimizer, d, f::VI) + MOI.set(d, MOI.ObjectiveFunction{VI}(), f) + MOI.set(d, MOI.ObjectiveSense(), m._input_problem._optimization_sense) + return false +end +function lp_obj!(m::GlobalOptimizer, d, f::SAF) + MOI.set(d, MOI.ObjectiveFunction{SAF}(), f) + MOI.set(d, MOI.ObjectiveSense(), MOI.MIN_SENSE) + #MOI.set(d, MOI.ObjectiveSense(), m._input_problem._optimization_sense) + return m._input_problem._optimization_sense == MOI.MAX_SENSE +end + +function optimize!(::LP, m::Optimizer{Q,S,T}) where {Q,S,T} + + d = m._global_optimizer + ip = d._input_problem + r = _relaxed_optimizer(d) + MOI.empty!(r) + + d._relaxed_variable_index = add_variables(d, r) + + # TODO: Remove when upstream Cbc issue https://github.com/jump-dev/Cbc.jl/issues/168 is fixed + # Add extra binary variable `issue_var` fixed to zero to prevent Cbc from displaying even though + # silent is set to off. Sets `issue_var` to zero. 
+ issue_var = MOI.add_variable(r) + MOI.add_constraint(r, issue_var, ZO()) + MOI.add_constraint(r, issue_var, ET(0.0)) + + _add_constraint_store_ci_linear!(r, ip) + + min_to_max = lp_obj!(d, r, ip._objective) + if ip._optimization_sense == MOI.FEASIBILITY_SENSE + MOI.set(r, MOI.ObjectiveSense(), ip._optimization_sense) + end + + (d._parameters.verbosity < 5) && MOI.set(r, MOI.Silent(), true) + d._parse_time = time() - d._start_time + + MOI.optimize!(r) + + m._termination_status_code = MOI.get(r, MOI.TerminationStatus()) + m._result_status_code = MOI.get(r, MOI.PrimalStatus()) + + if MOI.get(r, MOI.ResultCount()) > 0 + + obj_val = MOI.get(r, MOI.ObjectiveValue()) + if min_to_max + obj_val *= -1.0 + end + d._global_lower_bound = obj_val + d._global_upper_bound = obj_val + d._best_upper_value = obj_val + d._solution_value = obj_val + m._objective_value = obj_val + m._objective_bound = obj_val + + d._continuous_solution = zeros(d._input_problem._variable_count) + for i = 1:d._input_problem._variable_count + d._continuous_solution[i] = MOI.get(r, MOI.VariablePrimal(), d._relaxed_variable_index[i]) + end + + _extract_primal_linear!(r, ip) + end + d._run_time = time() - d._start_time + return +end + +optimize!(::MILP, m::Optimizer{Q,S,T}) where {Q,S,T} = optimize!(LP(), m) \ No newline at end of file diff --git a/src/eago_optimizer/optimize/optimize_lp_cone.jl b/src/eago_optimizer/optimize/optimize_lp_cone.jl deleted file mode 100644 index 29dceb7a..00000000 --- a/src/eago_optimizer/optimize/optimize_lp_cone.jl +++ /dev/null @@ -1,170 +0,0 @@ -# Copyright (c) 2018: Matthew Wilhelm & Matthew Stuber. 
-# This code is licensed under MIT license (see LICENSE.md for full details) -############################################################################# -# EAGO -# A development environment for robust and global optimization -# See https://github.com/PSORLab/EAGO.jl -############################################################################# -# src/eeago_optimizer/optimize/optimize_lp_cone.jl -# Contains the optimize! routines for LP, SOCP, (and in the future MILP and -# MISOCP) type problems. This also includes functions to add variables, -# linear constraints, soc constraints, and unpack solutions. -############################################################################# - -#= -LP -> COPY TO RELAXED SOLVER AND SOLVE -MILP -> COPY TO RELAXED SOLVER AND SOLVE -SOCP -> COPY TO RELAXED SOLVER AND SOLVE -MISOCP -> COPY TO RELAXED SOLVER AND SOLVE -DIFF_CVX -> COPY TO NLP SOLVER AND SOLVE (POTENTIAL MULTISTART) -NS_CVX -> COPY TO NLP SOLVER AND SOLVE (POTENTIAL MULTISTART) -DIFF_NCVX -> APPLY GLOBAL SOLVER (UNLESS USER REQUEST LOCAL SOLVE THEN NLP) -NS_NCVX -> APPLY GLOBAL SOLVER (UNLESS USER REQUEST LOCAL SOLVE THEN NLP) -MINCVX -> APPLY GLOBAL SOLVER (LOCAL SOLVE OPTION FUTURE FEATURE) -=# - -function add_variables(m::Optimizer, optimizer::T, variable_number::Int) where T - - variable_index = fill(VI(1), variable_number) - for i = 1:variable_number - @inbounds variable_index[i] = MOI.add_variable(optimizer) - relaxed_variable = SV(@inbounds variable_index[i]) - v_info = @inbounds m._working_problem._variable_info[i] - if v_info.is_integer && v_info.is_fixed - MOI.add_constraint(optimizer, relaxed_variable, ET(v_info.lower_bound)) - elseif v_info.is_integer - MOI.add_constraint(optimizer, relaxed_variable, ZO()) - elseif v_info.is_fixed - MOI.add_constraint(optimizer, relaxed_variable, ET(v_info.lower_bound)) - elseif v_info.has_lower_bound && v_info.has_upper_bound - MOI.add_constraint(optimizer, relaxed_variable, IT(v_info.lower_bound, v_info.upper_bound)) 
- elseif v_info.has_lower_bound - MOI.add_constraint(optimizer, relaxed_variable, GT(v_info.lower_bound)) - elseif v_info.has_upper_bound - MOI.add_constraint(optimizer, relaxed_variable, LT(v_info.upper_bound)) - end - end - - return variable_index -end - -### LP and MILP routines -function add_linear_constraints!(m::Optimizer, opt::T) where T - - # add linear constraints - for (func, set) in m._input_problem._linear_leq_constraints - MOI.add_constraint(opt, func, set) - end - for (func, set) in m._input_problem._linear_geq_constraints - MOI.add_constraint(opt, func, set) - end - for (func, set) in m._input_problem._linear_eq_constraints - MOI.add_constraint(opt, func, set) - end - - return nothing -end - -### LP and MILP routines -function add_soc_constraints!(m::Optimizer, opt::T) where T - - for (func, set) in m._input_problem._conic_second_order - MOI.add_constraint(opt, func, set) - end - - return nothing -end - -function add_sv_or_aff_obj!(m::Optimizer, opt::T) where T - - if m._input_problem._objective_type === SINGLE_VARIABLE - MOI.set(opt, MOI.ObjectiveFunction{SV}(), m._input_problem._objective_sv) - elseif m._input_problem._objective_type === SCALAR_AFFINE - MOI.set(opt, MOI.ObjectiveFunction{SAF}(), m._input_problem._objective_saf) - end - - return nothing -end - -function unpack_local_solve!(m::Optimizer, opt::T) where T - - m._maximum_node_id = 0 - - m._termination_status_code = MOI.get(opt, MOI.TerminationStatus()) - m._result_status_code = MOI.get(opt, MOI.PrimalStatus()) - m._feasible_solution_found = m._result_status_code === MOI.FEASIBLE_POINT - - if MOI.get(opt, MOI.ResultCount()) > 0 - - objective_value = MOI.get(opt, MOI.ObjectiveValue()) - - # corrects for standard printing multiplier - if m._input_problem._optimization_sense === MOI.MAX_SENSE - objective_value *= -1.0 - end - - m._global_lower_bound = objective_value - m._global_upper_bound = objective_value - m._objective_value = objective_value - m._best_upper_value = objective_value - 
m._solution_value = objective_value - end - - m._continuous_solution = zeros(m._input_problem._variable_count) - for i = 1:m._input_problem._variable_count - m._continuous_solution[i] = MOI.get(opt, MOI.VariablePrimal(), m._relaxed_variable_index[i]) - end - - m._run_time = time() - m._start_time - - return nothing -end - -function optimize!(::Val{LP}, m::Optimizer) - - relaxed_optimizer = m.relaxed_optimizer - MOI.empty!(relaxed_optimizer) - - m._relaxed_variable_index = add_variables(m, relaxed_optimizer, m._input_problem._variable_count) - add_linear_constraints!(m, relaxed_optimizer) - add_sv_or_aff_obj!(m, relaxed_optimizer) - MOI.set(relaxed_optimizer, MOI.ObjectiveSense(), m._input_problem._optimization_sense) - - if m._parameters.verbosity < 5 - MOI.set(relaxed_optimizer, MOI.Silent(), true) - end - m._parse_time = time() - m._start_time - - MOI.optimize!(relaxed_optimizer) - - unpack_local_solve!(m, relaxed_optimizer) - - return nothing -end - -optimize!(::Val{MILP}, m::Optimizer) = optimize!(Val{LP}(), m) - -function optimize!(::Val{SOCP}, m::Optimizer) - - relaxed_optimizer = m.relaxed_optimizer - MOI.empty!(relaxed_optimizer) - - m._relaxed_variable_index = add_variables(m, relaxed_optimizer, m._input_problem._variable_count) - add_linear_constraints!(m, relaxed_optimizer) - add_soc_constraints!(m, relaxed_optimizer) - add_sv_or_aff_obj!(m, relaxed_optimizer) - MOI.set(relaxed_optimizer, MOI.ObjectiveSense(), m._input_problem._optimization_sense) - - if m._parameters.verbosity < 5 - MOI.set(relaxed_optimizer, MOI.Silent(), true) - end - m._parse_time = time() - m._start_time - - MOI.optimize!(relaxed_optimizer) - - unpack_local_solve!(m, relaxed_optimizer) - - return nothing -end - -optimize!(::Val{MISOCP}, m::Optimizer) = optimize!(Val{SOCP}(), m) diff --git a/src/eago_optimizer/optimize/optimize_nonconvex.jl b/src/eago_optimizer/optimize/optimize_nonconvex.jl index 1acd563f..0a59da24 100644 --- a/src/eago_optimizer/optimize/optimize_nonconvex.jl +++ 
b/src/eago_optimizer/optimize/optimize_nonconvex.jl @@ -10,60 +10,20 @@ # bound routine called by EAGO. ############################################################################# -function set_evaluator_flags!(d, is_post, is_intersect, is_first_eval, interval_intersect) - - d.is_post = is_post - d.is_intersect = is_intersect - d.is_first_eval = is_first_eval - d.interval_intersect = interval_intersect - - return nothing -end - -function reset_relaxation!(m::Optimizer) - - m._working_problem._relaxed_evaluator.is_first_eval = true - fill!(m._working_problem._relaxed_evaluator.subexpressions_eval, false) - - m._new_eval_objective = true - m._new_eval_constraint = true - - delete_nl_constraints!(m) - delete_objective_cuts!(m) - - return nothing -end +include(joinpath(@__DIR__,"nonconvex","stack_management.jl")) +include(joinpath(@__DIR__,"nonconvex","lower_problem.jl")) +include(joinpath(@__DIR__,"nonconvex","upper_problem.jl")) +include(joinpath(@__DIR__,"nonconvex","postprocess.jl")) +include(joinpath(@__DIR__,"nonconvex","log_iteration.jl")) +include(joinpath(@__DIR__,"nonconvex","display.jl")) +include(joinpath(@__DIR__,"nonconvex","configure_subsolver.jl")) """ -$(TYPEDSIGNATURES) -Creates an initial node with initial box constraints and adds it to the stack. 
+Basic parsing for global solutions (no extensive manipulation) """ -function create_initial_node!(m::Optimizer) - - branch_variable_count = m._branch_variable_count - - variable_info = m._working_problem._variable_info - lower_bound = zeros(Float64, branch_variable_count) - upper_bound = zeros(Float64, branch_variable_count) - branch_count = 1 - - for i = 1:m._working_problem._variable_count - vi = variable_info[i] - if vi.branch_on === BRANCH - lower_bound[branch_count] = vi.lower_bound - upper_bound[branch_count] = vi.upper_bound - branch_count += 1 - end - end - - n = NodeBB(lower_bound, upper_bound, -Inf, Inf, 1, 1) - push!(m._stack, n) - m._node_count = 1 - m._maximum_node_id += 1 - - return nothing -end +parse_global!(t::ExtensionType, m::GlobalOptimizer) = nothing +parse_global!(m::GlobalOptimizer{R,S,Q}) where {R,S,Q<:ExtensionType} = parse_global!(_ext(m), m) """ $(TYPEDSIGNATURES) @@ -71,84 +31,77 @@ $(TYPEDSIGNATURES) Loads variables, linear constraints, and empty storage for first nlp and quadratic cut. """ -function load_relaxed_problem!(m::Optimizer) - - relaxed_optimizer = m.relaxed_optimizer +function load_relaxed_problem!(m::GlobalOptimizer{R,S,Q}) where {R,S,Q<:ExtensionType} + d = _relaxed_optimizer(m) # add variables and indices and constraints wp = m._working_problem branch_variable_count = 0 - variable_count = wp._variable_count - for i = 1:variable_count - - relaxed_variable_indx = MOI.add_variable(relaxed_optimizer) - relaxed_variable = SV(relaxed_variable_indx) - push!(m._relaxed_variable_index, relaxed_variable_indx) - - vinfo = wp._variable_info[i] + full_var_num = _variable_num(FullVar(), m) + relaxed_index_new = length(m._relaxed_variable_index) != full_var_num + for i = 1:full_var_num + v = MOI.add_variable(d) + if relaxed_index_new + push!(m._relaxed_variable_index, v) + else + m._relaxed_variable_index[i] = v + end is_branch_variable = m._branch_variables[i] - vinfo.branch_on = is_branch_variable ? 
BRANCH : NO_BRANCH is_branch_variable && (branch_variable_count += 1) - if vinfo.is_integer - - elseif vinfo.is_fixed - ci_sv_et = MOI.add_constraint(relaxed_optimizer, relaxed_variable, ET(vinfo.lower_bound)) - if is_branch_variable - push!(m._relaxed_variable_eq, (ci_sv_et, branch_variable_count)) - wp._var_eq_count += 1 - end - - else - if vinfo.has_lower_bound - ci_sv_gt = MOI.add_constraint(relaxed_optimizer, relaxed_variable, GT(vinfo.lower_bound)) - if is_branch_variable - push!(m._relaxed_variable_gt, (ci_sv_gt, branch_variable_count)) - wp._var_geq_count += 1 - end - end - - if vinfo.has_upper_bound - ci_sv_lt = MOI.add_constraint(relaxed_optimizer, relaxed_variable, LT(vinfo.upper_bound)) - if is_branch_variable - push!(m._relaxed_variable_lt, (ci_sv_lt, branch_variable_count)) - wp._var_leq_count += 1 - end + vi = wp._variable_info[i] + if !is_branch_variable + if is_fixed(vi) + MOI.add_constraint(d, v, ET(vi)) + elseif is_interval(vi) + MOI.add_constraint(d, v, IT(vi)) + elseif is_greater_than(vi) + MOI.add_constraint(d, v, GT(vi)) + elseif is_less_than(vi) + MOI.add_constraint(d, v, LT(vi)) end end end - # set node index to single variable constraint index maps - m._node_to_sv_leq_ci = fill(CI{SV,LT}(-1), branch_variable_count) - m._node_to_sv_geq_ci = fill(CI{SV,GT}(-1), branch_variable_count) - for i = 1:wp._var_leq_count - ci_sv_lt, branch_index = m._relaxed_variable_lt[i] - m._node_to_sv_leq_ci[branch_index] = ci_sv_lt - end - for i = 1:wp._var_geq_count - ci_sv_gt, branch_index = m._relaxed_variable_gt[i] - m._node_to_sv_geq_ci[branch_index] = ci_sv_gt - end + # TODO: Remove when upstream Cbc issue https://github.com/jump-dev/Cbc.jl/issues/168 is fixed + # Add extra binary variable `issue_var` fixed to zero to prevent Cbc from displaying even though + # silent is set to off. Sets `issue_var` to zero. 
+ issue_var = MOI.add_variable(d) + MOI.add_constraint(d, issue_var, ZO()) + MOI.add_constraint(d, issue_var, ET(0.0)) + # set number of variables to branch on m._branch_variable_count = branch_variable_count # add linear constraints - add_linear_constraints!(m, relaxed_optimizer) + for (f, s) in collect(values(m._input_problem._linear_leq_constraints)) + MOI.add_constraint(d, f, s) + end + for (f, s) in collect(values(m._input_problem._linear_geq_constraints)) + MOI.add_constraint(d, f, s) + end + for (f, s) in collect(values(m._input_problem._linear_eq_constraints)) + MOI.add_constraint(d, f, s) + end # sets relaxed problem objective sense to Min as all problems # are internally converted in Min problems in EAGO - MOI.set(relaxed_optimizer, MOI.ObjectiveSense(), MOI.MIN_SENSE) - return nothing + MOI.set(d, MOI.ObjectiveSense(), MOI.MIN_SENSE) + MOI.set(d, MOI.ObjectiveFunction{SAF}(), wp._objective_saf) + + return end -function presolve_global!(t::ExtensionType, m::Optimizer) +function presolve_global!(t::ExtensionType, m::GlobalOptimizer) + set_default_config!(m) load_relaxed_problem!(m) - create_initial_node!(m) + initialize_stack!(m) + wp = m._working_problem branch_variable_count = m._branch_variable_count m._current_xref = fill(0.0, branch_variable_count) @@ -161,15 +114,14 @@ function presolve_global!(t::ExtensionType, m::Optimizer) # populate in full space until local MOI nlp solves support constraint deletion # uses input model for local nlp solves... 
may adjust this if a convincing reason # to use a reformulated upper problem presents itself - m._lower_solution = zeros(Float64, m._working_problem._variable_count) - m._cut_solution = zeros(Float64, m._working_problem._variable_count) - m._continuous_solution = zeros(Float64, m._working_problem._variable_count) - m._upper_solution = zeros(Float64, m._working_problem._variable_count) - m._upper_variables = fill(VI(-1), m._working_problem._variable_count) + m._lower_solution = zeros(Float64, wp._variable_count) + m._continuous_solution = zeros(Float64, wp._variable_count) + m._upper_solution = zeros(Float64, wp._variable_count) + m._upper_variables = fill(VI(-1), wp._variable_count) # add storage for fbbt - m._lower_fbbt_buffer = zeros(Float64, m._working_problem._variable_count) - m._upper_fbbt_buffer = zeros(Float64, m._working_problem._variable_count) + m._lower_fbbt_buffer = zeros(Float64, wp._variable_count) + m._upper_fbbt_buffer = zeros(Float64, wp._variable_count) # add storage for obbt ( perform obbt on all relaxed variables, potentially) m._obbt_working_lower_index = fill(false, branch_variable_count) @@ -182,184 +134,15 @@ function presolve_global!(t::ExtensionType, m::Optimizer) m._upper_indx_diff = fill(false, branch_variable_count) m._obbt_variable_count = branch_variable_count - # add storage for objective cut if quadratic or nonlinear - wp = m._working_problem - obj_type = wp._objective_type - if obj_type === SCALAR_QUADRATIC - wp._objective_saf.terms = copy(wp._objective_sqf.saf.terms) - elseif obj_type === NONLINEAR - wp._objective_saf.terms = copy(wp._objective_nl.saf.terms) - end - # set subgradient refinement flag wp._relaxed_evaluator.is_post = m._parameters.subgrad_tighten wp._relaxed_evaluator.subgrad_tighten = m._parameters.subgrad_tighten wp._relaxed_evaluator.reverse_subgrad_tighten = m._parameters.reverse_subgrad_tighten m._presolve_time = time() - m._parse_time - - return nothing -end - -""" -$(SIGNATURES) - -Selects node with the 
lowest lower bound in stack. -""" -function node_selection!(t::ExtensionType, m::Optimizer) - - m._node_count -= 1 - m._current_node = popmin!(m._stack) - - return nothing - -end - -""" -$(SIGNATURES) - -Creates two nodes from `current_node` using information available the `x` -and stores them to the stack. By default, relative width bisection is perfomed -at a point `branch_pnt` which is a convex combination -(parameter: `branch_cvx_factor`) of the solution to the relaxation and -the midpoint of the node. If this solution lies within `branch_offset/width` of -a bound then the branch point is moved to a distance of `branch_offset/width` -from the bound. -""" -function branch_node!(t::ExtensionType, m::Optimizer) - - n = m._current_node - - lvbs = n.lower_variable_bounds - uvbs = n.upper_variable_bounds - - max_pos = 0 - max_val = -Inf - temp_max = 0.0 - - flag = true - for i = 1:m._branch_variable_count - si = m._branch_to_sol_map[i] - vi = m._working_problem._variable_info[si] - if vi.branch_on === BRANCH - temp_max = uvbs[i] - lvbs[i] - temp_max /= vi.upper_bound - vi.lower_bound - if temp_max > max_val - max_pos = i - max_val = temp_max - end - end - end - - lvb = lvbs[max_pos] - uvb = uvbs[max_pos] - si = m._branch_to_sol_map[max_pos] - lsol = m._lower_solution[si] - - cvx_f = m._parameters.branch_cvx_factor - cvx_g = m._parameters.branch_offset - - branch_pnt = cvx_f*lsol + (1.0 - cvx_f)*(lvb + uvb)/2.0 - if branch_pnt < lvb*(1.0 - cvx_g) + cvx_g*uvb - branch_pnt = (1.0 - cvx_g)*lvb + cvx_g*uvb - elseif branch_pnt > cvx_g*lvb + (1.0 - cvx_g)*uvb - branch_pnt = cvx_g*lvb + (1.0 - cvx_g)*uvb - end - - # rounds into branch points, which in turn prevents the - # solution at the branch point from being discarded - N1::Interval{Float64} = Interval{Float64}(lvb, branch_pnt) - N2::Interval{Float64} = Interval{Float64}(branch_pnt, uvb) - lvb_1 = copy(lvbs) - uvb_1 = copy(uvbs) - lvb_2 = copy(lvbs) - uvb_2 = copy(uvbs) - lvb_1[max_pos] = N1.lo - uvb_1[max_pos] = N1.hi - 
lvb_2[max_pos] = N2.lo - uvb_2[max_pos] = N2.hi - - lower_bound = max(n.lower_bound, m._lower_objective_value) - upper_bound = min(n.upper_bound, m._upper_objective_value) - new_depth = n.depth + 1 - - m._maximum_node_id += 1 - X1 = NodeBB(lvb_1, uvb_1, lower_bound, upper_bound, new_depth, m._maximum_node_id) - m._maximum_node_id += 1 - X2 = NodeBB(lvb_2, uvb_2, lower_bound, upper_bound, new_depth, m._maximum_node_id) - - push!(m._stack, X1) - push!(m._stack, X2) - - m._node_repetitions = 1 - m._node_count += 2 - - return nothing + return end - -""" -$(SIGNATURES) - -Stores the current node to the stack after updating lower/upper bounds. -""" -function single_storage!(t::ExtensionType, m::Optimizer) - y = m._current_node - m._node_repetitions += 1 - m._node_count += 1 - lower_bound = max(y.lower_bound, m._lower_objective_value) - upper_bound = min(y.upper_bound, m._upper_objective_value) - push!(m._stack, NodeBB(y.lower_variable_bounds, y.upper_variable_bounds, - lower_bound, upper_bound, y.depth, y.id)) - - return nothing -end - -""" -$(SIGNATURES) - -Selects and deletes nodes from stack with lower bounds greater than global -upper bound. -""" -function fathom!(t::ExtensionType, m::Optimizer) - - upper = m._global_upper_bound - continue_flag = !isempty(m._stack) - - while continue_flag - max_node = maximum(m._stack) - max_check = (max_node.lower_bound > upper) - - if max_check - popmax!(m._stack) - m._node_count -= 1 - if isempty(m._stack) - continue_flag = false - end - - else - if !max_check - continue_flag = false - elseif isempty(m._stack) - continue_flag = false - end - - end - end - - return nothing -end - -""" -$(SIGNATURES) - -Checks to see if current node should be reprocessed. -""" -function repeat_check(t::ExtensionType, m::Optimizer) - return false -end - -relative_gap(L::Float64, U::Float64) = ((L > -Inf) && (U < Inf)) ? 
abs(U - L)/(max(abs(L), abs(U))) : Inf -relative_tolerance(L::Float64, U::Float64, tol::Float64) = relative_gap(L, U) > tol || ~(L > -Inf) +presolve_global!(m::GlobalOptimizer{R,S,Q}) where {R,S,Q<:ExtensionType} = presolve_global!(_ext(m), m) """ $(SIGNATURES) @@ -368,639 +151,84 @@ Checks for termination of algorithm due to satisfying absolute or relative tolerance, infeasibility, or a specified limit, returns a boolean valued true if algorithm should continue. """ -function termination_check(t::ExtensionType, m::Optimizer) - - node_in_stack = length(m._stack) +function termination_check(t::ExtensionType, m::GlobalOptimizer) + nlen = length(m._stack) L = m._global_lower_bound U = m._global_upper_bound - - if node_in_stack === 0 - - if m._first_solution_node > 0 - m._termination_status_code = MOI.OPTIMAL - m._result_status_code = MOI.FEASIBLE_POINT - (m._parameters.verbosity >= 3) && println("Empty Stack: Exhaustive Search Finished") - - else - m._termination_status_code = MOI.INFEASIBLE - m._result_status_code = MOI.INFEASIBILITY_CERTIFICATE - (m._parameters.verbosity >= 3) && println("Empty Stack: Infeasible") - end - - elseif node_in_stack >= m._parameters.node_limit - - m._termination_status_code = MOI.NODE_LIMIT - m._result_status_code = MOI.UNKNOWN_RESULT_STATUS - (m._parameters.verbosity >= 3) && println("Node Limit Exceeded") - + if nlen == 0 && m._first_solution_node > 0 + m._end_state = GS_OPTIMAL + elseif nlen == 0 && !(m._first_solution_node > 0) + m._end_state = GS_INFEASIBLE + elseif nlen >= m._parameters.node_limit + m._end_state = GS_NODE_LIMIT elseif m._iteration_count >= m._parameters.iteration_limit - - m._termination_status_code = MOI.ITERATION_LIMIT - m._result_status_code = MOI.UNKNOWN_RESULT_STATUS - (m._parameters.verbosity >= 3) && println("Maximum Iteration Exceeded") - - elseif ~relative_tolerance(L, U, m._parameters.relative_tolerance) - - m._termination_status_code = MOI.OPTIMAL - m._result_status_code = MOI.FEASIBLE_POINT - 
(m._parameters.verbosity >= 3) && println("Relative Tolerance Achieved") - - elseif (U - L) < m._parameters.absolute_tolerance - - m._termination_status_code = MOI.OPTIMAL - m._result_status_code = MOI.FEASIBLE_POINT - (m._parameters.verbosity >= 3) && println("Absolute Tolerance Achieved") - + m._end_state = GS_ITERATION_LIMIT + elseif !relative_tolerance(L, U, m._parameters.relative_tolerance) + m._end_state = GS_RELATIVE_TOL + elseif abs(U - L) < m._parameters.absolute_tolerance + m._end_state = GS_ABSOLUTE_TOL elseif m._run_time > m._parameters.time_limit - - m._termination_status_code = MOI.TIME_LIMIT - m._result_status_code = MOI.UNKNOWN_RESULT_STATUS - (m._parameters.verbosity >= 3) && println("Time Limit Exceeded") - + m._end_state = GS_TIME_LIMIT else - return false - end - return true end - -""" -$(SIGNATURES) - -Checks for convergence of algorithm with respect to absolute and/or relative -tolerances. -""" -function convergence_check(t::ExtensionType, m::Optimizer) - - L = m._lower_objective_value - U = m._global_upper_bound - t = (U - L) <= m._parameters.absolute_tolerance - if (U < Inf) && (L > Inf) - t |= (abs(U - L)/(max(abs(L), abs(U))) <= m._parameters.relative_tolerance) - end - - if t && m._min_converged_value < Inf - m._min_converged_value = min(m._min_converged_value, L) - else - m._min_converged_value = L - end - - return t -end - -""" -$(SIGNATURES) - -Takes an `MOI.TerminationStatusCode` and a `MOI.ResultStatusCode` and returns -the tuple `(valid_result::Bool, feasible::Bool)`. The value `valid_result` is -`true` if the pair of codes prove that either the subproblem solution was solved -to global optimality or the subproblem solution is infeasible. The value of -`feasible` is true if the problem is feasible and false if the problem is infeasible. 
-""" -function is_globally_optimal(t::MOI.TerminationStatusCode, r::MOI.ResultStatusCode) - - feasible = false - valid_result = false - - if (t === MOI.INFEASIBLE && r === MOI.INFEASIBILITY_CERTIFICATE) - valid_result = true - - elseif (t === MOI.INFEASIBLE && r === MOI.NO_SOLUTION) - valid_result = true - - elseif (t === MOI.INFEASIBLE && r === MOI.UNKNOWN_RESULT_STATUS) - valid_result = true - - elseif (t === MOI.OPTIMAL && r === MOI.FEASIBLE_POINT) - valid_result = true - feasible = true - - elseif (t === MOI.INFEASIBLE_OR_UNBOUNDED && r === MOI.NO_SOLUTION) - valid_result = true - feasible = false - - end - - return valid_result, feasible -end - -""" -$(SIGNATURES) - -Takes an `MOI.TerminationStatusCode` and a `MOI.ResultStatusCode` and returns `true` -if this corresponds to a solution that is proven to be feasible. -Returns `false` otherwise. -""" -function is_feasible_solution(t::MOI.TerminationStatusCode, r::MOI.ResultStatusCode) - - termination_flag = false - result_flag = false - - (t === MOI.OPTIMAL) && (termination_flag = true) - (t === MOI.LOCALLY_SOLVED) && (termination_flag = true) - - # This is default solver specific... the acceptable constraint tolerances - # are set to the same values as the basic tolerance. As a result, an - # acceptably solved solution is feasible but non necessarily optimal - # so it should be treated as a feasible point - if (t === MOI.ALMOST_LOCALLY_SOLVED) && (r === MOI.NEARLY_FEASIBLE_POINT) - termination_flag = true - result_flag = true - end - - (r === MOI.FEASIBLE_POINT) && (result_flag = true) - - return (termination_flag && result_flag) -end - -""" -$(SIGNATURES) - -Retrieves the lower and upper duals for variable bounds from the -`relaxed_optimizer` and sets the appropriate values in the -`_lower_lvd` and `_lower_uvd` storage fields. 
-""" -function set_dual!(m::Optimizer) - - relaxed_optimizer = m.relaxed_optimizer - relaxed_variable_lt = m._relaxed_variable_lt - relaxed_variable_gt = m._relaxed_variable_gt - - for i = 1:m._working_problem._var_leq_count - ci_lt, i_lt = @inbounds relaxed_variable_lt[i] - @inbounds m._lower_uvd[i_lt] = MOI.get(relaxed_optimizer, MOI.ConstraintDual(), ci_lt) - end - for i = 1:m._working_problem._var_geq_count - ci_gt, i_gt = @inbounds relaxed_variable_gt[i] - @inbounds m._lower_lvd[i_gt] = MOI.get(relaxed_optimizer, MOI.ConstraintDual(), ci_gt) - end - - return nothing -end - -""" -$(SIGNATURES) - -Runs interval, linear, quadratic contractor methods followed by obbt and a -constraint programming walk up to tolerances specified in -`EAGO.Optimizer` object. -""" -function preprocess!(t::ExtensionType, m::Optimizer) - - reset_relaxation!(m) - - wp = m._working_problem - params = m._parameters - - # Sets initial feasibility - feasible_flag = true - m._obbt_performed_flag = false - - # compute initial volume - m._initial_volume = prod(upper_variable_bounds(m._current_node) - - lower_variable_bounds(m._current_node)) - - if params.fbbt_lp_depth >= m._iteration_count - load_fbbt_buffer!(m) - for i = 1:m._parameters.fbbt_lp_repetitions - if feasible_flag - for j = 1:wp._saf_leq_count - !feasible_flag && break - saf_leq = wp._saf_leq[j] - feasible_flag &= fbbt!(m, saf_leq) - end - !feasible_flag && break - - for j = 1:wp._saf_eq_count - !feasible_flag && break - saf_eq = wp._saf_eq[j] - feasible_flag &= fbbt!(m, saf_eq) - end - !feasible_flag && break - end - end - unpack_fbbt_buffer!(m) - end - - # done after cp to prevent using cp specific flags in cut generation - set_first_relax_point!(m) - - cp_walk_count = 0 - perform_cp_walk_flag = feasible_flag - perform_cp_walk_flag &= (params.cp_depth >= m._iteration_count) - perform_cp_walk_flag &= (cp_walk_count < m._parameters.cp_repetitions) - while perform_cp_walk_flag - feasible_flag &= set_constraint_propagation_fbbt!(m) 
- !feasible_flag && break - cp_walk_count += 1 - perform_cp_walk_flag = (cp_walk_count < m._parameters.cp_repetitions) - end - - obbt_count = 0 - perform_obbt_flag = feasible_flag - perform_obbt_flag &= (params.obbt_depth >= m._iteration_count) - perform_obbt_flag &= (obbt_count < m._parameters.obbt_repetitions) - - while perform_obbt_flag - feasible_flag &= obbt!(m) - m._obbt_performed_flag = true - !feasible_flag && break - obbt_count += 1 - perform_obbt_flag = (obbt_count < m._parameters.obbt_repetitions) - end - - m._final_volume = prod(upper_variable_bounds(m._current_node) - - lower_variable_bounds(m._current_node)) - - m._preprocess_feasibility = feasible_flag - - return nothing -end - -""" -$(SIGNATURES) - -Updates the relaxed constraint by setting the constraint set of `v == x*`` , -`xL_i <= x_i`, and `x_i <= xU_i` for each such constraint added to the relaxed -optimizer. -""" -function update_relaxed_problem_box!(m::Optimizer) - - opt = m.relaxed_optimizer - wp = m._working_problem - - n = m._current_node - lower_bound = n.lower_variable_bounds - upper_bound = n.upper_variable_bounds - - relaxed_variable_eq = m._relaxed_variable_eq - for i = 1:wp._var_eq_count - constr_indx, node_indx = relaxed_variable_eq[i] - MOI.set(opt, MOI.ConstraintSet(), constr_indx, ET( lower_bound[node_indx])) - end - - relaxed_variable_lt = m._relaxed_variable_lt - for i = 1:wp._var_leq_count - constr_indx, node_indx = relaxed_variable_lt[i] - MOI.set(opt, MOI.ConstraintSet(), constr_indx, LT( upper_bound[node_indx])) - end - - relaxed_variable_gt = m._relaxed_variable_gt - for i = 1:wp._var_geq_count - constr_indx, node_indx = relaxed_variable_gt[i] - MOI.set(opt, MOI.ConstraintSet(), constr_indx, GT( lower_bound[node_indx])) - end - - return nothing -end - -function interval_objective_bound(m::Optimizer, n::NodeBB) - - interval_objective_bound = bound_objective(m) - - if interval_objective_bound > m._lower_objective_value - m._lower_objective_value = interval_objective_bound - 
fill!(m._lower_lvd, 0.0) - fill!(m._lower_uvd, 0.0) - m._cut_add_flag = false - return true - - end - - return false -end - -""" -$(SIGNATURES) - -A fallback lower bounding problem that consists of an natural interval extension -calculation. This is called when the optimizer used to compute the lower bound -does not return a termination and primal status code indicating that it -successfully solved the relaxation to a globally optimal point. -""" -function fallback_interval_lower_bound!(m::Optimizer, n::NodeBB) - - feasible_flag = true - - if !cp_condition(m) - for i = 1:m._working_problem._saf_leq_count - saf_leq = m._working_problem._saf_leq[i] - feasible_flag &= (lower_interval_bound(m, saf_leq, n) <= 0.0) - !feasible_flag && break - end - - if feasible_flag - for i = 1:m._working_problem._saf_eq_count - saf_eq = m._working_problem._saf_eq[i] - lower_value, upper_value = interval_bound(m, saf_eq, n) - feasible_flag &= (lower_value <= 0.0 <= upper_value) - !feasible_flag && break - end - end - - if feasible_flag - for i = 1:m._working_problem._sqf_leq_count - sqf_leq = m._working_problem._sqf_leq[i] - feasible_flag &= (lower_interval_bound(m, sqf_leq, n) <= 0.0) - !feasible_flag && break - end - end - - if feasible_flag - for i = 1:m._working_problem._sqf_eq_count - sqf_eq = m._working_problem._sqf_eq[i] - lower_value, upper_value = interval_bound(m, sqf_eq, n) - feasible_flag &= (lower_value <= 0.0 <= upper_value) - !feasible_flag && break - end - end - - if feasible_flag - for i = 1:m._working_problem._nonlinear_count - nl_constr = m._working_problem._nonlinear_constr[i] - lower_value, upper_value = interval_bound(m, nl_constr, n) - feasible_flag &= upper_value < nl_constr.lower_bound - feasible_flag &= lower_value > nl_constr.upper_bound - !feasible_flag && break - end - end - end - - if feasible_flag - interval_objective_used = interval_objective_bound(m, n) - @__dot__ m._current_xref = 0.5*(n.upper_variable_bounds + n.lower_variable_bounds) - 
unsafe_check_fill!(isnan, m._current_xref, 0.0, length(m._current_xref)) - else - m._lower_objective_value = -Inf - end - m._lower_feasibility = feasible_flag - - return nothing +termination_check(m::GlobalOptimizer{R,S,Q}) where {R,S,Q<:ExtensionType} = termination_check(_ext(m), m) + +const GLOBALEND_TSTATUS = Dict{GlobalEndState, MOI.TerminationStatusCode}( + GS_OPTIMAL => MOI.OPTIMAL, + GS_INFEASIBLE => MOI.INFEASIBLE, + GS_NODE_LIMIT => MOI.NODE_LIMIT, + GS_ITERATION_LIMIT => MOI.ITERATION_LIMIT, + GS_RELATIVE_TOL => MOI.OPTIMAL, + GS_ABSOLUTE_TOL => MOI.OPTIMAL, + GS_TIME_LIMIT => MOI.TIME_LIMIT + ) + +function set_termination_status!(m::GlobalOptimizer) + m._termination_status_code = GLOBALEND_TSTATUS[m._end_state] + return end -""" -$(SIGNATURES) - -Constructs and solves the relaxation using the default EAGO relaxation scheme -and optimizer on node `y`. -""" -function lower_problem!(t::ExtensionType, m::Optimizer) - - n = m._current_node - - m._working_problem._relaxed_evaluator.is_post = m._parameters.subgrad_tighten - if !m._obbt_performed_flag - if m._nonlinear_evaluator_created - set_node!(m._working_problem._relaxed_evaluator, n) - set_node_flag!(m) - set_reference_point!(m) - fill!(m._working_problem._relaxed_evaluator.subexpressions_eval, false) - end - update_relaxed_problem_box!(m) - end - m._working_problem._objective_nl.has_value = false - m._working_problem._relaxed_evaluator.interval_intersect = false - - if !m._obbt_performed_flag - relax_constraints!(m, 1) - end - relax_objective!(m, 1) - - # Optimizes the object - relaxed_optimizer = m.relaxed_optimizer - - MOI.set(relaxed_optimizer, MOI.ObjectiveSense(), MOI.MIN_SENSE) - - MOI.optimize!(relaxed_optimizer) - - m._lower_termination_status = MOI.get(relaxed_optimizer, MOI.TerminationStatus()) - m._lower_result_status = MOI.get(relaxed_optimizer, MOI.PrimalStatus()) - valid_flag, feasible_flag = is_globally_optimal(m._lower_termination_status, m._lower_result_status) - - if valid_flag && 
feasible_flag - set_dual!(m) - m._cut_add_flag = true - m._lower_feasibility = true - m._lower_objective_value = MOI.get(relaxed_optimizer, MOI.ObjectiveValue()) - for i = 1:m._working_problem._variable_count - m._lower_solution[i] = MOI.get(relaxed_optimizer, MOI.VariablePrimal(), m._relaxed_variable_index[i]) - end - - elseif valid_flag - m._cut_add_flag = false - m._lower_feasibility = false - m._lower_objective_value = -Inf - - else - fallback_interval_lower_bound!(m, n) - end - - return nothing +const GLOBALEND_PSTATUS = Dict{GlobalEndState, MOI.ResultStatusCode}( + GS_OPTIMAL => MOI.FEASIBLE_POINT, + GS_INFEASIBLE => MOI.NO_SOLUTION, # Proof of infeasibility implies not solution found + GS_NODE_LIMIT => MOI.UNKNOWN_RESULT_STATUS, + GS_ITERATION_LIMIT => MOI.UNKNOWN_RESULT_STATUS, + GS_RELATIVE_TOL => MOI.FEASIBLE_POINT, + GS_ABSOLUTE_TOL => MOI.FEASIBLE_POINT, + GS_TIME_LIMIT => MOI.UNKNOWN_RESULT_STATUS + ) + +function set_result_status!(m::GlobalOptimizer) + m._result_status_code = GLOBALEND_PSTATUS[m._end_state] + return end """ $(SIGNATURES) -Updates the internal storage in the optimizer after a valid feasible cut is added. -""" -function cut_update!(m::Optimizer) - - m._cut_feasibility = true - - relaxed_optimizer = m.relaxed_optimizer - obj_val = MOI.get(relaxed_optimizer, MOI.ObjectiveValue()) - prior_obj_val = (m._cut_iterations == 2) ? 
m._lower_objective_value : m._cut_objective_value - - m._cut_add_flag = true - m._lower_termination_status = m._cut_termination_status - m._lower_result_status = m._cut_result_status - m._cut_solution[:] = MOI.get(relaxed_optimizer, MOI.VariablePrimal(), m._relaxed_variable_index) - - if prior_obj_val < obj_val - m._cut_objective_value = obj_val - m._lower_objective_value = obj_val - set_dual!(m) - copyto!(m._lower_solution, m._cut_solution) - - else - m._cut_objective_value = prior_obj_val - m._lower_objective_value = prior_obj_val - m._cut_add_flag = false - end - - return nothing -end - - -""" -$(SIGNATURES) - -Checks if a cut should be added and computes a new reference point to add the -cut at. If no cut should be added the constraints not modified in place are -deleted from the relaxed optimizer and the solution is compared with the -interval lower bound. The best lower bound is then used. -""" -function cut_condition(t::ExtensionType, m::Optimizer) - - # always add cut if below the minimum iteration limit, otherwise add cut - # the number of cuts is less than the maximum and the distance between - # prior solutions exceeded a tolerance. - continue_cut_flag = m._cut_add_flag - continue_cut_flag &= (m._cut_iterations < m._parameters.cut_max_iterations) - - # compute distance between prior solutions and compare to tolerances - n = m._current_node - ns_indx = m._branch_to_sol_map - - cvx_factor = m._parameters.cut_cvx - xsol = (m._cut_iterations > 1) ? 
m._cut_solution[ns_indx] : m._lower_solution[ns_indx] - xnew = (1.0 - cvx_factor)*mid(n) + cvx_factor*xsol - - continue_cut_flag &= (norm((xsol - xnew)/diam(n), 1) > m._parameters.cut_tolerance) - continue_cut_flag |= (m._cut_iterations < m._parameters.cut_min_iterations) - - # update reference point for new cut - if continue_cut_flag - copyto!(m._current_xref, xnew) - if m._nonlinear_evaluator_created - set_reference_point!(m) - fill!(m._working_problem._relaxed_evaluator.subexpressions_eval, false) - end - end - - # check to see if interval bound is preferable and replaces the objective - # value with the interval value if so. Any available dual values are then - # set to zero since the interval bounds are by definition constant - if m._lower_feasibility && !continue_cut_flag - objective_lo = -Inf - obj_type = m._working_problem._objective_type - if obj_type === SINGLE_VARIABLE - var_index = m._working_problem._objective_sv.variable.value - if m._branch_variables[var_index] - obj_indx = m._sol_to_branch_map[var_index] - lower_variable_bnd = n.lower_variable_bounds[obj_indx] - if !isinf(lower_variable_bnd) - objective_lo = lower_variable_bnd - end - end - - elseif obj_type === SCALAR_AFFINE - objective_lo = lower_interval_bound(m, m._working_problem._objective_saf_parsed, n) - - elseif obj_type === SCALAR_QUADRATIC - objective_lo = lower_interval_bound(m, m._working_problem._objective_sqf, n) - - elseif obj_type === NONLINEAR - objective_lo = lower_interval_bound(m, m._working_problem._objective_nl, n) - - end - - if objective_lo > m._lower_objective_value - m._lower_objective_value = objective_lo - fill!(m._lower_lvd, 0.0) - fill!(m._lower_uvd, 0.0) - end - end - - m._cut_iterations += 1 - - return continue_cut_flag -end - -""" -$(SIGNATURES) - -Adds a cut for each constraint and the objective function to the subproblem. +Checks for convergence of algorithm with respect to absolute and/or relative +tolerances. 
""" -function add_cut!(t::ExtensionType, m::Optimizer) - - fill!(m._working_problem._relaxed_evaluator.subexpressions_eval, false) - m._working_problem._relaxed_evaluator.is_first_eval = true - m._working_problem._relaxed_evaluator.is_intersect = false - m._new_eval_objective = true - m._new_eval_constraint = true - - relax_constraints!(m, m._cut_iterations) - relax_objective!(m, m._cut_iterations) - - # Optimizes the object - relaxed_optimizer = m.relaxed_optimizer - MOI.optimize!(relaxed_optimizer) - - m._cut_termination_status = MOI.get(relaxed_optimizer, MOI.TerminationStatus()) - m._cut_result_status = MOI.get(relaxed_optimizer, MOI.PrimalStatus()) - valid_flag, feasible_flag = is_globally_optimal(m._cut_termination_status, m._cut_result_status) - - if valid_flag && feasible_flag - cut_update!(m) +function convergence_check(t::ExtensionType, m::GlobalOptimizer) - elseif valid_flag - m._cut_add_flag = false - m._lower_feasibility = false - m._lower_objective_value = -Inf - - else - m._cut_add_flag = false + L = m._lower_objective_value + U = m._global_upper_bound + t = (U - L) <= m._parameters.absolute_tolerance + if (U < Inf) && (L > Inf) + t |= (abs(U - L)/(max(abs(L), abs(U))) <= m._parameters.relative_tolerance) end - - return nothing -end - -""" -$(SIGNATURES) - -Default check to see if the upper bounding problem should be run. By default, -The upper bounding problem is run on every node up to depth `upper_bounding_depth` -and is triggered with a probability of `0.5^(depth - upper_bounding_depth)` -afterwards. -""" -function default_nlp_heurestic(m::Optimizer) - bool = false - ubd_limit = m._parameters.upper_bounding_depth - depth = m._current_node.depth - bool |= (depth <= ubd_limit) - bool |= (rand() < 0.5^(depth - m._parameters.upper_bounding_depth)) - return bool -end - -""" -$(SIGNATURES) - -Default upper bounding problem which simply calls `solve_local_nlp!` to solve -the nlp locally. 
-""" -function upper_problem!(t::ExtensionType, m::Optimizer) - - if !default_nlp_heurestic(m) - m._upper_feasibility = false - m._upper_objective_value = Inf - + if t && m._min_converged_value < Inf + m._min_converged_value = min(m._min_converged_value, L) else - solve_local_nlp!(m) - + m._min_converged_value = L end - return nothing -end - - -""" -$(SIGNATURES) - -Default postprocess perfoms duality-based bound tightening on the `y`. -""" -function postprocess!(t::ExtensionType, m::Optimizer) - - if m._parameters.dbbt_depth > m._iteration_count - variable_dbbt!(m._current_node, m._lower_lvd, m._lower_uvd, - m._lower_objective_value, m._global_upper_bound, - m._branch_variable_count) - end - - return nothing + return t end +convergence_check(m::GlobalOptimizer{R,S,Q}) where {R,S,Q<:ExtensionType} = convergence_check(_ext(m), m) """ $(SIGNATURES) @@ -1008,61 +236,35 @@ $(SIGNATURES) Provides a hook for extensions to EAGO as opposed to standard global, local, or linear solvers. """ -optimize_hook!(t::ExtensionType, m::Optimizer) = nothing - -function store_candidate_solution!(m::Optimizer) +optimize_hook!(t::ExtensionType, m::GlobalOptimizer) = nothing +function store_candidate_solution!(m::GlobalOptimizer) if m._upper_feasibility && (m._upper_objective_value < m._global_upper_bound) - m._feasible_solution_found = true m._first_solution_node = m._maximum_node_id - m._solution_value = m._upper_objective_value m._global_upper_bound = m._upper_objective_value @__dot__ m._continuous_solution = m._upper_solution - end - return nothing + return end -function set_global_lower_bound!(m::Optimizer) - +function set_global_lower_bound!(m::GlobalOptimizer) if !isempty(m._stack) - - min_node = minimum(m._stack) - lower_bound = min_node.lower_bound + n = minimum(m._stack) + lower_bound = n.lower_bound if m._global_lower_bound < lower_bound m._global_lower_bound = lower_bound end - end - - return nothing + return end -# wraps subroutine call to isolate ExtensionType 
-parse_global!(m::Optimizer) = parse_global!(m.ext_type, m) -presolve_global!(m::Optimizer) = presolve_global!(m.ext_type, m) -termination_check(m::Optimizer) = termination_check(m.ext_type, m) -cut_condition(m::Optimizer) = cut_condition(m.ext_type, m) -convergence_check(m::Optimizer) = convergence_check(m.ext_type, m) -repeat_check(m::Optimizer) = repeat_check(m.ext_type, m) -node_selection!(m::Optimizer) = node_selection!(m.ext_type, m) -preprocess!(m::Optimizer) = preprocess!(m.ext_type, m) -lower_problem!(m::Optimizer) = lower_problem!(m.ext_type, m) -add_cut!(m::Optimizer) = add_cut!(m.ext_type, m) -upper_problem!(m::Optimizer) = upper_problem!(m.ext_type, m) -postprocess!(m::Optimizer) = postprocess!(m.ext_type, m) -single_storage!(m::Optimizer) = single_storage!(m.ext_type, m) -branch_node!(m::Optimizer) = branch_node!(m.ext_type, m) -fathom!(m::Optimizer) = fathom!(m.ext_type, m) -revert_adjusted_upper_bound!(m::Optimizer) = revert_adjusted_upper_bound!(m.ext_type, m) - """ $(TYPEDSIGNATURES) Solves the branch and bound problem with the input EAGO optimizer object. 
""" -function global_solve!(m::Optimizer) +function global_solve!(m::GlobalOptimizer) m._iteration_count = 1 m._node_count = 1 @@ -1070,73 +272,41 @@ function global_solve!(m::Optimizer) parse_global!(m) presolve_global!(m) - logging_on = m._parameters.log_on - verbosity = m._parameters.verbosity + print_preamble!(m) # terminates when max nodes or iteration is reach, or when node stack is empty while !termination_check(m) # Selects node, deletes it from stack, prints based on verbosity + fathom!(m) node_selection!(m) - (verbosity >= 3) && print_node!(m) + print_node!(m) # Performs prepocessing and times - logging_on && (start_time = time()) - preprocess!(m) - if logging_on - m._last_preprocess_time = time() - start_time - end + m._last_preprocess_time += @elapsed preprocess!(m) if m._preprocess_feasibility # solves & times lower bounding problem - logging_on && (start_time = time()) - m._cut_iterations = 1 - lower_problem!(m) - while cut_condition(m) - add_cut!(m) - end - if logging_on - m._last_lower_problem_time = time() - start_time - end + m._last_lower_problem_time += @elapsed lower_problem!(m) print_results!(m, true) - print_results_post_cut!(m) # checks for infeasibility stores solution - if m._lower_feasibility - if !convergence_check(m) + if m._lower_feasibility && !convergence_check(m) - logging_on && (start_time = time()) - upper_problem!(m) - if logging_on - m._last_upper_problem_time = time() - start_time - end - print_results!(m, false) - store_candidate_solution!(m) - if m._input_problem._optimization_sense === MOI.FEASIBILITY_SENSE - if !m.feasible_local_continue || m.local_solve_only - break - end - end + # Solves upper problem + m._last_upper_problem_time += @elapsed upper_problem!(m) + print_results!(m, false) + store_candidate_solution!(m) - # Performs and times post processing - logging_on && (start_time = time()) - postprocess!(m) - if logging_on - m._last_postprocessing_time = time() - start_time - end + # Performs post processing + 
m._last_postprocessing_time += @elapsed postprocess!(m) - # Checks to see if the node - if m._postprocess_feasibility - if repeat_check(m) - single_storage!(m) - else - branch_node!(m) - end - end + # Checks to see if the node + if m._postprocess_feasibility + repeat_check(m) ? single_storage!(m) : branch_node!(m) end end - fathom!(m) else m._lower_objective_value = -Inf m._lower_feasibility = false @@ -1150,13 +320,39 @@ function global_solve!(m::Optimizer) m._iteration_count += 1 end - revert_adjusted_upper_bound!(m) - m._objective_value = m._global_upper_bound + set_termination_status!(m) + set_result_status!(m) - # Prints the solution print_solution!(m) +end - return nothing +function unpack_global_solution!(m::Optimizer{R,S,Q}) where {R,S,Q<:ExtensionType} + g = m._global_optimizer + + m._termination_status_code = g._termination_status_code + m._result_status_code = g._result_status_code + + m._run_time = g._run_time + m._node_count = g._maximum_node_id + + # evaluate objective (so there isn't a small difference in f(x) and objective_value) + # local solvers that solve to feasibility may result in a slightly lower than true solve... + # TODO + + # stores objective value and bound + if _is_input_min(g) + m._objective_bound = g._global_lower_bound + m._objective_value = g._global_upper_bound + else + m._objective_bound = -g._global_lower_bound + m._objective_value = -g._global_upper_bound + end + + return end -optimize!(::Val{MINCVX}, m::Optimizer) = global_solve!(m) +function optimize!(::MINCVX, m::Optimizer{R,S,Q}) where {R,S,Q<:ExtensionType} + global_solve!(m._global_optimizer) + unpack_global_solution!(m) + return +end diff --git a/src/eago_optimizer/optimizer.jl b/src/eago_optimizer/optimizer.jl index bec18a9a..0df9d148 100644 --- a/src/eago_optimizer/optimizer.jl +++ b/src/eago_optimizer/optimizer.jl @@ -10,410 +10,6 @@ # EAGOParameters, InputProblem, ParsedProblem, and Optimizer. 
############################################################################# -""" -$(TYPEDEF) - -An abstract type the subtypes of which are associated with functions method -overloaded for for new extensions. An instance of the `DefaultExt <:ExtensionType` -structure to the `Optimizer` in the `ext_type` field. -""" -abstract type ExtensionType end -struct DefaultExt <: ExtensionType end -MOIU.map_indices(::Function, x::ExtensionType) = x -MOIU.map_indices(::Function, x::DefaultExt) = x - -@enum(ObjectiveType, UNSET, SINGLE_VARIABLE, SCALAR_AFFINE, SCALAR_QUADRATIC, NONLINEAR) -@enum(ProblemType, UNCLASSIFIED, LP, MILP, SOCP, MISOCP, DIFF_CVX, MINCVX) - -export EAGOParameters -""" -$(TYPEDEF) - -Storage for parameters that do not change during a global solve. - -$(TYPEDFIELDS) -""" -Base.@kwdef mutable struct EAGOParameters - - # Presolving options - "Should EAGO attempt to remove type assert issues for user-defined functions (default = false)" - presolve_scrubber_flag::Bool = false - "Create and use DAG representations of user-defined function (default = false)." - presolve_to_JuMP_flag::Bool = false - "[FUTURE FEATURE, NOT CURRENTLY IMPLEMENTED] Apply the epigraph reformulation - to the problem (default = false)." - presolve_epigraph_flag::Bool = false - "Rerranges the DAG using registered transformations (default = false)" - presolve_flatten_flag::Bool = false - - # Conic reformulations - "Attempt to bridge convex constraint to second order cone" - conic_convert_quadratic::Bool = false - - # Iteration logging options - "Turns logging on records global bounds, node count and run time. Additional - options are available for recording information specific to subproblems (default = false)." - log_on::Bool = false - "Turns on logging of times and feasibility of subproblems (default = false)" - log_subproblem_info::Bool = false - "Log data every `log_interval` iterations (default = 1)." 
- log_interval::Int64 = 1 - - # Optimizer display options - "The amount of information that should be printed to console while solving - values range from 0 - 4: 0 is silent, 1 shows iteration summary statistics - only, 2-4 show varying degrees of details about calculations within each - iteration (default = 1)." - verbosity::Int64 = 1 - "Display summary of iteration to console every `output_iterations` (default = 10)" - output_iterations::Int64 = 1000 - "Display header for summary to console every `output_iterations` (default = 100)" - header_iterations::Int64 = 10000 - - # Node branching options - "Convex coefficient used to select branch point. Branch point is given by - `branch_cvx_factor*xmid + (1-branch_cvx_factor)*xsol` (default = 0.25)" - branch_cvx_factor::Float64 = 0.25 - "Minimum distance from bound to have branch point normalized by width of - dimension to branch on (default = 0.15)" - branch_offset::Float64 = 0.15 - "Variables to branch on (default is all nonlinear)." - branch_variable::Vector{Bool} = Bool[] - "[FUTURE FEATURE, NOT CURRENTLY IMPLEMENTED] Number of times repeat node - processing priorto branching (default = 4)." - branch_max_repetitions::Int64 = 4 - "[FUTURE FEATURE, NOT CURRENTLY IMPLEMENTED] Volume ratio tolerance required - to repeat processing the current node (default = 0.9)" - branch_repetition_tol::Float64 = 0.9 - - # Termination limits - "Maximum number of nodes (default = 1E-7)" - node_limit::Int64 = 1*10^7 - "Maximum CPU time in seconds (default = 1000)" - time_limit::Float64 = 1000.0 - "Maximum number of iterations (default 3E6)" - iteration_limit::Int64 = 3*10^6 - "Absolute tolerance for termination (default = 1E-3)" - absolute_tolerance::Float64 = 1E-3 - "Relative tolerance for termination (default = 1E-3)" - relative_tolerance::Float64 = 1E-3 - "Absolute constraint feasibility tolerance" - absolute_constraint_feas_tolerance::Float64 = 1E-6 - "Perform only a local solve of the problem (default = false)." 
- local_solve_only::Bool = false - "[TO BE REMOVED] Flag stops B&B loop if feasible point found." - feasible_local_continue::Bool = false - - # Options for constraint propagation - "Depth in B&B tree above which constraint propagation should be disabled (default = 1000)" - cp_depth::Int64 = 20 - "Number of times to repeat forward-reverse pass routine (default = 3)" - cp_repetitions::Int64 = 3 - "Disable constraint propagation if the ratio of new node volume to beginning node volume exceeds - this number (default = 0.99)" - cp_tolerance::Float64 = 0.99 - "Use only valid interval bounds during constraint propagation (default = false)" - cp_interval_only::Bool = false - - # obbt options - "Depth in B&B tree above which OBBT should be disabled (default = 6)" - obbt_depth::Int64 = 4 - "Number of repetitions of OBBT to perform in preprocessing (default = 3)" - obbt_repetitions::Int64 = 20 - "Turn aggresive OBBT on (default = false)" - obbt_aggressive_on::Bool = true - "Maximum iteration to perform aggresive OBBT (default = 2)" - obbt_aggressive_max_iteration::Int64 = 2 - "Minimum dimension to perform aggresive OBBT (default = 2)" - obbt_aggressive_min_dimension::Int64 = 2 - "Tolerance to consider bounds equal (default = 1E-9)" - obbt_tolerance::Float64 = 1E-9 - - # Options for linear bound tightening - "Depth in B&B tree above which linear FBBT should be disabled (default = 1000)" - fbbt_lp_depth::Int64 = 1000 - "Number of repetitions of linear FBBT to perform in preprocessing (default = 3)" - fbbt_lp_repetitions::Int64 = 3 - - # Options for quadratic bound tightening - "[FUTURE FEATURE, NOT CURRENTLY IMPLEMENTED] Depth in B&B tree above which univariate quadratic FBBT should be disabled (default = -1)" - quad_uni_depth::Int64 = -1 - "[FUTURE FEATURE, NOT CURRENTLY IMPLEMENTED] Number of repetitions of univariate quadratic FBBT to perform in preprocessing (default = 2)" - quad_uni_repetitions::Int64 = 2 - "[FUTURE FEATURE, NOT CURRENTLY IMPLEMENTED] Depth in B&B tree 
above which bivariate - quadratic FBBT should be disabled (default = -1)" - quad_bi_depth::Int64 = -1 - "[FUTURE FEATURE, NOT CURRENTLY IMPLEMENTED] Number of repetitions of bivariate quadratic FBBT to perform in preprocessing (default = 2)." - quad_bi_repetitions::Int64 = 2 - - # Duality-based bound tightening (DBBT) options - "Depth in B&B tree above which duality-based bound tightening should be disabled (default = 1E10)" - dbbt_depth::Int64 = 10^10 - "New bound is considered equal to the prior bound if within dbbt_tolerance (default = 1E-9)." - dbbt_tolerance::Float64 = 1E-8 - - # Subgradient tightening flag - "Relax Tag used to specify type of McCormick operator" - relax_tag::RelaxTag = NS() - "Perform tightening of interval bounds using subgradients at each factor in - each nonlinear tape during a forward pass (default = true)." - subgrad_tighten::Bool = true - "Perform tightening of interval bounds using subgradients at each factor in - each nonlinear tape during a reverse pass (default = false)." - reverse_subgrad_tighten::Bool = false - "Outer round computed subgradient bounds by this amount" - subgrad_tol::Float64 = 1E-10 - - # Tolerance to add cuts and max number of cuts - "Minimum number of cuts at each node to attempt (unsafe cuts not necessarily added)" - cut_min_iterations::Int64 = 1 - "Maximum number of cuts at each node to attempt" - cut_max_iterations::Int64 = 3 - "Convex coefficient used to select point for new added cuts. Branch point is - given by `(1-cut_cvx)*xmid + cut_cvx*xsol` (default = 0.9)." - cut_cvx::Float64 = 0.9 - "Add cut if the L1 distance from the prior cutting point to the new cutting - point normalized by the box volume is greater than the tolerance (default = 0.05)." - cut_tolerance::Float64 = 0.05 - "Adds an objective cut to the relaxed problem (default = true)." 
- objective_cut_on::Bool = true - - "Use tolerances to determine safe cuts in a Khajavirad 2018 manner" - cut_safe_on::Bool = true - "Lower tolerance for safe-lp cut, Khajavirad 2018" - cut_safe_l::Float64 = 1E-8 - "Upper tolerance for safe-lp cut, Khajavirad 2018" - cut_safe_u::Float64 = 1E8 - "Constant tolerance for safe-lp cut, Khajavirad 2018" - cut_safe_b::Float64 = 1E9 - - "Solve upper problem for every node with depth less than `upper_bounding_depth` - and with a probabilityof (1/2)^(depth-upper_bounding_depth) otherwise (default = 6)" - upper_bounding_depth::Int64 = 6 - - # handling for domain violations - "Amount about a domain violation to ignore when propagating bounds." - domain_violation_guard_on::Bool = false - "Amount about a domain violation to ignore when propagating bounds." - domain_violation_ϵ::Float64 = 1E-9 -end - -""" -$(TYPEDEF) - -A structure used to hold objectives and constraints added to EAGO model. -The constraints generally aren't used for relaxations. -""" -Base.@kwdef mutable struct InputProblem - - # variables (set by MOI.add_variable in variables.jl) - _variable_info::Vector{VariableInfo} = VariableInfo[] - _variable_count::Int64 = 0 - - # last constraint index added - _last_constraint_index::Int = 0 - - # linear constraint storage and count (set by MOI.add_constraint in moi_constraints.jl) - _linear_leq_constraints::Vector{Tuple{SAF, LT}} = Tuple{SAF, LT}[] - _linear_geq_constraints::Vector{Tuple{SAF, GT}} = Tuple{SAF, GT}[] - _linear_eq_constraints::Vector{Tuple{SAF, ET}} = Tuple{SAF, ET}[] - - _linear_leq_count::Int = 0 - _linear_geq_count::Int = 0 - _linear_eq_count::Int = 0 - - # quadratic constraint storage and count (set by MOI.add_constraint in moi_constraints.jl) - _quadratic_leq_constraints::Vector{Tuple{SQF, LT}} = Tuple{SQF, LT}[] - _quadratic_geq_constraints::Vector{Tuple{SQF, GT}} = Tuple{SQF, GT}[] - _quadratic_eq_constraints::Vector{Tuple{SQF, ET}} = Tuple{SQF, ET}[] - - _quadratic_leq_count::Int = 0 - 
_quadratic_geq_count::Int = 0 - _quadratic_eq_count::Int = 0 - - # conic constraint storage and count (set by MOI.add_constraint in moi_constraints.jl) - _conic_second_order::Vector{Tuple{VECOFVAR, MOI.SecondOrderCone}} = Tuple{VECOFVAR, MOI.SecondOrderCone}[] - - _conic_second_order_count::Int = 0 - - # nonlinear constraint storage - _nonlinear_count::Int = 0 - - # objective information (set by MOI.set(m, ::ObjectiveFunction...) in optimizer.jl) - _objective_sv::SV = SV(VI(-1)) - _objective_saf::SAF = SAF(SAT[], 0.0) - _objective_sqf::SQF = SQF(SAT[], SQT[], 0.0) - _objective_type::ObjectiveType = UNSET - - # nlp constraints (set by MOI.set(m, ::NLPBlockData...) in optimizer.jl) - _nlp_data::MOI.NLPBlockData = empty_nlp_data() - - # objective sense information (set by MOI.set(m, ::ObjectiveSense...) in optimizer.jl) - _optimization_sense::MOI.OptimizationSense = MOI.MIN_SENSE -end - -function Base.isempty(x::InputProblem) - - is_empty_flag = true - new_input_problem = InputProblem() - - for field in fieldnames(InputProblem) - - field_value = getfield(x, field) - - if field_value isa Array - if !isempty(field_value) - is_empty_flag = false - break - end - - elseif field_value isa Number - if getfield(new_input_problem, field) !== field_value - is_empty_flag = false - break - end - - end - end - - is_empty_flag &= x._nlp_data.evaluator isa EmptyNLPEvaluator - is_empty_flag &= !x._nlp_data.has_objective - is_empty_flag &= isempty(x._nlp_data.constraint_bounds) - - is_empty_flag &= isempty(x._objective_saf.terms) - is_empty_flag &= x._objective_saf.constant === 0.0 - - is_empty_flag &= isempty(x._objective_sqf.quadratic_terms) - is_empty_flag &= isempty(x._objective_sqf.affine_terms) - is_empty_flag &= x._objective_sqf.constant === 0.0 - - return is_empty_flag -end - -""" -$(TYPEDEF) - -A structure used to expressions and problem descriptions EAGO uses to formulate -relaxed problems. 
-""" -Base.@kwdef mutable struct ParsedProblem - - # Problem classification (set in parse_classify_problem!) - _problem_type::ProblemType = UNCLASSIFIED - - # objectives (set in initial_parse) - _objective_sv::SV = SV(VI(-1)) - "_objective_saf stores the objective and is used for constructing linear affine cuts - of any ObjectiveType" - _objective_saf::SAF = SAF(SAT[], 0.0) - _objective_saf_parsed::AffineFunctionIneq = AffineFunctionIneq() - _objective_sqf::BufferedQuadraticIneq = BufferedQuadraticIneq() - _objective_nl::BufferedNonlinearFunction = BufferedNonlinearFunction() - _objective_type::ObjectiveType = UNSET - - # objective sense information (set by convert_to_min in parse.jl) - _optimization_sense::MOI.OptimizationSense = MOI.MIN_SENSE - - # non-single variable constraints (set in initial_parse) - _saf_leq::Vector{AffineFunctionIneq} = AffineFunctionIneq[] - _saf_eq::Vector{AffineFunctionEq} = AffineFunctionEq[] - _sqf_leq::Vector{BufferedQuadraticIneq} = BufferedQuadraticIneq[] - _sqf_eq::Vector{BufferedQuadraticEq} = BufferedQuadraticEq[] - _conic_second_order::Vector{BufferedSOC} = BufferedSOC[] - - # count of non-single variable constraints (set in initial_parse) - _saf_leq_count::Int = 0 - _saf_eq_count::Int = 0 - _sqf_leq_count::Int = 0 - _sqf_eq_count::Int = 0 - _conic_second_order_count::Int = 0 - - # nlp constraints (set in initial_parse) - _nlp_data::MOI.NLPBlockData = empty_nlp_data() - - # storage for nonlinear functions - _nonlinear_constr::Vector{BufferedNonlinearFunction} = BufferedNonlinearFunction[] - - # nonlinear constraint storage - _nonlinear_count::Int = 0 - - # nonlinear evaluator - _relaxed_evaluator = Evaluator() - - # variables (set in initial_parse) - _variable_info::Vector{VariableInfo} = VariableInfo[] - _variable_count::Int = 0 - - # count of single variable constraint types (set in load_relaxed_problem!) 
- _var_leq_count::Int = 0 - _var_geq_count::Int = 0 - _var_eq_count::Int = 0 -end - -function Base.isempty(x::ParsedProblem) - - is_empty_flag = true - - new_input_problem = ParsedProblem() - for field in fieldnames(ParsedProblem) - - field_value = getfield(x, field) - if field_value isa Array - if !isempty(field_value) - is_empty_flag = false - break - end - - elseif field_value isa Number - if getfield(new_input_problem, field) !== field_value - is_empty_flag = false - break - end - end - end - - is_empty_flag &= x._nlp_data.evaluator isa EmptyNLPEvaluator - is_empty_flag &= !x._nlp_data.has_objective - is_empty_flag &= isempty(x._nlp_data.constraint_bounds) - - is_empty_flag &= isempty(x._objective_saf.terms) - is_empty_flag &= x._objective_saf.constant === 0.0 - - is_empty_flag &= isempty(x._objective_saf.terms) - is_empty_flag &= x._objective_saf.constant === 0.0 - is_empty_flag &= isempty(x._objective_saf_parsed.terms) - is_empty_flag &= x._objective_saf_parsed.constant === 0.0 - is_empty_flag &= x._objective_saf_parsed.len === 0 - - is_empty_flag &= isempty(x._objective_sqf.func.quadratic_terms) - is_empty_flag &= isempty(x._objective_sqf.func.affine_terms) - is_empty_flag &= x._objective_sqf.func.constant === 0.0 - - is_empty_flag &= isempty(x._objective_sqf.buffer) - is_empty_flag &= isempty(x._objective_sqf.saf.terms) - is_empty_flag &= x._objective_sqf.saf.constant === 0.0 - is_empty_flag &= x._objective_sqf.len === 0 - - return is_empty_flag -end - - -function default_nlp_solver() - - upper_optimizer = Ipopt.Optimizer() - - MOI.set(upper_optimizer, MOI.RawParameter("max_iter"),3000) - MOI.set(upper_optimizer, MOI.RawParameter("acceptable_tol"), 1E30) - MOI.set(upper_optimizer, MOI.RawParameter("acceptable_iter"), 300) - MOI.set(upper_optimizer, MOI.RawParameter("constr_viol_tol"), 0.000001) - MOI.set(upper_optimizer, MOI.RawParameter("acceptable_compl_inf_tol"), 0.000001) - MOI.set(upper_optimizer, MOI.RawParameter("acceptable_dual_inf_tol"), 1.0) - 
MOI.set(upper_optimizer, MOI.RawParameter("acceptable_constr_viol_tol"), 0.000001) - MOI.set(upper_optimizer, MOI.RawParameter("print_level"), 0) - - return upper_optimizer -end - export Optimizer """ $(TYPEDEF) @@ -431,427 +27,65 @@ which are expected to be constant over the entire solve are stored in - `ext::Dict{Symbol, Any}`: Holds additional storage needed for constructing extensions to EAGO (default = Dict{Symbol,Any}). - `ext_type::ExtensionType`: Holds an instance of a subtype of `EAGO.ExtensionType` used to define new custom subroutines (default = DefaultExt()). """ -Base.@kwdef mutable struct Optimizer <: MOI.AbstractOptimizer - - # Options for optimality-based bound tightening - # set as a user-specified option - relaxed_optimizer::MOI.AbstractOptimizer = GLPK.Optimizer() - - # set as a user-specified option (if empty set to all nonlinear by TODO in TODO) - obbt_variable_values::Vector{Bool} = Bool[] - - # Upper bounding options (set as a user-specified option) - upper_optimizer::MOI.AbstractOptimizer = default_nlp_solver() - - # Extensions (set as user-specified option) - enable_optimize_hook::Bool = false - ext::Dict{Symbol, Any} = Dict{Symbol,Any}() - ext_type::ExtensionType = DefaultExt() - - # set as user-specified option - _parameters::EAGOParameters = EAGOParameters() - - # set by MOI manipulations (see Input problem structure) - _input_problem::InputProblem = InputProblem() - - # loaded from _input_problem by TODO - _working_problem::ParsedProblem = ParsedProblem() - - _termination_status_code::MOI.TerminationStatusCode = MOI.OPTIMIZE_NOT_CALLED - _result_status_code::MOI.ResultStatusCode = MOI.OTHER_RESULT_STATUS - - _stack::BinaryMinMaxHeap{NodeBB} = BinaryMinMaxHeap{NodeBB}() - - # set in node_selection! 
- _current_node::NodeBB = NodeBB() - - _first_relax_point_set::Bool = false - _current_xref::Vector{Float64} = Float64[] - _candidate_xref::Vector{Float64} = Float64[] +mutable struct Optimizer{Q,S,T} <: MOI.AbstractOptimizer - _use_prior_objective_xref::Bool = false - _current_objective_xref::Vector{Float64} = Float64[] - _prior_objective_xref::Vector{Float64} = Float64[] + subsolver_block::SubSolvers{Q,S,T} + enable_optimize_hook::Bool + ext::Union{Nothing,T} + + _auxillary_variable_info::Union{Nothing,_AuxVarData} + _global_optimizer::GlobalOptimizer{Q,S,T} + _input_problem::InputProblem + _working_problem::ParsedProblem - # set in label_branch_variables! and label_fixed_variables! respectively in parse.jl - _user_branch_variables::Bool = false - _fixed_variable::Vector{Bool} = Bool[] - _branch_variable_count::Int = 0 - _branch_to_sol_map::Vector{Int} = Int[] - _sol_to_branch_map::Vector{Int} = Int[] + # set as user-specified option + _parameters::EAGOParameters - _continuous_solution::Vector{Float64} = Float64[] - - # all subproblem immutable subproblem status are set in global_solve in corresponding routines - # in optimize_nonconvex.jl - _preprocess_feasibility::Bool = true - _preprocess_result_status::MOI.ResultStatusCode = MOI.OTHER_RESULT_STATUS - _preprocess_termination_status::MOI.TerminationStatusCode = MOI.OPTIMIZE_NOT_CALLED - - _lower_result_status::MOI.ResultStatusCode = MOI.OTHER_RESULT_STATUS - _lower_termination_status::MOI.TerminationStatusCode = MOI.OPTIMIZE_NOT_CALLED - _lower_feasibility::Bool = true - _lower_objective_value::Float64 = -Inf - - # set in TODO - _lower_solution::Vector{Float64} = Float64[] - _lower_lvd::Vector{Float64} = Float64[] - _lower_uvd::Vector{Float64} = Float64[] - - _cut_result_status::MOI.ResultStatusCode = MOI.OTHER_RESULT_STATUS - _cut_termination_status::MOI.TerminationStatusCode = MOI.OPTIMIZE_NOT_CALLED - _cut_objective_value::Float64 = -Inf - _cut_feasibility::Bool = true - - # set in TODO - 
_cut_solution::Vector{Float64} = Float64[] - - _upper_result_status::MOI.ResultStatusCode = MOI.OTHER_RESULT_STATUS - _upper_termination_status::MOI.TerminationStatusCode = MOI.OPTIMIZE_NOT_CALLED - _upper_feasibility::Bool = true - _upper_objective_value::Float64 = Inf - - # array is initialized to correct size in TODO, reset in single_nlp_solve! in optimize_convex.jl - _upper_variables::Vector{VI} = VI[] - - # set in TODO - _upper_solution::Vector{Float64} = Float64[] - - _postprocess_feasibility::Bool = true - - # set to time limit in initial_parse! in parse.jl, decremented throughout global_solve in optimize_nonconvex.jl - _time_left::Float64 = 1000.0 + _termination_status_code::MOI.TerminationStatusCode + _result_status_code::MOI.ResultStatusCode # set constructor reset on empty! and to zero in initial parse! in parse.jl - _start_time::Float64 = 0.0 - _run_time::Float64 = 0.0 - _parse_time::Float64 = 0.0 - _presolve_time::Float64 = 0.0 - _last_preprocess_time::Float64 = 0.0 - _last_lower_problem_time::Float64 = 0.0 - _last_upper_problem_time::Float64 = 0.0 - _last_postprocessing_time::Float64 = 0.0 - - # reset in initial_parse! in parse.jl - _min_converged_value::Float64 = Inf - _global_lower_bound::Float64 = -Inf - _global_upper_bound::Float64 = Inf - _maximum_node_id::Int64 = 0 - _iteration_count::Int64 = 0 - _node_count::Int64 = 0 - - # Storage for output, reset in initial_parse! 
in parse.jl - _solution_value::Float64 = 0.0 - _feasible_solution_found::Bool = false - _first_solution_node::Int64 = -1 - _objective_value::Float64 = -Inf - _best_upper_value::Float64 = Inf - - # Optimality-Based Bound Tightening (OBBT) Options - # set in TODO - _obbt_working_lower_index::Vector{Bool} = Bool[] - _obbt_working_upper_index::Vector{Bool} = Bool[] - _lower_indx_diff::Vector{Bool} = Bool[] - _upper_indx_diff::Vector{Bool} = Bool[] - _old_low_index::Vector{Bool} = Bool[] - _old_upp_index::Vector{Bool} = Bool[] - _new_low_index::Vector{Bool} = Bool[] - _new_upp_index::Vector{Bool} = Bool[] - _obbt_variables::Vector{VI} = VI[] - _obbt_variable_count::Int = 0 - _obbt_performed_flag::Bool = false - - # Buffers for fbbt, set in presolve, used in preprocess - _lower_fbbt_buffer::Vector{Float64} = Float64[] - _upper_fbbt_buffer::Vector{Float64} = Float64[] - - # Feasibility-Based Bound Tightening Options - # set in set_constraint_propagation_fbbt in domain_reduction.jl - _cp_improvement::Float64 = 0.0 - _cp_evaluation_reverse::Bool = false - - _cut_iterations::Int64 = 0 - _cut_add_flag::Bool = false - - # Options for Repetition (If DBBT Performed Well) - # set in within preprocess in optimize_nonconvex.jl - _node_repetitions::Int64 = 0 - _initial_volume::Float64 = 0.0 - _final_volume::Float64 = 0.0 - - # Log - _log::Log = Log() - - # set in TODO - _buffered_quadratic_ineq_ci::Vector{CI{SAF,LT}} = CI{SAF,LT}[] - _buffered_quadratic_eq_ci::Vector{CI{SAF,LT}} = CI{SAF,LT}[] - - _buffered_nonlinear_ci::Vector{CI{SAF,LT}} = CI{SAF,LT}[] - - # set initially in TODO, reset in objective_cut in relax.jl - _objective_cut_ci_sv::CI{SV,LT} = CI{SV,LT}(-1) - - # initialized to empty in constructor (or via MOI.empty), filled in objective_cut in relax.jl - # called by obbt in domain_reduction.jl, lower_problem, and add_cut in optimize_nonconvex.jl, - # emptied in delete_objective_cuts! 
in relax.jl - _objective_cut_ci_saf::Vector{CI{SAF,LT}} = CI{SAF,LT}[] - - # need to retreive primal _relaxed_variable_index - # set in TODO - #"Number of variables actively branched on in B&B routine (excludes linear and fixed)" - _relaxed_variable_number::Int = 0 - _relaxed_variable_index::Vector{VI} = VI[] - _relaxed_variable_eq::Vector{Tuple{CI{SV, ET}, Int}} = Tuple{CI{SV, ET}, Int}[] - _relaxed_variable_lt::Vector{Tuple{CI{SV, LT}, Int}} = Tuple{CI{SV, LT}, Int}[] - _relaxed_variable_gt::Vector{Tuple{CI{SV, GT}, Int}} = Tuple{CI{SV, GT}, Int}[] - - # set as user-input - _branch_variables::Vector{Bool} = Bool[] - - _new_eval_constraint::Bool = false - _new_eval_objective::Bool = false - - _node_to_sv_leq_ci::Vector{CI{SV,LT}} = CI{SV,LT}[] - _node_to_sv_geq_ci::Vector{CI{SV,GT}} = CI{SV,GT}[] - - #"Set to true if a nonlinear evaluator was created (NLconstraint or NLobjective specified)" - _nonlinear_evaluator_created::Bool = false - - #_relaxed_evaluator::Evaluator = Evaluator{1,NS}() - #_relaxed_constraint_bounds::Vector{MOI.NLPBoundsPair} = Vector{MOI.NLPBoundsPair}[] -end - -##### -##### -##### General MOI utilities required -##### -##### - -const EAGO_OPTIMIZER_ATTRIBUTES = Symbol[:relaxed_optimizer, :relaxed_optimizer_kwargs, :upper_optimizer, - :enable_optimize_hook, :ext, :ext_type, :_parameters] -const EAGO_MODEL_STRUCT_ATTRIBUTES = Symbol[:_stack, :_log, :_current_node, :_working_problem, :_input_problem] -const EAGO_MODEL_NOT_STRUCT_ATTRIBUTES = setdiff(fieldnames(Optimizer), union(EAGO_OPTIMIZER_ATTRIBUTES, - EAGO_MODEL_STRUCT_ATTRIBUTES)) - -function MOI.empty!(m::Optimizer) - - # create a new empty optimizer and copy fields to m - new_optimizer = Optimizer() - for field in union(EAGO_MODEL_STRUCT_ATTRIBUTES, EAGO_MODEL_NOT_STRUCT_ATTRIBUTES) - setfield!(m, field, getfield(new_optimizer, field)) - end - - return nothing -end - -function MOI.is_empty(m::Optimizer) - - is_empty_flag = uninitialized(m._current_node) - is_empty_flag &= 
isempty(m._stack) - is_empty_flag &= isempty(m._log) - is_empty_flag &= isempty(m._input_problem) - is_empty_flag &= isempty(m._working_problem) - - new_optimizer = Optimizer() - for field in EAGO_MODEL_NOT_STRUCT_ATTRIBUTES - if getfield(m, field) != getfield(new_optimizer, field) - is_empty_flag = false - break - end - end - - return is_empty_flag -end - -function MOI.copy_to(model::Optimizer, src::MOI.ModelLike; copy_names = false) - return MOI.Utilities.default_copy_to(model, src, copy_names) -end - -##### -##### -##### Utilities for checking that JuMP model contains variables used in expression -##### -##### - -function check_inbounds!(m::Optimizer, vi::VI) - if !(1 <= vi.value <= m._input_problem._variable_count) - error("Invalid variable index $vi. ($(m._input_problem._variable_count) variables in the model.)") - end - return nothing -end - -check_inbounds!(m::Optimizer, var::SV) = check_inbounds!(m, var.variable) - -function check_inbounds!(m::Optimizer, aff::SAF) - for term in aff.terms - check_inbounds!(m, term.variable_index) - end - return nothing -end - -function check_inbounds!(m::Optimizer, quad::SQF) - for term in quad.affine_terms - check_inbounds!(m, term.variable_index) - end - for term in quad.quadratic_terms - check_inbounds!(m, term.variable_index_1) - check_inbounds!(m, term.variable_index_2) - end - return nothing -end - -function check_inbounds!(m::Optimizer, vov::VECOFVAR) - for vi in vov.variables - check_inbounds!(m, vi) - end - return -end - -##### -##### -##### Set & get attributes of model -##### -##### - -function MOI.set(m::Optimizer, ::MOI.Silent, value) - - m._parameters.verbosity = 0 - m._parameters.log_on = false - return nothing - -end - -function MOI.set(m::Optimizer, p::MOI.RawParameter, value) - - if p.name isa String - psym = Symbol(p.name) - elseif p.name isa Symbol - psym = p.name - else - error("EAGO only supports raw parameters with Symbol or String names.") - end - - if psym in fieldnames(EAGOParameters) - 
setfield!(m._parameters, psym, value) - else - setfield!(m, psym, value) - end - - return nothing -end - -function MOI.set(m::Optimizer, ::MOI.TimeLimitSec, value::Nothing) - m._parameters.time_limit = Inf - return nothing -end - -function MOI.set(m::Optimizer, ::MOI.TimeLimitSec, value::Float64) - m._parameters.time_limit = value - return nothing -end - -function MOI.get(m::Optimizer, ::MOI.ListOfVariableIndices) - return [MOI.VariableIndex(i) for i = 1:length(m._input_problem._variable_info)] -end - -function MOI.get(m::Optimizer, ::MOI.ObjectiveValue) - mult = 1.0 - if m._input_problem._optimization_sense === MOI.MAX_SENSE - mult *= -1.0 - end - return mult*m._objective_value -end - -MOI.get(m::Optimizer, ::MOI.NumberOfVariables) = m._input_problem._variable_count - -function MOI.get(m::Optimizer, ::MOI.ObjectiveBound) - if m._input_problem._optimization_sense === MOI.MAX_SENSE - bound = -m._global_lower_bound - else - bound = m._global_upper_bound - end - return bound -end - -function MOI.get(m::Optimizer, ::MOI.RelativeGap) - LBD = m._global_lower_bound - UBD = m._global_upper_bound - if m._input_problem._optimization_sense === MOI.MAX_SENSE - gap = abs(UBD - LBD)/abs(LBD) - else - gap = abs(UBD - LBD)/abs(UBD) - end - return gap -end - -MOI.get(m::Optimizer, ::MOI.SolverName) = "EAGO: Easy Advanced Global Optimization" -MOI.get(m::Optimizer, ::MOI.TerminationStatus) = m._termination_status_code -MOI.get(m::Optimizer, ::MOI.PrimalStatus) = m._result_status_code -MOI.get(m::Optimizer, ::MOI.SolveTime) = m._run_time -MOI.get(m::Optimizer, ::MOI.NodeCount) = m._maximum_node_id -MOI.get(m::Optimizer, ::MOI.ResultCount) = (m._result_status_code === MOI.FEASIBLE_POINT) ? 
1 : 0 -MOI.get(m::Optimizer, ::MOI.TimeLimitSec) = m.time_limit - -function MOI.get(model::Optimizer, ::MOI.VariablePrimal, vi::MOI.VariableIndex) - check_inbounds!(model, vi) - return model._continuous_solution[vi.value] -end - -function MOI.get(m::Optimizer, p::MOI.RawParameter) - if p.name isa String - psym = Symbol(p.name) - elseif p.name isa Symbol - psym = p.name - else - error("EAGO only supports raw parameters with Symbol or String names.") - end - - if psym in fieldnames(EAGOParameters) - return getfield(m._parameters, psym) - else - return getfield(m, psym) - end -end - -##### -##### -##### Support, set, and evaluate objective functions -##### -##### -MOI.supports(::Optimizer, ::MOI.TimeLimitSec) = true -MOI.supports(::Optimizer, ::MOI.ObjectiveSense) = true -MOI.supports(::Optimizer, ::MOI.ObjectiveFunction{F}) where {F <: Union{SV, SAF, SQF}} = true - -function MOI.set(m::Optimizer, ::MOI.NLPBlock, nlp_data::MOI.NLPBlockData) - if nlp_data.has_objective - m._input_problem._objective_type = NONLINEAR - end - m._input_problem._nlp_data = nlp_data - return nothing -end - -function MOI.set(m::Optimizer, ::MOI.ObjectiveFunction{SV}, func::SV) - check_inbounds!(m, func) - m._input_problem._objective_sv = func - m._input_problem._objective_type = SINGLE_VARIABLE - return nothing -end - -function MOI.set(m::Optimizer, ::MOI.ObjectiveFunction{SAF}, func::SAF) - check_inbounds!(m, func) - m._input_problem._objective_saf = func - m._input_problem._objective_type = SCALAR_AFFINE - return nothing -end - -function MOI.set(m::Optimizer, ::MOI.ObjectiveFunction{SQF}, func::SQF) - check_inbounds!(m, func) - m._input_problem._objective_sqf = func - m._input_problem._objective_type = SCALAR_QUADRATIC - return nothing -end - -function MOI.set(m::Optimizer, ::MOI.ObjectiveSense, sense::MOI.OptimizationSense) - m._input_problem._optimization_sense = sense - return nothing -end + _run_time::Float64 + + _objective_value::Float64 + _objective_bound::Float64 + 
_relative_gap::Float64 + _iteration_count::Int + _node_count::Int +end +function Optimizer{Q,S,T}(sb::SubSolvers{Q,S,T}) where {Q,S,T} + return Optimizer{Q,S,T}(sb, false, nothing, nothing, GlobalOptimizer{Q,S,T}(_subsolvers = sb, ext = _ext(sb)), + InputProblem(), ParsedProblem(), EAGOParameters(), + MOI.OPTIMIZE_NOT_CALLED, MOI.OTHER_RESULT_STATUS, + 0.0, -Inf, Inf, Inf, 0, 0) +end +function Optimizer(subsolver_block::SubSolvers{Q,S,T} = SubSolvers(); kwargs...) where {Q,S,T} + if length(kwargs) > 0 + error("""Passing optimizer attributes as keyword arguments to `EAGO.Optimizer` is deprecated. + Use MOI.set(model, MOI.RawParameter("key"), value) or + JuMP.set_optimizer_attribute(model, "key", value) instead.""") + end + sb = SubSolvers{Incremental{Q},Incremental{S},T}(Incremental(subsolver_block.relaxed_optimizer), + Incremental(subsolver_block.upper_optimizer), + subsolver_block.ext) + m = Optimizer{Incremental{Q},Incremental{S},T}(sb) + m._global_optimizer = GlobalOptimizer{Incremental{Q},Incremental{S},T}(; _subsolvers = sb, ext = _ext(sb)) + return m +end + +_constraints(m::Optimizer, ::Type{VI}, ::Type{LT}) = m._input_problem._vi_leq_constraints +_constraints(m::Optimizer, ::Type{VI}, ::Type{GT}) = m._input_problem._vi_geq_constraints +_constraints(m::Optimizer, ::Type{VI}, ::Type{ET}) = m._input_problem._vi_eq_constraints +_constraints(m::Optimizer, ::Type{VI}, ::Type{IT}) = m._input_problem._vi_it_constraints +_constraints(m::Optimizer, ::Type{VI}, ::Type{ZO}) = m._input_problem._vi_zo_constraints +_constraints(m::Optimizer, ::Type{VI}, ::Type{MOI.Integer}) = m._input_problem._vi_int_constraints + +_constraints(m::Optimizer, ::Type{SAF}, ::Type{LT}) = _constraints(m._input_problem, SAF, LT) +_constraints(m::Optimizer, ::Type{SAF}, ::Type{GT}) = _constraints(m._input_problem, SAF, GT) +_constraints(m::Optimizer, ::Type{SAF}, ::Type{ET}) = _constraints(m._input_problem, SAF, ET) + +_constraints(m::Optimizer, ::Type{SQF}, ::Type{LT}) = 
_constraints(m._input_problem, SQF, LT) +_constraints(m::Optimizer, ::Type{SQF}, ::Type{GT}) = _constraints(m._input_problem, SQF, GT) +_constraints(m::Optimizer, ::Type{SQF}, ::Type{ET}) = _constraints(m._input_problem, SQF, ET) + +_ext(m::Optimizer) = _ext(m._global_optimizer) \ No newline at end of file diff --git a/src/eago_optimizer/parse.jl b/src/eago_optimizer/parse.jl index 551a1059..1d917cd5 100644 --- a/src/eago_optimizer/parse.jl +++ b/src/eago_optimizer/parse.jl @@ -11,106 +11,297 @@ ############################################################################# """ -$(TYPEDSIGNATURES) + add_objective! -Converts `MOI.MAX_SENSE` objective to equivalent `MOI.MIN_SENSE` objective -`max(f) = -min(-f)`. +Adds objective function (if any) to the parsed problem. """ -function convert_to_min!(m::Optimizer) - - m._working_problem._optimization_sense = MOI.MIN_SENSE - if m._input_problem._optimization_sense === MOI.MAX_SENSE - - obj_type = m._input_problem._objective_type - if obj_type === SINGLE_VARIABLE - m._working_problem._objective_type = SCALAR_AFFINE - m._working_problem._objective_saf = MOIU.operate(-, Float64, m._working_problem._objective_sv) - m._working_problem._objective_saf_parsed = AffineFunctionIneq(m._working_problem._objective_saf, LT_ZERO) +add_objective!(m::ParsedProblem, f) = nothing +add_objective!(m::ParsedProblem, f::VI) = (m._objective = f; nothing) +add_objective!(m::ParsedProblem, f::SAF) = (m._objective = AffineFunctionIneq(f, LT_ZERO); nothing) +add_objective!(m::ParsedProblem, f::SQF) = (m._objective = BufferedQuadraticIneq(f, LT_ZERO); nothing) - elseif obj_type === SCALAR_AFFINE - m._working_problem._objective_saf = MOIU.operate(-, Float64, m._working_problem._objective_saf) - m._working_problem._objective_saf_parsed = AffineFunctionIneq(m._working_problem._objective_saf, LT_ZERO) +""" + add_nonlinear_evaluator! 
- elseif obj_type === SCALAR_QUADRATIC - sqf = m._working_problem._objective_sqf.sqf - m._working_problem._objective_sqf.sqf = MOIU.operate(-, Float64, sqf) + Adds an Evaluator structure if nonlinear terms are attached. +""" +add_nonlinear_evaluator!(m::GlobalOptimizer, evaluator::Nothing) = nothing +function add_nonlinear_evaluator!(m::GlobalOptimizer, d::JuMP.NLPEvaluator) + m._working_problem._relaxed_evaluator = Evaluator() + relax_eval = m._working_problem._relaxed_evaluator + relax_eval.user_operators = OperatorRegistry(d.model.nlp_data.user_operators) + relax_eval.subgrad_tol = m._parameters.subgrad_tol + m._nonlinear_evaluator_created = true + return +end +add_nonlinear_evaluator!(m::GlobalOptimizer, nldata) = add_nonlinear_evaluator!(m, nldata.evaluator) +add_nonlinear_evaluator!(m::GlobalOptimizer) = add_nonlinear_evaluator!(m, m._input_problem._nlp_data) - elseif obj_type === NONLINEAR - # updates tape for nlp_data block (used by local optimizer) - nd = m._working_problem._nlp_data.evaluator.m.nlp_data.nlobj.nd - pushfirst!(nd, NodeData(JuMP._Derivatives.CALLUNIVAR, 2, -1)) - nd[2] = NodeData(nd[2].nodetype, nd[2].index, 1) - for i = 3:length(nd) - @inbounds nd[i] = NodeData(nd[i].nodetype, nd[i].index, nd[i].parent + 1) - end +function link_subexpression_dicts!(m::GlobalOptimizer) + evaluator = m._working_problem._relaxed_evaluator + n_subexpr = length(evaluator.subexpressions) - # updates tape used by evaluator for the nonlinear objective (used by the relaxed optimizer) - nd = m._working_problem._objective_nl.expr.nd - pushfirst!(nd, NodeData(JuMP._Derivatives.CALLUNIVAR, 2, -1)) - nd[2] = NodeData(nd[2].nodetype, nd[2].index, 1) - for i = 3:length(nd) - @inbounds nd[i] = NodeData(nd[i].nodetype, nd[i].index, nd[i].parent + 1) - end - I, J, V = findnz(m._working_problem._objective_nl.expr.adj) - I .+= 1 - J .+= 1 - pushfirst!(I, 2) - pushfirst!(J, 1) - pushfirst!(V, true) - m._working_problem._objective_nl.expr.adj = sparse(I, J, V) - - set_val = 
copy(m._working_problem._objective_nl.expr.setstorage[1]) - pushfirst!(m._working_problem._objective_nl.expr.setstorage, set_val) - pushfirst!(m._working_problem._objective_nl.expr.numberstorage, 0.0) - pushfirst!(m._working_problem._objective_nl.expr.isnumber, false) + dn = Dict{Int,Float64}() + din = Dict{Int,Bool}() + for i = 1:n_subexpr + dn[i] = 0.0 + din[i] = false + end + for ex in evaluator.subexpressions + mctyp = mc_type(ex) + ds = Dict{Int,mctyp}() + di = Dict{Int,mctyp}() + for i = 1:n_subexpr + ds[i] = zero(mctyp) + di[i] = zero(mctyp) end + copy_subexpr!(ex.relax_cache, ds, dn, din, di) end - - return nothing + if m._working_problem._objective isa BufferedNonlinearFunction + ex = m._working_problem._objective.ex + mctyp = mc_type(ex) + ds = Dict{Int,mctyp}() + di = Dict{Int,mctyp}() + for i = 1:n_subexpr + ds[i] = zero(mctyp) + di[i] = zero(mctyp) + end + copy_subexpr!(ex.relax_cache, ds, dn, din, di) + end + for f in m._working_problem._nonlinear_constr + mctyp = mc_type(f.ex) + ds = Dict{Int,mctyp}() + di = Dict{Int,mctyp}() + for i = 1:n_subexpr + ds[i] = zero(mctyp) + di[i] = zero(mctyp) + end + copy_subexpr!(f.ex.relax_cache, ds, dn, din, di) + end + return end """ -Performs an epigraph reformulation assuming the working_problem is a minimization problem. +Adds an Evaluator, nonlinear functions, and populates each appropriately. 
""" -function reform_epigraph!(m::Optimizer) +add_nonlinear!(m::GlobalOptimizer, evaluator::Nothing) = nothing +function add_nonlinear!(m::GlobalOptimizer, evaluator::JuMP.NLPEvaluator) - if m._parameters.presolve_epigraph_flag - #= - # add epigraph variable - obj_variable_index = MOI.add_variable(m) + add_nonlinear_evaluator!(m, m._input_problem._nlp_data.evaluator) - # converts ax + b objective to ax - y <= -b constraint with y objective - obj_type = m._working_problem._objective_type - if obj_type === SCALAR_AFFINE + nlp_data = m._input_problem._nlp_data - # update unparsed expression - objective_saf = m._working_problem._objective_saf - push!(objective_saf, SAT(-1.0, obj_variable_index)) - obj_ci = MOI.add_constraint(m, saf, LT(-objective_saf.constant)) + MOI.initialize(evaluator, Symbol[:Grad, :Jac, :ExprGraph]) - # update parsed expression (needed for interval bounds) + user_operator_registry = OperatorRegistry(evaluator.model.nlp_data.user_operators) - # converts ax + b objective to ax - y <= -b constraint with y objective - elseif obj_type === SCALAR_QUADRATIC + # set nlp data structure + m._working_problem._nlp_data = nlp_data + mul_relax = m._parameters.mul_relax_style + if mul_relax == 1 + rtype = Relax() + renum = STD_RELAX + ruse_apriori = true + elseif mul_relax == 2 + rtype = RelaxAA() + renum = MC_AFF_RELAX + ruse_apriori = true + elseif mul_relax == 3 + rtype = RelaxMulEnum() + renum = MC_ENUM_RELAX + ruse_apriori = true + else + rtype = Relax() + renum = STD_RELAX + ruse_apriori = false + end - # update parsed expression - objective_sqf = m._working_problem._objective_sqf - obj_ci = MOI.add_constraint(m, saf, LT()) + # add subexpressions (assumes they are already ordered by JuMP) + # creates a dictionary that lists the subexpression sparsity + # by search each node for variables dict[2] = [2,3] indicates + # that subexpression 2 depends on variables 2 and 3 + # this is referenced when subexpressions are called by other + # subexpressions or 
functions to determine overall sparsity + # the sparsity of a function is the collection of indices + # in all participating subexpressions and the function itself + # it is necessary to define this as such to enable reverse + # McCormick constraint propagation + relax_evaluator = m._working_problem._relaxed_evaluator + relax_evaluator.relax_type = renum + dict_sparsity = Dict{Int,Vector{Int}}() + if length(evaluator.model.nlp_data.nlexpr) > 0 # should check for nonlinear objective, constraint + for i = 1:length(evaluator.subexpressions) + subexpr = evaluator.subexpressions[i] + nlexpr = NonlinearExpression!(m._auxillary_variable_info, rtype, subexpr, MOI.NLPBoundsPair(-Inf, Inf), + dict_sparsity, i, evaluator.subexpression_linearity, + user_operator_registry, evaluator.model.nlp_data.nlparamvalues, + m._parameters.relax_tag, ruse_apriori; is_sub = true) + push!(relax_evaluator.subexpressions, nlexpr) + end + end - elseif obj_type === NONLINEAR + # scrubs udf functions using Cassette to remove odd data structures... + # alternatively convert udfs to JuMP scripts... 
+ m._parameters.presolve_scrubber_flag && Script.scrub!(m._working_problem._nlp_data) + if m._parameters.presolve_to_JuMP_flag + Script.udf_loader!(m) + end - # updated parsed expressions - objective_nl = m._working_problem._objective_nl + # add nonlinear objective + if evaluator.has_nlobj + m._working_problem._objective = BufferedNonlinearFunction(m._auxillary_variable_info, rtype, evaluator.objective, MOI.NLPBoundsPair(-Inf, Inf), + dict_sparsity, evaluator.subexpression_linearity, + user_operator_registry, + evaluator.model.nlp_data.nlparamvalues, + m._parameters.relax_tag, ruse_apriori) + end - end + # add nonlinear constraints + constraint_bounds = m._working_problem._nlp_data.constraint_bounds + for i = 1:length(evaluator.constraints) + constraint = evaluator.constraints[i] + bnds = constraint_bounds[i] + push!(m._working_problem._nonlinear_constr, BufferedNonlinearFunction(m._auxillary_variable_info, rtype, constraint, bnds, dict_sparsity, + evaluator.subexpression_linearity, + user_operator_registry, + evaluator.model.nlp_data.nlparamvalues, + m._parameters.relax_tag, ruse_apriori)) + end - MOI.set(m, MOI.ObjectiveFunction{SV}(), SV(obj_variable_index)) - =# + relax_evaluator.subexpressions_eval = fill(false, length(relax_evaluator.subexpressions)) + return +end +add_nonlinear!(m::GlobalOptimizer, nldata) = add_nonlinear!(m, nldata.evaluator) +add_nonlinear!(m::GlobalOptimizer) = add_nonlinear!(m, m._input_problem._nlp_data) + +function reform_epigraph_min!(m::GlobalOptimizer, d::ParsedProblem, f::VI) + flag = m._input_problem._optimization_sense == MOI.MAX_SENSE + d._objective = AffineFunctionIneq(f, is_max = flag) + d._objective_saf = SAF([SAT(flag ? 
-1.0 : 1.0, VI(f.value))], 0.0) + return +end +function reform_epigraph_min!(m::GlobalOptimizer, d::ParsedProblem, f::AffineFunctionIneq) + d._objective_saf = m._input_problem._objective + if m._input_problem._optimization_sense == MOI.MAX_SENSE + MOIU.operate!(-, Float64, d._objective_saf) + d._objective = AffineFunctionIneq([(-i,j) for (i,j) in f.terms], -f.constant, f.len) + else + d._objective = f end + return +end - return nothing +function add_η!(m::ParsedProblem) + m._variable_count += 1 + push!(m._variable_info, VariableInfo{Float64}()) + return m._variable_count +end + +set_variable_values!(wp, v) = set_variable_values!(wp._relaxed_evaluator, v) +reform_epigraph_min!(m::GlobalOptimizer, d::ParsedProblem, f::Nothing) = nothing +function reform_epigraph_min!(m::GlobalOptimizer, d::ParsedProblem, f::BufferedQuadraticIneq) + ip = m._input_problem + wp = m._working_problem + + vi = wp._variable_info + q = _variable_num(FullVar(), m) + v = VariableValues{Float64}(x = mid.(vi), x0 = mid.(vi), + lower_variable_bounds = lower_bound.(vi), + upper_variable_bounds = upper_bound.(vi), + node_to_variable_map = [i for i in 1:q], + variable_to_node_map = [i for i in 1:q]) + set_variable_values!(wp, v) + + n = NodeBB(lower_bound.(vi), upper_bound.(vi), is_integer.(vi)) + m._current_node = n + set_node!(wp._relaxed_evaluator, n) + + ηi = add_η!(d) + if !isnothing(ip._nlp_data) + @variable(ip._nlp_data.evaluator.model, η) + @objective(ip._nlp_data.evaluator.model, Min, η) + end + + sqf_obj = copy(m._input_problem._objective) + if !_is_input_min(m) + MOIU.operate!(-, Float64, sqf_obj) + end + d._objective_saf = SAF([SAT(1.0, VI(ηi))], 0.0) + push!(sqf_obj.affine_terms, SAT(-1.0, VI(ηi))) + _constraints(ip, SQF, LT)[CI{SQF,LT}(ip._constraint_count += 1)] = (sqf_obj, LT(0.0)) + push!(wp._sqf_leq, BufferedQuadraticIneq(sqf_obj, LT(0.0))) + + f.buffer[ηi] = 0.0 + f.len += 1 + if !_is_input_min(m) + MOIU.operate!(-, Float64, f.func) + end + MOIU.operate!(-, Float64, f.func, 
VI(ηi)) + push!(f.saf.terms, SAT(-1.0, VI(ηi))) + m._obj_var_slack_added = true + return +end +function reform_epigraph_min!(m::GlobalOptimizer, d::ParsedProblem, f::BufferedNonlinearFunction) + ip = m._input_problem + wp = m._working_problem + + vi = wp._variable_info + q = _variable_num(FullVar(), m) + v = VariableValues{Float64}(x = mid.(vi), x0 = mid.(vi), + lower_variable_bounds = lower_bound.(vi), + upper_variable_bounds = upper_bound.(vi), + node_to_variable_map = [i for i in 1:q], + variable_to_node_map = [i for i in 1:q]) + + ηi = add_η!(d) + @variable(ip._nlp_data.evaluator.model, η) + wp._objective_saf = SAF([SAT(1.0, VI(ηi))], 0.0) + + nd = ip._nlp_data.evaluator.model.nlp_data.nlobj.nd + if !_is_input_min(m) + pushfirst!(nd, NodeData(JuMP._Derivatives.CALLUNIVAR, 2, 1)) + pushfirst!(nd, NodeData(JuMP._Derivatives.CALL, 2, -1)) + nd[3] = NodeData(nd[3].nodetype, nd[3].index, 2) + for i = 4:length(nd) + @inbounds nd[i] = NodeData(nd[i].nodetype, nd[i].index, nd[i].parent + 2) + end + else + pushfirst!(nd, NodeData(JuMP._Derivatives.CALL, 2, -1)) + nd[2] = NodeData(nd[2].nodetype, nd[2].index, 1) + for i = 3:length(nd) + @inbounds nd[i] = NodeData(nd[i].nodetype, nd[i].index, nd[i].parent + 1) + end + end + push!(nd, NodeData(JuMP._Derivatives.VARIABLE, ηi, 1)) + nlobj = ip._nlp_data.evaluator.model.nlp_data.nlobj + nlexpr = JuMP._NonlinearExprData(copy(nlobj.nd), copy(nlobj.const_values)) + nlcons = JuMP._NonlinearConstraint(nlexpr, -Inf, 0.0) + + ip._nlp_data.evaluator.model.nlp_data.nlobj = nothing + ip._nlp_data.evaluator.has_nlobj = false + push!(ip._nlp_data.evaluator.model.nlp_data.nlconstr, nlcons) + constraint_bounds = ip._nlp_data.constraint_bounds + push!(constraint_bounds, MOI.NLPBoundsPair(-Inf, 0.0)) + ip._nlp_data = MOI.NLPBlockData(constraint_bounds, ip._nlp_data.evaluator, false) + + empty!(d._relaxed_evaluator.subexpressions) + empty!(d._nonlinear_constr) + add_nonlinear!(m) + m._obj_var_slack_added = true + return +end + +""" + 
+Performs an epigraph reformulation assuming the working_problem is a minimization problem. +""" +function reform_epigraph_min!(m::GlobalOptimizer) + ip = m._input_problem + m._branch_variables = fill(false, m._working_problem._variable_count) + m._obj_mult = (ip._optimization_sense == MOI.MAX_SENSE) ? -1.0 : 1.0 + reform_epigraph_min!(m, m._working_problem, m._working_problem._objective) end function check_set_is_fixed(v::VariableInfo) @@ -125,9 +316,7 @@ $(TYPEDSIGNATURES) Detects any variables set to a fixed value by equality or inequality constraints and populates the `_fixed_variable` storage array. """ -function label_fixed_variables!(m::Optimizer) - map!(x -> check_set_is_fixed(x), m._fixed_variable, m._working_problem._variable_info) -end +label_fixed_variables!(m::GlobalOptimizer) = map!(x -> check_set_is_fixed(x), m._fixed_variable, m._working_problem._variable_info) """ $(TYPEDSIGNATURES) @@ -135,397 +324,183 @@ $(TYPEDSIGNATURES) Detects any variables participating in nonconvex terms and populates the `_branch_variables` storage array. 
""" -function label_branch_variables!(m::Optimizer) +function label_branch_variables!(m::GlobalOptimizer) + wp = m._working_problem m._user_branch_variables = !isempty(m._parameters.branch_variable) if m._user_branch_variables - append!(m._branch_variables, m._parameters.branch_variable) + m._branch_variables = m._parameters.branch_variable else - - append!(m._branch_variables, fill(false, m._working_problem._variable_count)) - - # adds nonlinear terms in quadratic constraints - sqf_leq = m._working_problem._sqf_leq - for i = 1:m._working_problem._sqf_leq_count - quad_ineq = @inbounds sqf_leq[i] - for term in quad_ineq.func.quadratic_terms - variable_index_1 = term.variable_index_1.value - variable_index_2 = term.variable_index_2.value - @inbounds m._branch_variables[variable_index_1] = true - @inbounds m._branch_variables[variable_index_2] = true - end + m._branch_variables = fill(false, m._working_problem._variable_count) + for f in wp._sqf_leq, t in f.func.quadratic_terms + m._branch_variables[t.variable_1.value] = true + m._branch_variables[t.variable_2.value] = true end - - sqf_eq = m._working_problem._sqf_eq - for i = 1:m._working_problem._sqf_eq_count - quad_eq = @inbounds sqf_eq[i] - for term in quad_eq.func.quadratic_terms - variable_index_1 = term.variable_index_1.value - variable_index_2 = term.variable_index_2.value - @inbounds m._branch_variables[variable_index_1] = true - @inbounds m._branch_variables[variable_index_2] = true - end + for f in wp._sqf_eq, t in f.func.quadratic_terms + m._branch_variables[t.variable_1.value] = true + m._branch_variables[t.variable_2.value] = true end - - obj_type = m._working_problem._objective_type - if obj_type === SCALAR_QUADRATIC - for term in m._working_problem._objective_sqf.func.quadratic_terms - variable_index_1 = term.variable_index_1.value - variable_index_2 = term.variable_index_2.value - @inbounds m._branch_variables[variable_index_1] = true - @inbounds m._branch_variables[variable_index_2] = true - end + 
for f in wp._nonlinear_constr, i in sparsity(f) + m._branch_variables[i] = true end - - # label nonlinear branch variables (assumes affine terms have been extracted) - nl_constr = m._working_problem._nonlinear_constr - for i = 1:m._working_problem._nonlinear_count - nl_constr_eq = @inbounds nl_constr[i] - grad_sparsity = nl_constr_eq.expr.grad_sparsity - for indx in grad_sparsity - @inbounds m._branch_variables[indx] = true + if wp._objective isa BufferedQuadraticIneq + for t in wp._objective.func.quadratic_terms + m._branch_variables[t.variable_1.value] = true + m._branch_variables[t.variable_2.value] = true end - end - - if obj_type === NONLINEAR - grad_sparsity = m._working_problem._objective_nl.expr.grad_sparsity - for indx in grad_sparsity - @inbounds m._branch_variables[indx] = true + elseif wp._objective isa BufferedNonlinearFunction + for i in sparsity(wp._objective) + m._branch_variables[i] = true end end end # add a map of branch/node index to variables in the continuous solution - for i = 1:m._working_problem._variable_count - if m._working_problem._variable_info[i].is_fixed + for i = 1:wp._variable_count + if is_fixed(wp._variable_info[i]) m._branch_variables[i] = false - continue - end - if m._branch_variables[i] + elseif m._branch_variables[i] || wp._variable_info[i].is_integer push!(m._branch_to_sol_map, i) + elseif i == wp._variable_count + m._branch_variables[i] = false end end # creates reverse map - m._sol_to_branch_map = zeros(m._working_problem._variable_count) + m._sol_to_branch_map = zeros(wp._variable_count) for i = 1:length(m._branch_to_sol_map) j = m._branch_to_sol_map[i] m._sol_to_branch_map[j] = i end # adds branch solution to branch map to evaluator - m._working_problem._relaxed_evaluator.node_to_variable_map = m._branch_to_sol_map - m._working_problem._relaxed_evaluator.variable_to_node_map = m._sol_to_branch_map - m._working_problem._relaxed_evaluator.node_count = length(m._branch_to_sol_map) - - return nothing + vnum = 
wp._variable_count + initialize!(m._branch_cost, length(m._branch_to_sol_map)) + l = lower_bound.(m._working_problem._variable_info) + u = upper_bound.(m._working_problem._variable_info) + v = VariableValues{Float64}(x = zeros(vnum), + x0 = zeros(vnum), + lower_variable_bounds = l, + upper_variable_bounds = u, + node_to_variable_map = m._branch_to_sol_map, + variable_to_node_map = m._sol_to_branch_map) + + wp._relaxed_evaluator.variable_values = v + (wp._objective isa BufferedNonlinearFunction) && set_variable_storage!(wp._objective, v) + foreach(i -> set_variable_storage!(i, v), wp._nonlinear_constr) + foreach(i -> set_variable_storage!(i, v), wp._relaxed_evaluator.subexpressions) + return end -add_nonlinear_functions!(m::Optimizer) = add_nonlinear_functions!(m, m._input_problem._nlp_data.evaluator) - -add_nonlinear_functions!(m::Optimizer, evaluator::Nothing) = nothing -add_nonlinear_functions!(m::Optimizer, evaluator::EmptyNLPEvaluator) = nothing -function add_nonlinear_functions!(m::Optimizer, evaluator::JuMP.NLPEvaluator) - - nlp_data = m._input_problem._nlp_data - MOI.initialize(evaluator, Symbol[:Grad, :ExprGraph]) - - # set nlp data structure - m._working_problem._nlp_data = nlp_data - - # add subexpressions (assumes they are already ordered by JuMP) - # creates a dictionary that lists the subexpression sparsity - # by search each node for variables dict[2] = [2,3] indicates - # that subexpression 2 depends on variables 2 and 3 - # this is referenced when subexpressions are called by other - # subexpressions or functions to determine overall sparsity - # the sparsity of a function is the collection of indices - # in all participating subexpressions and the function itself - # it is necessary to define this as such to enable reverse - # McCormick constraint propagation - relax_evaluator = m._working_problem._relaxed_evaluator - has_subexpressions = length(evaluator.m.nlp_data.nlexpr) > 0 - dict_sparsity = Dict{Int64,Vector{Int64}}() - if has_subexpressions - 
for i = 1:length(evaluator.subexpressions) - subexpr = evaluator.subexpressions[i] - push!(relax_evaluator.subexpressions, NonlinearExpression!(subexpr, dict_sparsity, i, - evaluator.subexpression_linearity, - m._parameters.relax_tag)) - end +function variable_load_parse!(m::Optimizer, ::Type{VI}, ::Type{T}) where T + wp = m._global_optimizer._working_problem = m._working_problem + for (i, v) in enumerate(values(_constraints(m, VI, T))) + wp._variable_info[v[1].value] = VariableInfo(wp._variable_info[v[1].value], v[2]) end - - # scrubs udf functions using Cassette to remove odd data structures... - # alternatively convert udfs to JuMP scripts... - m._parameters.presolve_scrubber_flag && Script.scrub!(m._working_problem._nlp_data) - if m._parameters.presolve_to_JuMP_flag - Script.udf_loader!(m) - end - - parameter_values = copy(evaluator.parameter_values) - - # add nonlinear objective - if evaluator.has_nlobj - m._working_problem._objective_nl = BufferedNonlinearFunction(evaluator.objective, MOI.NLPBoundsPair(-Inf, Inf), - dict_sparsity, evaluator.subexpression_linearity, - m._parameters.relax_tag) - end - - # add nonlinear constraints - constraint_bounds = m._working_problem._nlp_data.constraint_bounds - for i = 1:length(evaluator.constraints) - constraint = evaluator.constraints[i] - bnds = constraint_bounds[i] - push!(m._working_problem._nonlinear_constr, BufferedNonlinearFunction(constraint, bnds, dict_sparsity, - evaluator.subexpression_linearity, - m._parameters.relax_tag)) - end - - m._input_problem._nonlinear_count = length(m._working_problem._nonlinear_constr) - m._working_problem._nonlinear_count = length(m._working_problem._nonlinear_constr) - - return nothing -end - -function add_nonlinear_evaluator!(m::Optimizer) - evaluator = m._input_problem._nlp_data.evaluator - add_nonlinear_evaluator!(m, evaluator) - return nothing -end - -add_nonlinear_evaluator!(m::Optimizer, evaluator::Nothing) = nothing -add_nonlinear_evaluator!(m::Optimizer, 
evaluator::EmptyNLPEvaluator) = nothing -function add_nonlinear_evaluator!(m::Optimizer, evaluator::JuMP.NLPEvaluator) - m._working_problem._relaxed_evaluator = Evaluator() - - relax_evaluator = m._working_problem._relaxed_evaluator - relax_evaluator.variable_count = length(m._working_problem._variable_info) - relax_evaluator.user_operators = evaluator.m.nlp_data.user_operators - - relax_evaluator.lower_variable_bounds = zeros(relax_evaluator.variable_count) - relax_evaluator.upper_variable_bounds = zeros(relax_evaluator.variable_count) - relax_evaluator.x = zeros(relax_evaluator.variable_count) - relax_evaluator.num_mv_buffer = zeros(relax_evaluator.variable_count) - relax_evaluator.treat_x_as_number = fill(false, relax_evaluator.variable_count) - relax_evaluator.ctx = GuardCtx(metadata = GuardTracker(m._parameters.domain_violation_ϵ, - m._parameters.domain_violation_guard_on)) - relax_evaluator.subgrad_tol = m._parameters.subgrad_tol - - m._nonlinear_evaluator_created = true - - return nothing -end - -function add_subexpression_buffers!(m::Optimizer) - relax_evaluator = m._working_problem._relaxed_evaluator - relax_evaluator.subexpressions_eval = fill(false, length(relax_evaluator.subexpressions)) - - return nothing + return end """ Translates input problem to working problem. Routines and checks and optional manipulation is left to the presolve stage. 
""" -function initial_parse!(m::Optimizer) +function initial_parse!(m::Optimizer{R,S,T}) where {R,S,T} # reset initial time and solution statistics - m._time_left = m._parameters.time_limit + m._global_optimizer._time_left = m._parameters.time_limit - # add variables to working model - ip = m._input_problem - append!(m._working_problem._variable_info, ip._variable_info) - m._working_problem._variable_count = ip._variable_count - - # add linear constraints to the working problem - linear_leq = ip._linear_leq_constraints - for i = 1:ip._linear_leq_count - linear_func, leq_set = @inbounds linear_leq[i] - push!(m._working_problem._saf_leq, AffineFunctionIneq(linear_func, leq_set)) - m._working_problem._saf_leq_count += 1 - end + ip = m._global_optimizer._input_problem = m._input_problem + wp = m._global_optimizer._working_problem = m._working_problem + m._global_optimizer._parameters = m._parameters - linear_geq = ip._linear_geq_constraints - for i = 1:ip._linear_geq_count - linear_func, geq_set = @inbounds linear_geq[i] - push!(m._working_problem._saf_leq, AffineFunctionIneq(linear_func, geq_set)) - m._working_problem._saf_leq_count += 1 + # add variables to working model + wp._variable_info = VariableInfo{Float64}[VariableInfo{Float64}() for i=1:ip._variable_count] + variable_load_parse!(m, VI, LT) + variable_load_parse!(m, VI, GT) + variable_load_parse!(m, VI, ET) + variable_load_parse!(m, VI, ZO) + variable_load_parse!(m, VI, MOI.Integer) + wp._variable_count = ip._variable_count + + for (f, s) in values(ip._linear_leq_constraints) + push!(wp._saf_leq, AffineFunctionIneq(f, s)) end - - linear_eq = ip._linear_eq_constraints - for i = 1:ip._linear_eq_count - linear_func, eq_set = @inbounds linear_eq[i] - push!(m._working_problem._saf_eq, AffineFunctionEq(linear_func, eq_set)) - m._working_problem._saf_eq_count += 1 + for (f, s) in values(ip._linear_geq_constraints) + push!(wp._saf_leq, AffineFunctionIneq(f, s)) end - - # add quadratic constraints to the working 
problem - quad_leq = ip._quadratic_leq_constraints - for i = 1:ip._quadratic_leq_count - quad_func, leq_set = @inbounds quad_leq[i] - push!(m._working_problem._sqf_leq, BufferedQuadraticIneq(quad_func, leq_set)) - m._working_problem._sqf_leq_count += 1 + for (f, s) in values(ip._linear_eq_constraints) + push!(wp._saf_eq, AffineFunctionEq(f, s)) end - quad_geq = ip._quadratic_geq_constraints - for i = 1:ip._quadratic_geq_count - quad_func, geq_set = @inbounds quad_geq[i] - push!(m._working_problem._sqf_leq, BufferedQuadraticIneq(quad_func, geq_set)) - m._working_problem._sqf_leq_count += 1 + for (f, s) in values(ip._quadratic_leq_constraints) + push!(wp._sqf_leq, BufferedQuadraticIneq(f, s)) end - - quad_eq = ip._quadratic_eq_constraints - for i = 1:ip._quadratic_eq_count - quad_func, eq_set = @inbounds quad_eq[i] - push!(m._working_problem._sqf_eq, BufferedQuadraticEq(quad_func, eq_set)) - m._working_problem._sqf_eq_count += 1 + for (f, s) in values(ip._quadratic_geq_constraints) + push!(wp._sqf_leq, BufferedQuadraticIneq(f, s)) end - - # add conic constraints to the working problem - soc_vec = m._input_problem._conic_second_order - for i = 1:ip._conic_second_order_count - soc_func, soc_set = @inbounds soc_vec[i] - first_variable_loc = soc_func.variables[1].value - prior_lbnd = m._working_problem._variable_info[first_variable_loc].lower_bound - m._working_problem._variable_info[first_variable_loc].lower_bound = max(prior_lbnd, 0.0) - push!(m._working_problem._conic_second_order, BufferedSOC(soc_func, soc_set)) - m._working_problem._conic_second_order_count += 1 + for (f, s) in values(ip._quadratic_eq_constraints) + push!(wp._sqf_eq, BufferedQuadraticEq(f, s)) end - # set objective function - m._working_problem._objective_type = ip._objective_type - m._working_problem._objective_sv = ip._objective_sv - m._working_problem._objective_saf = ip._objective_saf - m._working_problem._objective_saf_parsed = AffineFunctionIneq(ip._objective_saf, LT_ZERO) - 
m._working_problem._objective_sqf = BufferedQuadraticIneq(ip._objective_sqf, LT_ZERO) - - # add nonlinear constraints - # the nonlinear evaluator loads with populated subexpressions which are then used - # to asssess the linearity of subexpressions - add_nonlinear_evaluator!(m) - add_nonlinear_functions!(m) - add_subexpression_buffers!(m) + add_objective!(wp, ip._objective) # set objective function + add_nonlinear!(m._global_optimizer) # add nonlinear constraints, evaluator, subexpressions # converts a maximum problem to a minimum problem (internally) if necessary # this is placed after adding nonlinear functions as this prior routine # copies the nlp_block from the input_problem to the working problem - convert_to_min!(m) - reform_epigraph!(m) - - # labels the variable info and the _fixed_variable vector for each fixed variable - label_fixed_variables!(m) + reform_epigraph_min!(m._global_optimizer) + label_fixed_variables!(m._global_optimizer) + label_branch_variables!(m._global_optimizer) - # labels variables to branch on - label_branch_variables!(m) + link_subexpression_dicts!(m._global_optimizer) # updates run and parse times - new_time = time() - m._start_time - m._parse_time = new_time - m._run_time = new_time - - return nothing -end - -### Routines for parsing the full nonconvex problem -""" -[FUTURE FEATURE] Reformulates quadratic terms in SOC constraints if possible. -For <= or >=, the quadratic term is deleted if an SOCP is detected. For ==, -the SOC check is done for each >= and <=, the convex constraint is reformulated -to a SOC, the concave constraint is keep as a quadratic. 
-""" -function parse_classify_quadratic!(m::Optimizer) - #= - for (id, cinfo) in m._quadratic_constraint - is_soc, add_concave, cfunc, cset, qfunc, qset = check_convexity(cinfo.func, cinfo.set) - if is_soc - MOI.add_constraint(m, cfunc, cset) - deleteat!(m._quadratic_constraint, id) - if add_concave - MOI.add_constraint(m, qfunc, qset) - end - end - end - =# - return nothing -end - -""" -[FUTURE FEATURE] Parses provably convex nonlinear functions into a convex -function buffer -""" -function parse_classify_nlp(m) - return nothing + new_time = time() - m._global_optimizer._start_time + m._global_optimizer._parse_time = new_time + m._global_optimizer._run_time = new_time + return end """ Classifies the problem type """ -function parse_classify_problem!(m::Optimizer) +function parse_classify_problem!(m::GlobalOptimizer) ip = m._input_problem - integer_variable_number = count(is_integer.(ip._variable_info)) - - nl_expr_number = ip._objective_type === NONLINEAR ? 1 : 0 - nl_expr_number += ip._nonlinear_count - cone_constraint_number = ip._conic_second_order_count - quad_constraint_number = ip._quadratic_leq_count + ip._quadratic_geq_count + ip._quadratic_eq_count - linear_or_sv_objective = (ip._objective_type === SINGLE_VARIABLE || ip._objective_type === SCALAR_AFFINE) - relaxed_supports_soc = false - #TODO: relaxed_supports_soc = MOI.supports_constraint(m.relaxed_optimizer, VECOFVAR, SOC) - - if integer_variable_number === 0 - - if cone_constraint_number === 0 && quad_constraint_number === 0 && - nl_expr_number === 0 && linear_or_sv_objective - m._working_problem._problem_type = LP - #println("LP") - elseif quad_constraint_number === 0 && relaxed_supports_soc && - nl_expr_number === 0 && linear_or_sv_objective - m._working_problem._problem_type = SOCP - #println("SOCP") + nl_expr_num = 0 + if isnothing(ip._objective) && !isnothing(ip._nlp_data) + if ip._nlp_data.has_objective + nl_expr_num += 1 + has_objective = true else - #parse_classify_quadratic!(m) - #if 
iszero(m._input_nonlinear_constraint_number) - # if isempty(m._quadratic_constraint) - # m._problem_type = SOCP - # end - #else - # # Check if DIFF_CVX, NS_CVX, DIFF_NCVX, OR NS_NCVX - # m._problem_type = parse_classify_nlp(m) - #end - m._working_problem._problem_type = MINCVX - #println("MINCVX") + has_objective = false end - else - #= - if cone_constraint_number === 0 && quad_constraint_number === 0 && linear_or_sv_objective - elseif quad_constraint_number === 0 && relaxed_supports_soc && linear_or_sv_objective - m._working_problem._problem_type = MISOCP - else - #parse_classify_quadratic!(m) - #= - if iszero(m._nonlinear_constraint_number) - if iszero(m._quadratic_constraint_number) - m._problem_type = MISOCP - end - else - # Performs parsing - _ = parse_classify_nlp(m) - end - =# - m._problem_type = MINCVX - end - =# + elseif isnothing(ip._objective) && isnothing(ip._nlp_data) + has_objective = false end + nl_expr_num += length(m._working_problem._nonlinear_constr) + cone_constr_num = length(ip._conic_second_order) + quad_constr_num = length(ip._quadratic_leq_constraints) + + length(ip._quadratic_geq_constraints) + + length(ip._quadratic_eq_constraints) + + if !isnothing(ip._objective) + has_objective = true + end + has_int_var = !iszero(length(ip._vi_zo_constraints) + length(ip._vi_int_constraints)) - return nothing -end - -""" + lin_or_sv_obj = (ip._objective isa VI || ip._objective isa SAF || !has_objective) + relaxed_supports_soc = false -Basic parsing for global solutions (no extensive manipulation) -""" -function parse_global!(t::ExtensionType, m::Optimizer) - return nothing -end + if (cone_constr_num == 0) && (quad_constr_num == 0) && (nl_expr_num == 0) && lin_or_sv_obj && !has_int_var + m._working_problem._problem_type = LP() + elseif (cone_constr_num == 0) && (quad_constr_num == 0) && (nl_expr_num == 0) && lin_or_sv_obj + m._working_problem._problem_type = MILP() + elseif (quad_constr_num == 0) && relaxed_supports_soc && (nl_expr_num == 0) && 
lin_or_sv_obj && !has_int_var + m._working_problem._problem_type = SOCP() + else + m._working_problem._problem_type = MINCVX() + end + return +end \ No newline at end of file diff --git a/src/eago_optimizer/relax.jl b/src/eago_optimizer/relax.jl deleted file mode 100644 index 3a4e7e23..00000000 --- a/src/eago_optimizer/relax.jl +++ /dev/null @@ -1,589 +0,0 @@ -# Copyright (c) 2018: Matthew Wilhelm & Matthew Stuber. -# This code is licensed under MIT license (see LICENSE.md for full details) -############################################################################# -# EAGO -# A development environment for robust and global optimization -# See https://github.com/PSORLab/EAGO.jl -############################################################################# -# src/eago_optimizer/relax.jl -# Defines routines used construct the relaxed subproblem. -############################################################################# - -""" -$(FUNCTIONNAME) - -Applies the safe cut checks detailed in Khajavirad, 2018 [Khajavirad, Aida, -and Nikolaos V. Sahinidis. "A hybrid LP/NLP paradigm for global optimization -relaxations." Mathematical Programming Computation 10.3 (2018): 383-421] to -ensure that only numerically safe affine relaxations are added. Checks that -i) ``|b| <= safe b`, ii) `safe_l <= abs(ai) <= safe u`, and iii) violates -`safe_l <= abs(ai/aj) <= safe_u`. 
-""" -function is_safe_cut!(m::Optimizer, f::SAF) - - safe_l = m._parameters.cut_safe_l - safe_u = m._parameters.cut_safe_u - safe_b = m._parameters.cut_safe_b - - # violates |b| <= safe_b - (abs(f.constant) > safe_b) && return false - - term_count = length(f.terms) - for i = 1:term_count - - ai = (@inbounds f.terms[i]).coefficient - if ai !== 0.0 - - # violates safe_l <= abs(ai) <= safe_u - ai_abs = abs(ai) - !(safe_l <= abs(ai) <= safe_u) && return false - - # violates safe_l <= abs(ai/aj) <= safe_u - for j = i:term_count - aj = (@inbounds f.terms[j]).coefficient - if aj !== 0.0 - !(safe_l <= abs(ai/aj) <= safe_u) && return false - end - end - end - end - - return true -end - -""" -$(FUNCTIONNAME) - -Relaxs the constraint by adding an affine constraint to the model. -""" -function relax! end - -""" -$(FUNCTIONNAME) - -Default routine for relaxing quadratic constraint `func < 0.0` on node `n`. -Takes affine bounds of convex part at point `x0` and secant line bounds on -concave parts. -""" -function affine_relax_quadratic!(func::SQF, buffer::Dict{Int,Float64}, saf::SAF, - n::NodeBB, sol_to_branch_map::Vector{Int}, - x::Vector{Float64}) - - lower_bounds = n.lower_variable_bounds - upper_bounds = n.upper_variable_bounds - quadratic_constant = func.constant - - # Affine terms only contribute coefficients, so the respective - # values do not contribute to the cut. Since all quadratic terms - # are considered to be branch variables we exclude any potential - # need to retrieve variable bounds from locations other than - # the node. 
- for term in func.quadratic_terms - - a = term.coefficient - idx1 = term.variable_index_1.value - idx2 = term.variable_index_2.value - sol_idx1 = sol_to_branch_map[idx1] - sol_idx2 = sol_to_branch_map[idx2] - x0_1 = x[sol_idx1] - xL_1 = lower_bounds[sol_idx1] - xU_1 = upper_bounds[sol_idx1] - - if idx1 === idx2 - - if a > 0.0 - buffer[idx1] += a*x0_1 - quadratic_constant -= 0.5*a*x0_1*x0_1 - - else - if !isinf(xL_1) && !isinf(xU_1) - buffer[idx1] += 0.5*a*(xL_1 + xU_1) - quadratic_constant -= 0.5*a*xL_1*xU_1 - else - return false - end - end - - else - x0_2 = x[sol_idx2] - xL_2 = lower_bounds[sol_idx2] - xU_2 = upper_bounds[sol_idx2] - - if a > 0.0 - if (!isinf(xL_1) && !isinf(xL_2)) && - ((xU_1 - xL_1)*x0_2 + (xU_2 - xL_2)*x0_1 <= xU_1*xU_2 - xL_1*xL_2) - buffer[idx1] += a*xL_2 - buffer[idx2] += a*xL_1 - quadratic_constant -= a*xL_1*xL_2 - - elseif !isinf(xU_1) && !isinf(xU_2) - buffer[idx1] += a*xU_2 - buffer[idx2] += a*xU_1 - quadratic_constant -= a*xU_1*xU_2 - - else - return false - - end - else - if (!isinf(xU_1) && !isinf(xL_2)) && - ((xU_1 - xL_1)*x0_2 - (xU_2 - xL_2)*x0_1 <= xU_1*xL_2 - xL_1*xU_2) - - buffer[idx1] += a*xL_2 - buffer[idx2] += a*xU_1 - quadratic_constant -= a*xU_1*xL_2 - - elseif !isinf(xL_1) && !isinf(xU_2) - buffer[idx1] += a*xU_2 - buffer[idx2] += a*xL_1 - quadratic_constant -= a*xL_1*xU_2 - - else - return false - end - end - end - end - - for term in func.affine_terms - a0 = term.coefficient - idx = term.variable_index.value - buffer[idx] += a0 - end - - count = 1 - for (key, value) in buffer - saf.terms[count] = SAT(value, VI(key)) - buffer[key] = 0.0 - count += 1 - end - saf.constant = quadratic_constant - - return true -end - -""" -$(TYPEDSIGNATURES) -""" -function relax!(m::Optimizer, f::BufferedQuadraticIneq, indx::Int, check_safe::Bool) - - constraint_tol = m._parameters.absolute_constraint_feas_tolerance - finite_cut_generated = affine_relax_quadratic!(f.func, f.buffer, f.saf, m._current_node, m._sol_to_branch_map, 
m._current_xref) - if finite_cut_generated - if !check_safe || is_safe_cut!(m, f.saf) - lt = LT(-f.saf.constant + constraint_tol) - f.saf.constant = 0.0 - ci = MOI.add_constraint(m.relaxed_optimizer, f.saf, lt) - push!(m._buffered_quadratic_ineq_ci, ci) - end - end - #m.relaxed_to_problem_map[ci] = indx - - return nothing -end - -""" -$(TYPEDSIGNATURES) -""" -function relax!(m::Optimizer, f::BufferedQuadraticEq, indx::Int, check_safe::Bool) - - constraint_tol = m._parameters.absolute_constraint_feas_tolerance - finite_cut_generated = affine_relax_quadratic!(f.func, f.buffer, f.saf, m._current_node, m._sol_to_branch_map, m._current_xref) - if finite_cut_generated - if !check_safe || is_safe_cut!(m, f.saf) - lt = LT(-f.saf.constant + constraint_tol) - f.saf.constant = 0.0 - ci = MOI.add_constraint(m.relaxed_optimizer, f.saf, lt) - push!(m._buffered_quadratic_eq_ci, ci) - end - end - #m.relaxed_to_problem_map[ci] = indx - - finite_cut_generated = affine_relax_quadratic!(f.minus_func, f.buffer, f.saf, m._current_node, m._sol_to_branch_map, m._current_xref) - if finite_cut_generated - if !check_safe || is_safe_cut!(m, f.saf) - lt = LT(-f.saf.constant + constraint_tol) - f.saf.constant = 0.0 - ci = MOI.add_constraint(m.relaxed_optimizer, f.saf, lt) - push!(m._buffered_quadratic_eq_ci, ci) - end - end - #m.relaxed_to_problem_map[ci] = indx - - return nothing -end - -""" -$(FUNCTIONNAME) -""" -function affine_relax_nonlinear!(f::BufferedNonlinearFunction{MC{N,T}}, evaluator::Evaluator, - use_cvx::Bool, new_pass::Bool, is_constraint::Bool) where {N,T<:RelaxTag} - - if new_pass - forward_pass!(evaluator, f) - end - x = evaluator.x - finite_cut = true - - expr = f.expr - grad_sparsity = expr.grad_sparsity - if expr.isnumber[1] - f.saf.constant = expr.numberstorage[1] - for i = 1:N - vval = @inbounds grad_sparsity[i] - f.saf.terms[i] = SAT(0.0, VI(vval)) - end - - else - setvalue = expr.setstorage[1] - finite_cut &= !(isempty(setvalue) || isnan(setvalue)) - - if finite_cut - 
value = f.expr.setstorage[1] - f.saf.constant = use_cvx ? value.cv : -value.cc - for i = 1:N - vval = @inbounds grad_sparsity[i] - if use_cvx - coef = @inbounds value.cv_grad[i] - else - coef = @inbounds -value.cc_grad[i] - end - f.saf.terms[i] = SAT(coef, VI(vval)) - xv = @inbounds x[vval] - f.saf.constant = sub_round(f.saf.constant , mul_round(coef, xv, RoundUp), RoundDown) - end - if is_constraint - bnd_used = use_cvx ? -f.upper_bound : f.lower_bound - f.saf.constant = add_round(f.saf.constant, bnd_used, RoundDown) - end - end - end - - return finite_cut -end - -""" -$(TYPEDSIGNATURES) -""" -function check_set_affine_nl!(m::Optimizer, f::BufferedNonlinearFunction{MC{N,T}}, finite_cut_generated::Bool, check_safe::Bool) where {N,T<:RelaxTag} - - constraint_tol = m._parameters.absolute_constraint_feas_tolerance - if finite_cut_generated - if !check_safe || is_safe_cut!(m, f.saf) - lt = LT(-f.saf.constant + constraint_tol) - f.saf.constant = 0.0 - ci = MOI.add_constraint(m.relaxed_optimizer, f.saf, lt) - push!(m._buffered_nonlinear_ci, ci) - end - end - - return nothing -end - -""" -$(TYPEDSIGNATURES) -""" -function relax!(m::Optimizer, f::BufferedNonlinearFunction{MC{N,T}}, indx::Int, check_safe::Bool) where {N,T<:RelaxTag} - evaluator = m._working_problem._relaxed_evaluator - - finite_cut_generated = affine_relax_nonlinear!(f, evaluator, true, true, true) - check_set_affine_nl!(m, f, finite_cut_generated, check_safe) - - finite_cut_generated = affine_relax_nonlinear!(f, evaluator, false, false, true) - check_set_affine_nl!(m, f, finite_cut_generated, check_safe) - - return nothing -end - -""" -$(TYPEDSIGNATURES) -""" -function bound_objective(t::ExtensionType, m::Optimizer) - - n = m._current_node - sb_map = m._sol_to_branch_map - wp = m._working_problem - obj_type = wp._objective_type - - if obj_type === NONLINEAR - - # assumes current node has already been loaded into evaluator - objective_lo = lower_interval_bound(m, wp._objective_nl, n) - - elseif obj_type === 
SINGLE_VARIABLE - obj_indx = @inbounds sb_map[wp._objective_sv.variable.value] - objective_lo = @inbounds n.lower_variable_bounds[obj_indx] - - elseif obj_type === SCALAR_AFFINE - objective_lo = lower_interval_bound(wp._objective_saf_parsed, n) - - elseif obj_type === SCALAR_QUADRATIC - objective_lo = lower_interval_bound(wp._objective_sqf, n) - - end - - return objective_lo -end -bound_objective(m::Optimizer) = bound_objective(m.ext_type, m) - -""" -$(TYPEDSIGNATURES) -""" -function relax_objective_nonlinear!(m::Optimizer, wp::ParsedProblem, check_safe::Bool) - - relaxed_optimizer = m.relaxed_optimizer - relaxed_evaluator = wp._relaxed_evaluator - buffered_nl = wp._objective_nl - - new_flag = m._new_eval_objective - relaxed_evaluator.is_first_eval = new_flag - finite_cut_generated = affine_relax_nonlinear!(buffered_nl, relaxed_evaluator, true, new_flag, false) - relaxed_evaluator.is_first_eval = false - - if finite_cut_generated - if !check_safe || is_safe_cut!(m, buffered_nl.saf) - copyto!(wp._objective_saf.terms, buffered_nl.saf.terms) - wp._objective_saf.constant = buffered_nl.saf.constant - MOI.set(relaxed_optimizer, MOI.ObjectiveFunction{SAF}(), wp._objective_saf) - end - end - - return nothing -end - -""" -$(TYPEDSIGNATURES) - -Triggers an evaluation of the objective function and then updates -the affine relaxation of the objective function. -""" -function relax_objective!(t::ExtensionType, m::Optimizer, q::Int64) - - relaxed_optimizer = m.relaxed_optimizer - m._working_problem._relaxed_evaluator - - # Add objective - wp = m._working_problem - obj_type = wp._objective_type - check_safe = (q === 1) ? 
false : m._parameters.cut_safe_on - - if obj_type === SINGLE_VARIABLE - MOI.set(relaxed_optimizer, MOI.ObjectiveFunction{SV}(), wp._objective_sv) - - elseif obj_type === SCALAR_AFFINE - MOI.set(relaxed_optimizer, MOI.ObjectiveFunction{SAF}(), wp._objective_saf) - - elseif obj_type === SCALAR_QUADRATIC - buffered_sqf = wp._objective_sqf - finite_cut_generated = affine_relax_quadratic!(buffered_sqf.func, buffered_sqf.buffer, buffered_sqf.saf, - m._current_node, m._sol_to_branch_map, m._current_xref) - if finite_cut_generated - if !check_safe || is_safe_cut!(m, buffered_sqf.saf) - copyto!(wp._objective_saf.terms, buffered_sqf.saf.terms) - wp._objective_saf.constant = buffered_sqf.saf.constant - MOI.set(relaxed_optimizer, MOI.ObjectiveFunction{SAF}(), wp._objective_saf) - end - end - - elseif obj_type === NONLINEAR - relax_objective_nonlinear!(m, wp, check_safe) - end - - m._new_eval_objective = false - - return nothing -end -relax_objective!(m::Optimizer, q::Int64) = relax_objective!(m.ext_type, m, q) - -""" - -Triggers an evaluation of the nonlinear objective function (if necessary) -and adds the corresponding ` <= b` objective cut. 
-""" -function objective_cut_nonlinear!(m::Optimizer, wp::ParsedProblem, UBD::Float64, check_safe::Bool) - - relaxed_optimizer = m.relaxed_optimizer - relaxed_evaluator = wp._relaxed_evaluator - buffered_nl = wp._objective_nl - - # if the objective cut is the first evaluation of the objective expression - # then perform a a forward pass - new_flag = m._new_eval_objective - relaxed_evaluator.is_first_eval = new_flag - finite_cut_generated = affine_relax_nonlinear!(buffered_nl, relaxed_evaluator, true, new_flag, false) - - constraint_tol = m._parameters.absolute_constraint_feas_tolerance - if finite_cut_generated - copyto!(wp._objective_saf.terms, buffered_nl.saf.terms) - wp._objective_saf.constant = 0.0 - if !check_safe || is_safe_cut!(m, buffered_nl.saf) - # TODO: When we introduce numerically safe McCormick operators we'll need to replace - # the UBD - buffered_nl.saf.constant with a correctly rounded version. For now, - # a small factor is added to the UBD calculation initially which should be sufficient. - ci_saf = MOI.add_constraint(m.relaxed_optimizer, wp._objective_saf, LT(UBD - buffered_nl.saf.constant + constraint_tol)) - push!(m._objective_cut_ci_saf, ci_saf) - end - end - - m._new_eval_objective = false - - return nothing -end - -""" -$(FUNCTIONNAME) - -Adds linear objective cut constraint to the `x.relaxed_optimizer`. 
-""" -function objective_cut!(m::Optimizer, check_safe::Bool) - - UBD = m._global_upper_bound - constraint_tol = m._parameters.absolute_constraint_feas_tolerance - if m._parameters.objective_cut_on && m._global_upper_bound < Inf - - wp = m._working_problem - obj_type = wp._objective_type - - if obj_type === SINGLE_VARIABLE - if !isinf(UBD) && (m._objective_cut_ci_sv.value === -1) - m._objective_cut_ci_sv = CI{SV,LT}(wp._objective_sv.variable.value) - MOI.set(m.relaxed_optimizer, MOI.ConstraintSet(), m._objective_cut_ci_sv, LT(UBD)) - else - MOI.set(m.relaxed_optimizer, MOI.ConstraintSet(), m._objective_cut_ci_sv, LT(UBD)) - end - - elseif obj_type === SCALAR_AFFINE - formulated_constant = wp._objective_saf.constant - wp._objective_saf.constant = 0.0 - if check_safe && is_safe_cut!(m, wp._objective_saf) - ci_saf = MOI.add_constraint(m.relaxed_optimizer, wp._objective_saf, LT(UBD - wp._objective_saf.constant + constraint_tol)) - push!(m._objective_cut_ci_saf, ci_saf) - end - wp._objective_saf.constant = formulated_constant - - elseif obj_type === SCALAR_QUADRATIC - buffered_sqf = wp._objective_sqf - finite_cut_generated = affine_relax_quadratic!(buffered_sqf.func, buffered_sqf.buffer, - buffered_sqf.saf, m._current_node, m._sol_to_branch_map, - m._current_xref) - - if finite_cut_generated - if !check_safe || is_safe_cut!(m, buffered_sqf.saf) - copyto!(wp._objective_saf.terms, buffered_sqf.saf.terms) - wp._objective_saf.constant = 0.0 - ci_saf = MOI.add_constraint(m.relaxed_optimizer, wp._objective_saf, LT(UBD - buffered_sqf.saf.constant + constraint_tol)) - push!(m._objective_cut_ci_saf, ci_saf) - end - end - - elseif obj_type === NONLINEAR - objective_cut_nonlinear!(m, wp, UBD, check_safe) - end - - m._new_eval_objective = false - end - - return nothing -end - -""" -$(TYPEDSIGNATURES) - -A routine that adds relaxations for all nonlinear constraints and quadratic constraints -corresponding to the current node to the relaxed problem. 
This adds an objective cut -(if specified by `objective_cut_on`) and then sets the `_new_eval_constraint` flag -to false indicating that an initial evaluation of the constraints has occurred. If -the `objective_cut_on` flag is `true` then the `_new_eval_objective` flag is also -set to `false` indicating that the objective expression was evaluated. -""" -function relax_all_constraints!(t::ExtensionType, m::Optimizer, q::Int64) - - check_safe = (q === 1) ? false : m._parameters.cut_safe_on - m._working_problem._relaxed_evaluator.is_first_eval = m._new_eval_constraint - - sqf_leq_list = m._working_problem._sqf_leq - for i = 1:m._working_problem._sqf_leq_count - sqf_leq = @inbounds sqf_leq_list[i] - relax!(m, sqf_leq, i, check_safe) - end - - sqf_eq_list = m._working_problem._sqf_eq - for i = 1:m._working_problem._sqf_eq_count - sqf_eq = @inbounds sqf_eq_list[i] - relax!(m, sqf_eq, i, check_safe) - end - - nl_list = m._working_problem._nonlinear_constr - for i = 1:m._working_problem._nonlinear_count - nl = @inbounds nl_list[i] - relax!(m, nl, i, check_safe) - end - - m._new_eval_constraint = false - - objective_cut!(m, check_safe) - - return nothing -end -relax_constraints!(t::ExtensionType, m::Optimizer, q::Int64) = relax_all_constraints!(t, m, q) -relax_constraints!(m::Optimizer, q::Int64) = relax_constraints!(m.ext_type, m, q) - -""" -$(FUNCTIONNAME) - -Deletes all nonlinear constraints added to the relaxed optimizer. 
-""" -function delete_nl_constraints!(m::Optimizer) - - # delete affine relaxations added from quadratic inequality - for ci in m._buffered_quadratic_ineq_ci - MOI.delete(m.relaxed_optimizer, ci) - end - empty!(m._buffered_quadratic_ineq_ci) - - # delete affine relaxations added from quadratic equality - for ci in m._buffered_quadratic_eq_ci - MOI.delete(m.relaxed_optimizer, ci) - end - empty!(m._buffered_quadratic_eq_ci) - - # delete affine relaxations added from nonlinear inequality - for ci in m._buffered_nonlinear_ci - MOI.delete(m.relaxed_optimizer, ci) - end - empty!(m._buffered_nonlinear_ci) - - return nothing -end - -""" -$(FUNCTIONNAME) - -Deletes all scalar-affine objective cuts added to the relaxed optimizer. -""" -function delete_objective_cuts!(m::Optimizer) - - for ci in m._objective_cut_ci_saf - MOI.delete(m.relaxed_optimizer, ci) - end - empty!(m._objective_cut_ci_saf) - - return nothing -end - -""" -$(FUNCTIONNAME) - -""" -function set_first_relax_point!(m::Optimizer) - - m._working_problem._relaxed_evaluator.is_first_eval = true - m._new_eval_constraint = true - m._new_eval_objective = true - - n = m._current_node - @__dot__ m._current_xref = 0.5*(n.upper_variable_bounds + n.lower_variable_bounds) - unsafe_check_fill!(isnan, m._current_xref, 0.0, length(m._current_xref)) - - return nothing -end diff --git a/src/eago_optimizer/subsolver_config/config.jl b/src/eago_optimizer/subsolver_config/config.jl deleted file mode 100644 index c38c1044..00000000 --- a/src/eago_optimizer/subsolver_config/config.jl +++ /dev/null @@ -1,63 +0,0 @@ -# Copyright (c) 2018: Matthew Wilhelm & Matthew Stuber. 
-# This code is licensed under MIT license (see LICENSE.md for full details) -############################################################################# -# EAGO -# A development environment for robust and global optimization -# See https://github.com/PSORLab/EAGO.jl -############################################################################# -# src/eago_optimizer/subsolver_config/config.jl -# Contains subroutines used to set default configuration for select supported -# solvers along with routines needed to adjust tolerances to mirror tolerance -# adjustments in the global solve. -############################################################################# - -function set_default_config_udf!(ext::ExtensionType, m::MOI.AbstractOptimizer) end - -include("config/cbc.jl") -include("config/clp.jl") -include("config/cosmo.jl") -include("config/cplex.jl") -include("config/ecos.jl") -include("config/glpk.jl") -include("config/gurobi.jl") -include("config/ipopt.jl") -include("config/knitro.jl") -include("config/mosek.jl") -include("config/scs.jl") -include("config/tulip.jl") - -function set_default_config!(ext::DefaultExt, m::T) where {T <: MOI.AbstractOptimizer} - solver_name = MOI.get(m, MOI.SolverName()) - if solver_name == "Ipopt" - set_default_config!(Val{:ipopt}(), ext, m) - elseif solver_name == "GLPK" - set_default_config!(Val{:glpk}(), ext, m) - elseif solver_name == "CPLEX" - set_default_config!(Val{:cplex}(), ext, m) - elseif solver_name == "Clp" - set_default_config!(Val{:clp}(), ext, m) - elseif solver_name == "COIN Branch-and-Cut (Cbc)" - set_default_config!(Val{:cbc}(), ext, m) - elseif solver_name == "SCS" - set_default_config!(Val{:scs}(), ext, m) - elseif solver_name == "Mosek" - set_default_config!(Val{:mosek}(), ext, m) - elseif solver_name == "ECOS" - set_default_config!(Val{:ecos}(), ext, m) - elseif solver_name == "COSMO" - set_default_config!(Val{:cosmo}(), ext, m) - elseif solver_name == "Tulip" - set_default_config!(Val{:tulip}(), ext, m) - 
elseif solver_name == "Gurobi" - set_default_config!(Val{:gurobi}(), ext, m) - elseif solver_name == "Knitro" - set_default_config!(Val{:knitro}(), ext, m) - else - set_default_config_udf!(ext, m) - end - return nothing -end - -function set_default_config!(ext::ExtensionType, m::T) where {T <: MOI.AbstractOptimizer} - set_default_config!(DefaultExt(), m::T) -end diff --git a/src/eago_optimizer/subsolver_config/config/cbc.jl b/src/eago_optimizer/subsolver_config/config/cbc.jl deleted file mode 100644 index c35d5e3a..00000000 --- a/src/eago_optimizer/subsolver_config/config/cbc.jl +++ /dev/null @@ -1,15 +0,0 @@ -# Copyright (c) 2018: Matthew Wilhelm & Matthew Stuber. -# This code is licensed under MIT license (see LICENSE.md for full details) -############################################################################# -# EAGO -# A development environment for robust and global optimization -# See https://github.com/PSORLab/EAGO.jl -############################################################################# -# src/eago_optimizer/subsolver_config/config/cbc.jl -# Configuration adjustment subroutines for Cbc. -############################################################################# - -function set_default_config!(::Val{:cbc}, ext::ExtensionType, m::T) where T <: MOI.AbstractOptimizer - - return nothing -end diff --git a/src/eago_optimizer/subsolver_config/config/clp.jl b/src/eago_optimizer/subsolver_config/config/clp.jl deleted file mode 100644 index cea18438..00000000 --- a/src/eago_optimizer/subsolver_config/config/clp.jl +++ /dev/null @@ -1,25 +0,0 @@ -# Copyright (c) 2018: Matthew Wilhelm & Matthew Stuber. 
-# This code is licensed under MIT license (see LICENSE.md for full details) -############################################################################# -# EAGO -# A development environment for robust and global optimization -# See https://github.com/PSORLab/EAGO.jl -############################################################################# -# src/eago_optimizer/subsolver_config/config/clp.jl -# Configuration adjustment subroutines for Clp. -############################################################################# - -function set_default_config!(::Val{:clp}, ext::ExtensionType, m::T) where T <: MOI.AbstractOptimizer - - MOI.set(m, MOI.RawParameter("PrimalTolerance"), 1E-7) - MOI.set(m, MOI.RawParameter("DualTolerance"), 1E-7) - MOI.set(m, MOI.RawParameter("DualObjectiveLimit"), 1e308) - MOI.set(m, MOI.RawParameter("MaximumIterations"), 2147483647) - MOI.set(m, MOI.RawParameter("PresolveType"), 0) - MOI.set(m, MOI.RawParameter("SolveType"), 5) - MOI.set(m, MOI.RawParameter("InfeasibleReturn"), 1) - MOI.set(m, MOI.RawParameter("Scaling"), 3) - MOI.set(m, MOI.RawParameter("Perturbation"), 100) - - return nothing -end diff --git a/src/eago_optimizer/subsolver_config/config/cosmo.jl b/src/eago_optimizer/subsolver_config/config/cosmo.jl deleted file mode 100644 index 72afc518..00000000 --- a/src/eago_optimizer/subsolver_config/config/cosmo.jl +++ /dev/null @@ -1,15 +0,0 @@ -# Copyright (c) 2018: Matthew Wilhelm & Matthew Stuber. -# This code is licensed under MIT license (see LICENSE.md for full details) -############################################################################# -# EAGO -# A development environment for robust and global optimization -# See https://github.com/PSORLab/EAGO.jl -############################################################################# -# src/eago_optimizer/subsolver_config/config/cosmo.jl -# Configuration adjustment subroutines for COSMO. 
-############################################################################# - -function set_default_config!(::Val{:cosmo}, ext::ExtensionType, m::T) where T <: MOI.AbstractOptimizer - - return nothing -end diff --git a/src/eago_optimizer/subsolver_config/config/cplex.jl b/src/eago_optimizer/subsolver_config/config/cplex.jl deleted file mode 100644 index fecdb7a0..00000000 --- a/src/eago_optimizer/subsolver_config/config/cplex.jl +++ /dev/null @@ -1,15 +0,0 @@ -# Copyright (c) 2018: Matthew Wilhelm & Matthew Stuber. -# This code is licensed under MIT license (see LICENSE.md for full details) -############################################################################# -# EAGO -# A development environment for robust and global optimization -# See https://github.com/PSORLab/EAGO.jl -############################################################################# -# src/eago_optimizer/subsolver_config/config/cplex.jl -# Configuration adjustment subroutines for CPLEX. -############################################################################# - -function set_default_config!(::Val{:cplex}, ext::ExtensionType, m::T) where {T <: MOI.AbstractOptimizer} - - return nothing -end diff --git a/src/eago_optimizer/subsolver_config/config/ecos.jl b/src/eago_optimizer/subsolver_config/config/ecos.jl deleted file mode 100644 index 2c41ed86..00000000 --- a/src/eago_optimizer/subsolver_config/config/ecos.jl +++ /dev/null @@ -1,15 +0,0 @@ -# Copyright (c) 2018: Matthew Wilhelm & Matthew Stuber. -# This code is licensed under MIT license (see LICENSE.md for full details) -############################################################################# -# EAGO -# A development environment for robust and global optimization -# See https://github.com/PSORLab/EAGO.jl -############################################################################# -# src/eago_optimizer/subsolver_config/config/ecos.jl -# Configuration adjustment subroutines for ECOS. 
-############################################################################# - -function set_default_config!(::Val{:ecos}, ext::ExtensionType, m::T) where {T <: MOI.AbstractOptimizer} - - return nothing -end diff --git a/src/eago_optimizer/subsolver_config/config/glpk.jl b/src/eago_optimizer/subsolver_config/config/glpk.jl deleted file mode 100644 index 93a1f721..00000000 --- a/src/eago_optimizer/subsolver_config/config/glpk.jl +++ /dev/null @@ -1,15 +0,0 @@ -# Copyright (c) 2018: Matthew Wilhelm & Matthew Stuber. -# This code is licensed under MIT license (see LICENSE.md for full details) -############################################################################# -# EAGO -# A development environment for robust and global optimization -# See https://github.com/PSORLab/EAGO.jl -############################################################################# -# src/eago_optimizer/subsolver_config/config/glpk.jl -# Configuration adjustment subroutines for GLPK. -############################################################################# - -function set_default_config!(::Val{:glpk}, ext::ExtensionType, m::T) where {T <: MOI.AbstractOptimizer} - - return nothing -end diff --git a/src/eago_optimizer/subsolver_config/config/gurobi.jl b/src/eago_optimizer/subsolver_config/config/gurobi.jl deleted file mode 100644 index 0bb15ed9..00000000 --- a/src/eago_optimizer/subsolver_config/config/gurobi.jl +++ /dev/null @@ -1,15 +0,0 @@ -# Copyright (c) 2018: Matthew Wilhelm & Matthew Stuber. -# This code is licensed under MIT license (see LICENSE.md for full details) -############################################################################# -# EAGO -# A development environment for robust and global optimization -# See https://github.com/PSORLab/EAGO.jl -############################################################################# -# src/eago_optimizer/subsolver_config/config/gurobi.jl -# Configuration adjustment subroutines for Gurobi. 
-############################################################################# - -function set_default_config!(::Val{:gurobi}, ext::ExtensionType, m::T) where {T <: MOI.AbstractOptimizer} - - return nothing -end diff --git a/src/eago_optimizer/subsolver_config/config/ipopt.jl b/src/eago_optimizer/subsolver_config/config/ipopt.jl deleted file mode 100644 index 5eafcc27..00000000 --- a/src/eago_optimizer/subsolver_config/config/ipopt.jl +++ /dev/null @@ -1,24 +0,0 @@ -# Copyright (c) 2018: Matthew Wilhelm & Matthew Stuber. -# This code is licensed under MIT license (see LICENSE.md for full details) -############################################################################# -# EAGO -# A development environment for robust and global optimization -# See https://github.com/PSORLab/EAGO.jl -############################################################################# -# src/eago_optimizer/subsolver_config/config/ipopt.jl -# Configuration adjustment subroutines for Ipopt. -############################################################################# - -function set_default_config!(::Val{:ipopt}, ext::ExtensionType, m::T) where {T <: MOI.AbstractOptimizer} - - MOI.set(m, MOI.RawParameter("max_iter"),3000) - MOI.set(m, MOI.RawParameter("acceptable_tol"), 1E30) - MOI.set(m, MOI.RawParameter("acceptable_iter"), 300) - MOI.set(m, MOI.RawParameter("constr_viol_tol"), 0.000001) - MOI.set(m, MOI.RawParameter("acceptable_compl_inf_tol"), 0.000001) - MOI.set(m, MOI.RawParameter("acceptable_dual_inf_tol"), 1.0) - MOI.set(m, MOI.RawParameter("acceptable_constr_viol_tol"), 0.000001) - MOI.set(m, MOI.RawParameter("print_level"), 0) - - return nothing -end diff --git a/src/eago_optimizer/subsolver_config/config/knitro.jl b/src/eago_optimizer/subsolver_config/config/knitro.jl deleted file mode 100644 index 707853d9..00000000 --- a/src/eago_optimizer/subsolver_config/config/knitro.jl +++ /dev/null @@ -1,15 +0,0 @@ -# Copyright (c) 2018: Matthew Wilhelm & Matthew Stuber. 
-# This code is licensed under MIT license (see LICENSE.md for full details) -############################################################################# -# EAGO -# A development environment for robust and global optimization -# See https://github.com/PSORLab/EAGO.jl -############################################################################# -# src/eago_optimizer/subsolver_config/config/knitro.jl -# Configuration adjustment subroutines for KNITRO. -############################################################################# - -function set_default_config!(::Val{:knitro}, ext::ExtensionType, m::T) where {T <: MOI.AbstractOptimizer} - - return nothing -end diff --git a/src/eago_optimizer/subsolver_config/config/mosek.jl b/src/eago_optimizer/subsolver_config/config/mosek.jl deleted file mode 100644 index 7da44732..00000000 --- a/src/eago_optimizer/subsolver_config/config/mosek.jl +++ /dev/null @@ -1,15 +0,0 @@ -# Copyright (c) 2018: Matthew Wilhelm & Matthew Stuber. -# This code is licensed under MIT license (see LICENSE.md for full details) -############################################################################# -# EAGO -# A development environment for robust and global optimization -# See https://github.com/PSORLab/EAGO.jl -############################################################################# -# src/eago_optimizer/subsolver_config/config/mosek.jl -# Configuration adjustment subroutines for Mosek. -############################################################################# - -function set_default_config!(::Val{:mosek}, ext::ExtensionType, m::T) where {T <: MOI.AbstractOptimizer} - - return nothing -end diff --git a/src/eago_optimizer/subsolver_config/config/scs.jl b/src/eago_optimizer/subsolver_config/config/scs.jl deleted file mode 100644 index c557d16a..00000000 --- a/src/eago_optimizer/subsolver_config/config/scs.jl +++ /dev/null @@ -1,15 +0,0 @@ -# Copyright (c) 2018: Matthew Wilhelm & Matthew Stuber. 
-# This code is licensed under MIT license (see LICENSE.md for full details) -############################################################################# -# EAGO -# A development environment for robust and global optimization -# See https://github.com/PSORLab/EAGO.jl -############################################################################# -# src/eago_optimizer/subsolver_config/config/scs.jl -# Configuration adjustment subroutines for SCS. -############################################################################# - -function set_default_config!(::Val{:scs}, ext::ExtensionType, m::T) where {T <: MOI.AbstractOptimizer} - - return nothing -end diff --git a/src/eago_optimizer/subsolver_config/config/tulip.jl b/src/eago_optimizer/subsolver_config/config/tulip.jl deleted file mode 100644 index 0d3bccca..00000000 --- a/src/eago_optimizer/subsolver_config/config/tulip.jl +++ /dev/null @@ -1,15 +0,0 @@ -# Copyright (c) 2018: Matthew Wilhelm & Matthew Stuber. -# This code is licensed under MIT license (see LICENSE.md for full details) -############################################################################# -# EAGO -# A development environment for robust and global optimization -# See https://github.com/PSORLab/EAGO.jl -############################################################################# -# src/eago_optimizer/subsolver_config/config/tulip.jl -# Configuration adjustment subroutines for Tulip. -############################################################################# - -function set_default_config!(::Val{:tulip}, ext::ExtensionType, m::T) where {T <: MOI.AbstractOptimizer} - - return nothing -end diff --git a/src/eago_optimizer/types/extension.jl b/src/eago_optimizer/types/extension.jl new file mode 100644 index 00000000..446cb5c2 --- /dev/null +++ b/src/eago_optimizer/types/extension.jl @@ -0,0 +1,11 @@ +""" +$(TYPEDEF) + +An abstract type the subtypes of which are associated with functions method +overloaded for for new extensions. 
An instance of the `DefaultExt <:ExtensionType` +structure to the `Optimizer` in the `ext_type` field. +""" +abstract type ExtensionType end +struct DefaultExt <: ExtensionType end +MOIU.map_indices(::Function, x::ExtensionType) = x +MOIU.map_indices(::Function, x::DefaultExt) = x diff --git a/src/eago_optimizer/types/global_optimizer.jl b/src/eago_optimizer/types/global_optimizer.jl new file mode 100644 index 00000000..09079770 --- /dev/null +++ b/src/eago_optimizer/types/global_optimizer.jl @@ -0,0 +1,769 @@ +@enum(BranchCost, BC_INFEASIBLE, BC_INTERVAL, BC_INTERVAL_REV, BC_INTERVAL_LP, BC_INTERVAL_LP_REV) + +Base.@kwdef mutable struct BranchCostStorage{T<:Real} + cost::BranchCost = BC_INTERVAL + 𝛹n::Vector{T} = T[] + 𝛹p::Vector{T} = T[] + δn::Vector{T} = T[] + δp::Vector{T} = T[] + ηn::Vector{T} = T[] + ηp::Vector{T} = T[] + μ1::T = 0.1 + μ2::T = 1.3 + μ3::T = 0.8 + β::T = 0.05 + μ_score::T = 0.15 +end +function initialize!(d::BranchCostStorage{T}, n::Int) where T <:AbstractFloat + append!(d.𝛹n, ones(T,n)); append!(d.𝛹p, ones(T,n)) + append!(d.δn, zeros(T,n)); append!(d.δp, zeros(T,n)) + append!(d.ηn, zeros(T,n)); append!(d.ηp, zeros(T,n)) + return +end + +#= +LP -> COPY TO RELAXED SOLVER AND SOLVE +MILP -> COPY TO RELAXED SOLVER AND SOLVE +SOCP -> COPY TO RELAXED SOLVER AND SOLVE +MISOCP -> COPY TO RELAXED SOLVER AND SOLVE +DIFF_CVX -> COPY TO NLP SOLVER AND SOLVE (POTENTIAL MULTISTART) +NS_CVX -> COPY TO NLP SOLVER AND SOLVE (POTENTIAL MULTISTART) +DIFF_NCVX -> APPLY GLOBAL SOLVER (UNLESS USER REQUEST LOCAL SOLVE THEN NLP) +NS_NCVX -> APPLY GLOBAL SOLVER (UNLESS USER REQUEST LOCAL SOLVE THEN NLP) +MINCVX -> APPLY GLOBAL SOLVER (LOCAL SOLVE OPTION FUTURE FEATURE) +=# + +abstract type AbstractProblemType end +struct LP <: AbstractProblemType end +struct MILP <: AbstractProblemType end +struct SOCP <: AbstractProblemType end +struct MISOCP <: AbstractProblemType end +struct DIFF_CVX <: AbstractProblemType end +struct MINCVX <: AbstractProblemType end + +const 
ANY_PROBLEM_TYPE = Union{Nothing, LP, MILP, SOCP, MISOCP, DIFF_CVX, MINCVX} + +@enum(GlobalEndState, GS_OPTIMAL, GS_INFEASIBLE, GS_NODE_LIMIT, + GS_ITERATION_LIMIT, GS_RELATIVE_TOL, + GS_ABSOLUTE_TOL, GS_TIME_LIMIT, GS_UNSET) + +export EAGOParameters +""" +$(TYPEDEF) + +Storage for parameters that do not change during a global solve. + +$(TYPEDFIELDS) +""" +Base.@kwdef mutable struct EAGOParameters + + # Presolving options + "Should EAGO attempt to remove type assert issues for user-defined functions (default = false)" + presolve_scrubber_flag::Bool = false + "Create and use DAG representations of user-defined function (default = false)." + presolve_to_JuMP_flag::Bool = false + "Rerranges the DAG using registered transformations (default = false)" + presolve_flatten_flag::Bool = false + + # Conic reformulations + "Attempt to bridge convex constraint to second order cone" + conic_convert_quadratic::Bool = false + + # Iteration logging options + "Turns logging on records global bounds, node count and run time. Additional + options are available for recording information specific to subproblems (default = false)." + log_on::Bool = false + "Turns on logging of times and feasibility of subproblems (default = false)" + log_subproblem_info::Bool = false + "Log data every `log_interval` iterations (default = 1)." + log_interval::Int = 1 + + # Optimizer display options + "The amount of information that should be printed to console while solving + values range from 0 - 4: 0 is silent, 1 shows iteration summary statistics + only, 2-4 show varying degrees of details about calculations within each + iteration (default = 1)." + verbosity::Int = 1 + "Display summary of iteration to console every `output_iterations` (default = 1000)" + output_iterations::Int = 1000 + "Display header for summary to console every `output_iterations` (default = 10000)" + header_iterations::Int = 100000 + + # Node branching options + "Convex coefficient used to select branch point. 
Branch point is given by + `branch_cvx_factor*xmid + (1-branch_cvx_factor)*xsol` (default = 0.5)" + branch_cvx_factor::Float64 = 0.5 + "Minimum distance from bound to have branch point normalized by width of + dimension to branch on (default = 0.2)" + branch_offset::Float64 = 0.2 + "Indicates that pseudocost branching should be used" + branch_pseudocost_on::Bool = false + "Variables to branch on (default is all nonlinear)." + branch_variable::Vector{Bool} = Bool[] + "[FUTURE FEATURE, NOT CURRENTLY IMPLEMENTED] Number of times repeat node + processing priorto branching (default = 4)." + branch_max_repetitions::Int = 4 + "[FUTURE FEATURE, NOT CURRENTLY IMPLEMENTED] Volume ratio tolerance required + to repeat processing the current node (default = 0.9)" + branch_repetition_tol::Float64 = 0.9 + + # Termination limits + "Maximum number of nodes (default = 1E-7)" + node_limit::Int = 1*10^7 + "Maximum CPU time in seconds (default = 1000)" + time_limit::Float64 = 100.0 + "Maximum number of iterations (default 3E6)" + iteration_limit::Int = 1E9 #2*10^5 + "Absolute tolerance for termination (default = 1E-3)" + absolute_tolerance::Float64 = 1E-4 + "Relative tolerance for termination (default = 1E-3)" + relative_tolerance::Float64 = 1E-4 + "Absolute constraint feasibility tolerance" + absolute_constraint_feas_tolerance::Float64 = 1E-8 + + # Options for constraint propagation + "Depth in B&B tree above which constraint propagation should be disabled (default = 1000)" + cp_depth::Int = 0 + "Number of times to repeat forward-reverse pass routine (default = 3)" + cp_repetitions::Int = 0 + "Disable constraint propagation if the ratio of new node volume to beginning node volume exceeds + this number (default = 0.99)" + cp_tolerance::Float64 = 0.99 + "Use only valid interval bounds during constraint propagation (default = false)" + cp_interval_only::Bool = false + + # obbt options + "Depth in B&B tree above which OBBT should be disabled (default = 6)" + obbt_depth::Int = 0 + "Number 
of repetitions of OBBT to perform in preprocessing (default = 3)" + obbt_repetitions::Int = 1 + "Turn aggresive OBBT on (default = false)" + obbt_aggressive_on::Bool = true + "Maximum iteration to perform aggresive OBBT (default = 2)" + obbt_aggressive_max_iteration::Int = 2 + "Minimum dimension to perform aggresive OBBT (default = 2)" + obbt_aggressive_min_dimension::Int = 2 + "Tolerance to consider bounds equal (default = 1E-10)" + obbt_tolerance::Float64 = 1E-10 + + # Options for linear bound tightening + "Depth in B&B tree above which linear FBBT should be disabled (default = 1000)" + fbbt_lp_depth::Int = 1000 + "Number of repetitions of linear FBBT to perform in preprocessing (default = 3)" + fbbt_lp_repetitions::Int = 3 + + # Duality-based bound tightening (DBBT) options + "Depth in B&B tree above which duality-based bound tightening should be disabled (default = 1E10)" + dbbt_depth::Int = 10^10 + "New bound is considered equal to the prior bound if within dbbt_tolerance (default = 1E-9)." + dbbt_tolerance::Float64 = 1E-8 + + # Subgradient tightening flag + "Relax Tag used to specify type of McCormick operator" + relax_tag::RelaxTag = NS() + "Perform tightening of interval bounds using subgradients at each factor in + each nonlinear tape during a forward pass (default = true)." + subgrad_tighten::Bool = true + "Perform tightening of interval bounds using subgradients at each factor in + each nonlinear tape during a reverse pass (default = false)." 
+ reverse_subgrad_tighten::Bool = false + "Outer round computed subgradient bounds by this amount" + subgrad_tol::Float64 = 1E-10 + mul_relax_style::Int = 0 + + # Tolerance to add cuts and max number of cuts + "Minimum number of cuts at each node to attempt (unsafe cuts not necessarily added)" + cut_min_iterations::Int = 2 + "Maximum number of cuts at each node to attempt" + cut_max_iterations::Int = 8 + "Absolute tolerance checked for continuing cut" + cut_tolerance_abs::Float64 = 1E-6 + "Relative tolerance checked for continuing cut" + cut_tolerance_rel::Float64 = 1E-2 + + "Use tolerances to determine safe cuts in a Khajavirad 2018 manner" + cut_safe_on::Bool = true + "Lower tolerance for safe-lp cut, Khajavirad 2018" + cut_safe_l::Float64 = 1E-7 + "Upper tolerance for safe-lp cut, Khajavirad 2018" + cut_safe_u::Float64 = 1E7 + "Constant tolerance for safe-lp cut, Khajavirad 2018" + cut_safe_b::Float64 = 1E9 + + "Solve upper problem for every node with depth less than `upper_bounding_depth` + and with a probabilityof (1/2)^(depth-upper_bounding_depth) otherwise (default = 8)" + upper_bounding_depth::Int = 8 + + # handling for domain violations + "Amount about a domain violation to ignore when propagating bounds." + domain_violation_guard_on::Bool = false + "Amount about a domain violation to ignore when propagating bounds." + domain_violation_ϵ::Float64 = 1E-9 + + "If true, then EAGO forgos its default configuration process for subsolvers" + user_solver_config::Bool = false + + integer_abs_tol::Float64 = 1E-9 + integer_rel_tol::Float64 = 1E-9 +end +const EAGO_PARAMETERS = fieldnames(EAGOParameters) + +""" +$(TYPEDEF) + +A structure used to hold objectives and constraints added to EAGO model. +The constraints generally aren't used for relaxations. 
"""
$(TYPEDEF)

A structure used to hold objectives and constraints added to EAGO model.
The constraints generally aren't used for relaxations.
"""
Base.@kwdef mutable struct InputProblem

    # variables (set by MOI.add_variable in variables.jl)
    #_variable_info::Vector{VariableInfo{Float64}} = VariableInfo{Float64}[]
    _variable_count::Int = 0
    _constraint_count::Int = 0

    # constraint index to function and set storage, one dictionary per (F,S) pair
    _vi_leq_constraints::Dict{CI{VI,LT}, Tuple{VI,LT}} = Dict{CI{VI,LT}, Tuple{VI,LT}}()
    _vi_geq_constraints::Dict{CI{VI,GT}, Tuple{VI,GT}} = Dict{CI{VI,GT}, Tuple{VI,GT}}()
    _vi_eq_constraints::Dict{CI{VI,ET}, Tuple{VI,ET}} = Dict{CI{VI,ET}, Tuple{VI,ET}}()
    _vi_it_constraints::Dict{CI{VI,IT}, Tuple{VI,IT}} = Dict{CI{VI,IT}, Tuple{VI,IT}}()
    _vi_zo_constraints::Dict{CI{VI,ZO}, Tuple{VI,ZO}} = Dict{CI{VI,ZO}, Tuple{VI,ZO}}()
    _vi_int_constraints::Dict{CI{VI,MOI.Integer}, Tuple{VI,MOI.Integer}} = Dict{CI{VI,MOI.Integer}, Tuple{VI,MOI.Integer}}()

    _linear_leq_constraints::Dict{CI{SAF,LT}, Tuple{SAF,LT}} = Dict{CI{SAF,LT}, Tuple{SAF,LT}}()
    _linear_geq_constraints::Dict{CI{SAF,GT}, Tuple{SAF,GT}} = Dict{CI{SAF,GT}, Tuple{SAF,GT}}()
    _linear_eq_constraints::Dict{CI{SAF,ET}, Tuple{SAF,ET}} = Dict{CI{SAF,ET}, Tuple{SAF,ET}}()

    _quadratic_leq_constraints::Dict{CI{SQF,LT}, Tuple{SQF,LT}} = Dict{CI{SQF,LT}, Tuple{SQF,LT}}()
    _quadratic_geq_constraints::Dict{CI{SQF,GT}, Tuple{SQF,GT}} = Dict{CI{SQF,GT}, Tuple{SQF,GT}}()
    _quadratic_eq_constraints::Dict{CI{SQF,ET}, Tuple{SQF,ET}} = Dict{CI{SQF,ET}, Tuple{SQF,ET}}()

    _conic_second_order::Dict{CI{VECOFVAR,SOC}, Tuple{VECOFVAR,SOC}} = Dict{CI{VECOFVAR,SOC}, Tuple{VECOFVAR,SOC}}()

    # primal storage (filled from the relaxed/upper subproblems by _extract_primal!)
    _linear_leq_primal::Dict{CI{SAF,LT},Float64} = Dict{CI{SAF,LT},Float64}()
    _linear_geq_primal::Dict{CI{SAF,GT},Float64} = Dict{CI{SAF,GT},Float64}()
    _linear_eq_primal::Dict{CI{SAF,ET},Float64} = Dict{CI{SAF,ET},Float64}()

    _quadratic_leq_primal::Dict{CI{SQF,LT},Float64} = Dict{CI{SQF,LT},Float64}()
    _quadratic_geq_primal::Dict{CI{SQF,GT},Float64} = Dict{CI{SQF,GT},Float64}()
    _quadratic_eq_primal::Dict{CI{SQF,ET},Float64} = Dict{CI{SQF,ET},Float64}()

    # maps constraint indices in a working subproblem to the input-problem indices
    _linear_leq_prob_to_ip::Dict{CI{SAF,LT},CI{SAF,LT}} = Dict{CI{SAF,LT},CI{SAF,LT}}()
    _linear_geq_prob_to_ip::Dict{CI{SAF,GT},CI{SAF,GT}} = Dict{CI{SAF,GT},CI{SAF,GT}}()
    _linear_eq_prob_to_ip::Dict{CI{SAF,ET},CI{SAF,ET}} = Dict{CI{SAF,ET},CI{SAF,ET}}()

    _quadratic_leq_prob_to_ip::Dict{CI{SQF,LT},CI{SQF,LT}} = Dict{CI{SQF,LT},CI{SQF,LT}}()
    _quadratic_geq_prob_to_ip::Dict{CI{SQF,GT},CI{SQF,GT}} = Dict{CI{SQF,GT},CI{SQF,GT}}()
    _quadratic_eq_prob_to_ip::Dict{CI{SQF,ET},CI{SQF,ET}} = Dict{CI{SQF,ET},CI{SQF,ET}}()

    # objective (variable, affine, quadratic, or unset)
    _objective::Union{VI,SAF,SQF,Nothing} = nothing

    # nlp constraints (set by MOI.set(m, ::NLPBlockData...) in optimizer.jl)
    _nlp_data::Union{MOI.NLPBlockData,Nothing} = nothing

    # objective sense information (set by MOI.set(m, ::ObjectiveSense...) in optimizer.jl)
    _optimization_sense::MOI.OptimizationSense = MOI.MIN_SENSE
end

function MOI.empty!(ip::InputProblem)

    # empty collection-valued fields in place (avoids reallocating the dictionaries)
    for field in fieldnames(InputProblem)
        field_value = getfield(ip, field)
        if (field_value isa Array) || (field_value isa Dict)
            empty!(field_value)
        end
    end

    # reset scalar fields to their constructed defaults
    ip._variable_count = 0
    ip._constraint_count = 0
    ip._objective = nothing
    ip._nlp_data = nothing
    ip._optimization_sense = MOI.MIN_SENSE
    return
end

function Base.isempty(x::InputProblem)

    is_empty_flag = true
    new_input_problem = InputProblem()   # reference instance holding default field values

    for field in fieldnames(InputProblem)
        field_value = getfield(x, field)
        if (field_value isa Array) || (field_value isa Dict)
            if !isempty(field_value)
                is_empty_flag = false
                break
            end
        elseif field_value isa Number
            if getfield(new_input_problem, field) != field_value
                is_empty_flag = false
                break
            end
        end
    end
    is_empty_flag &= isnothing(x._nlp_data)
    is_empty_flag &= isnothing(x._objective)
    # Bug fix: the sense is an enum (neither Array/Dict nor Number), so the loop above
    # skipped it and a problem holding only a MAX_SENSE setting was reported as empty.
    is_empty_flag &= x._optimization_sense == MOI.MIN_SENSE
    return is_empty_flag
end

_constraints(m::InputProblem, ::Type{VI}, ::Type{LT}) = m._vi_leq_constraints
_constraints(m::InputProblem, ::Type{VI}, ::Type{GT}) = m._vi_geq_constraints
_constraints(m::InputProblem, ::Type{VI}, ::Type{ET}) = m._vi_eq_constraints
_constraints(m::InputProblem, ::Type{VI}, ::Type{IT}) = m._vi_it_constraints
_constraints(m::InputProblem, ::Type{VI}, ::Type{ZO}) = m._vi_zo_constraints
_constraints(m::InputProblem, ::Type{VI}, ::Type{MOI.Integer}) = m._vi_int_constraints

_constraints(m::InputProblem, ::Type{SAF}, ::Type{LT}) = m._linear_leq_constraints
_constraints(m::InputProblem, ::Type{SAF}, ::Type{GT}) = m._linear_geq_constraints
_constraints(m::InputProblem, ::Type{SAF}, ::Type{ET}) = m._linear_eq_constraints

_constraints(m::InputProblem, ::Type{SQF}, ::Type{LT}) = m._quadratic_leq_constraints
_constraints(m::InputProblem, ::Type{SQF}, ::Type{GT}) = m._quadratic_geq_constraints
_constraints(m::InputProblem, ::Type{SQF}, ::Type{ET}) = m._quadratic_eq_constraints

_constraint_primal(m::InputProblem, ::Type{SAF}, ::Type{LT}) = m._linear_leq_primal
_constraint_primal(m::InputProblem, ::Type{SAF}, ::Type{GT}) = m._linear_geq_primal
_constraint_primal(m::InputProblem, ::Type{SAF}, ::Type{ET}) = m._linear_eq_primal

_constraint_primal(m::InputProblem, ::Type{SQF}, ::Type{LT}) = m._quadratic_leq_primal
_constraint_primal(m::InputProblem, ::Type{SQF}, ::Type{GT}) = m._quadratic_geq_primal
_constraint_primal(m::InputProblem, ::Type{SQF}, ::Type{ET}) = m._quadratic_eq_primal
+""" +function _extract_primal!(d, m::InputProblem, ::Type{F}, ::Type{S}) where {F,S} + for (k, v) in _constraint_index_to_ip(m, F, S) + _constraint_primal(m, F, S)[v] = MOI.get(d, MOI.ConstraintPrimal(), k) + end + return nothing +end + +function _extract_primal_linear!(d, ip::InputProblem) + _extract_primal!(d, ip, SAF, LT) + _extract_primal!(d, ip, SAF, GT) + _extract_primal!(d, ip, SAF, ET) +end +function _extract_primal_quadratic!(d, ip::InputProblem) + _extract_primal!(d, ip, SQF, LT) + _extract_primal!(d, ip, SQF, GT) + _extract_primal!(d, ip, SQF, ET) +end + +_constraint_index_to_ip(m::InputProblem, ::Type{SAF}, ::Type{LT}) = m._linear_leq_prob_to_ip +_constraint_index_to_ip(m::InputProblem, ::Type{SAF}, ::Type{GT}) = m._linear_geq_prob_to_ip +_constraint_index_to_ip(m::InputProblem, ::Type{SAF}, ::Type{ET}) = m._linear_eq_prob_to_ip + +_constraint_index_to_ip(m::InputProblem, ::Type{SQF}, ::Type{LT}) = m._quadratic_leq_prob_to_ip +_constraint_index_to_ip(m::InputProblem, ::Type{SQF}, ::Type{GT}) = m._quadratic_geq_prob_to_ip +_constraint_index_to_ip(m::InputProblem, ::Type{SQF}, ::Type{ET}) = m._quadratic_eq_prob_to_ip + +""" +Adds a constraint to the local problem storing the new constraint index and the associated +index in the input problem. 
"""
Adds a constraint to the local problem storing the new constraint index and the associated
index in the input problem.
"""
function _add_constraint_store_ci!(d, m::InputProblem, ::Type{F}, ::Type{S}) where {F,S}
    for (ci_ip, fs) in _constraints(m, F, S)
        ci_wp = MOI.add_constraint(d, fs[1], fs[2])
        _constraint_index_to_ip(m, F, S)[ci_wp] = ci_ip
    end
    return nothing
end

# Copy every affine (SAF) constraint of the input problem into `d`, recording the index map.
function _add_constraint_store_ci_linear!(d, ip::InputProblem)
    _add_constraint_store_ci!(d, ip, SAF, LT)
    _add_constraint_store_ci!(d, ip, SAF, GT)
    _add_constraint_store_ci!(d, ip, SAF, ET)
end
# Copy every quadratic (SQF) constraint of the input problem into `d`, recording the index map.
function _add_constraint_store_ci_quadratic!(d, ip::InputProblem)
    _add_constraint_store_ci!(d, ip, SQF, LT)
    _add_constraint_store_ci!(d, ip, SQF, GT)
    _add_constraint_store_ci!(d, ip, SQF, ET)
end

"""
$(TYPEDEF)

A structure used to store expressions and problem descriptions EAGO uses to formulate
relaxed problems.
"""
Base.@kwdef mutable struct ParsedProblem

    # Problem classification (set in parse_classify_problem!)
    _problem_type::ANY_PROBLEM_TYPE = nothing

    "_objective_saf stores the objective and is used for constructing linear affine cuts"
    _objective_saf::SAF = SAF(SAT[], 0.0)
    _objective::Union{VI,AffineFunctionIneq,BufferedQuadraticIneq,BufferedNonlinearFunction,Nothing} = nothing

    # objective sense information (set by convert_to_min in parse.jl)
    _optimization_sense::MOI.OptimizationSense = MOI.MIN_SENSE

    # non-single variable constraints (set in initial_parse)
    _saf_leq::Vector{AffineFunctionIneq} = AffineFunctionIneq[]
    _saf_eq::Vector{AffineFunctionEq} = AffineFunctionEq[]
    _sqf_leq::Vector{BufferedQuadraticIneq} = BufferedQuadraticIneq[]
    _sqf_eq::Vector{BufferedQuadraticEq} = BufferedQuadraticEq[]
    _conic_second_order::Vector{BufferedSOC} = BufferedSOC[]

    # nlp constraints
    _nlp_data::Union{MOI.NLPBlockData,Nothing} = nothing
    _nonlinear_constr::Vector{BufferedNonlinearFunction} = BufferedNonlinearFunction[]
    _relaxed_evaluator::Evaluator = Evaluator()

    # variables (set in initial_parse)
    _variable_info::Vector{VariableInfo{Float64}} = VariableInfo{Float64}[]
    _variable_count::Int = 0
end

function MOI.empty!(x::ParsedProblem)

    # empty collection-valued fields in place
    for field in fieldnames(ParsedProblem)
        field_value = getfield(x, field)
        if (field_value isa Array) || (field_value isa Dict)
            empty!(field_value)
        end
    end

    # reset remaining fields to constructed defaults
    x._objective = nothing
    x._problem_type = nothing
    x._nlp_data = nothing

    x._optimization_sense = MOI.MIN_SENSE
    x._relaxed_evaluator = Evaluator()
    x._objective_saf = SAF(SAT[], 0.0)
    x._variable_count = 0
    return
end

function Base.isempty(x::ParsedProblem)

    is_empty_flag = true

    new_input_problem = ParsedProblem()   # reference instance holding default field values
    for field in fieldnames(ParsedProblem)

        field_value = getfield(x, field)
        if field_value isa Array
            if !isempty(field_value)
                is_empty_flag = false
                break
            end

        elseif field_value isa Number
            if getfield(new_input_problem, field) != field_value
                is_empty_flag = false
                break
            end
        end
    end

    is_empty_flag &= isempty(x._objective_saf.terms)
    is_empty_flag &= iszero(x._objective_saf.constant)
    is_empty_flag &= isnothing(x._objective)
    is_empty_flag &= isnothing(x._nlp_data)
    # Bug fix: enum- and Nothing-valued fields were skipped by the loop above, so a
    # problem retaining only a sense or classification read as empty.
    is_empty_flag &= isnothing(x._problem_type)
    is_empty_flag &= x._optimization_sense == MOI.MIN_SENSE

    return is_empty_flag
end

# Central solver state: subsolvers, parameters, the input/working problems, and all
# branch-and-bound bookkeeping. `R`/`Q` are the relaxed/upper subsolver types and
# `S <: ExtensionType` selects user extension dispatch.
Base.@kwdef mutable struct GlobalOptimizer{R,Q,S<:ExtensionType} <: MOI.AbstractOptimizer

    _subsolvers::SubSolvers{R,Q,S} = SubSolvers{R,Q,S}()
    _parameters::EAGOParameters = EAGOParameters()
    _input_problem::InputProblem = InputProblem()
    _working_problem::ParsedProblem = ParsedProblem()
    _auxillary_variable_info::Union{Nothing,_AuxVarData} = nothing

    obbt_variable_values::Vector{Bool} = Bool[]

    enable_optimize_hook::Bool = false
    ext

    # set as user-specified option
    _end_state::GlobalEndState = GS_UNSET
    _termination_status_code::MOI.TerminationStatusCode = MOI.OPTIMIZE_NOT_CALLED
    _result_status_code::MOI.ResultStatusCode = MOI.OTHER_RESULT_STATUS
    _obj_mult::Float64 = 1.0       # -1.0 when a MAX problem was converted to MIN
    _obj_var_slack_added::Bool = false

    _stack::BinaryMinMaxHeap{NodeBB} = BinaryMinMaxHeap{NodeBB}()
    _current_node::NodeBB = NodeBB()

    _first_relax_point_set::Bool = false
    _current_xref::Vector{Float64} = Float64[]
    _candidate_xref::Vector{Float64} = Float64[]

    _use_prior_objective_xref::Bool = false
    _current_objective_xref::Vector{Float64} = Float64[]
    _prior_objective_xref::Vector{Float64} = Float64[]

    _user_branch_variables::Bool = false
    _fixed_variable::Vector{Bool} = Bool[]
    _branch_variable_count::Int = 0
    _branch_to_sol_map::Vector{Int} = Int[]
    _sol_to_branch_map::Vector{Int} = Int[]

    _continuous_solution::Vector{Float64} = Float64[]

    _preprocess_feasibility::Bool = true
    _preprocess_primal_status::MOI.ResultStatusCode = MOI.OTHER_RESULT_STATUS
    _preprocess_dual_status::MOI.ResultStatusCode = MOI.OTHER_RESULT_STATUS
    _preprocess_termination_status::MOI.TerminationStatusCode = MOI.OPTIMIZE_NOT_CALLED

    _lower_primal_status::MOI.ResultStatusCode = MOI.OTHER_RESULT_STATUS
    _lower_dual_status::MOI.ResultStatusCode = MOI.OTHER_RESULT_STATUS
    _lower_termination_status::MOI.TerminationStatusCode = MOI.OPTIMIZE_NOT_CALLED
    _lower_feasibility::Bool = true
    _lower_objective_value::Float64 = -Inf
    _lower_solution::Vector{Float64} = Float64[]
    _lower_lvd::Vector{Float64} = Float64[]
    _lower_uvd::Vector{Float64} = Float64[]

    _last_cut_objective::Float64 = -Inf

    _upper_result_status::MOI.ResultStatusCode = MOI.OTHER_RESULT_STATUS
    _upper_termination_status::MOI.TerminationStatusCode = MOI.OPTIMIZE_NOT_CALLED
    _upper_feasibility::Bool = true
    _upper_objective_value::Float64 = Inf
    _upper_variables::Vector{VI} = VI[]
    _upper_solution::Vector{Float64} = Float64[]

    _postprocess_feasibility::Bool = true

    # set to time limit in initial_parse! in parse.jl, decremented throughout
    # global_solve in optimize_nonconvex.jl
    _time_left::Float64 = 1000.0

    # set constructor reset on empty! and to zero in initial parse! in parse.jl
    _start_time::Float64 = 0.0
    _run_time::Float64 = 0.0
    _parse_time::Float64 = 0.0
    _presolve_time::Float64 = 0.0
    _last_preprocess_time::Float64 = 0.0
    _last_lower_problem_time::Float64 = 0.0
    _last_upper_problem_time::Float64 = 0.0
    _last_postprocessing_time::Float64 = 0.0

    # reset in initial_parse! in parse.jl
    _min_converged_value::Float64 = Inf
    _global_lower_bound::Float64 = -Inf
    _global_upper_bound::Float64 = Inf
    _maximum_node_id::Int = 0
    _iteration_count::Int = 0
    _node_count::Int = 0

    # Storage for output, reset in initial_parse! in parse.jl
    _solution_value::Float64 = 0.0
    _feasible_solution_found::Bool = false
    _first_solution_node::Int = -1
    _best_upper_value::Float64 = Inf

    # Optimality-Based Bound Tightening (OBBT) Options
    _obbt_working_lower_index::Vector{Bool} = Bool[]
    _obbt_working_upper_index::Vector{Bool} = Bool[]
    _lower_indx_diff::Vector{Bool} = Bool[]
    _upper_indx_diff::Vector{Bool} = Bool[]
    _old_low_index::Vector{Bool} = Bool[]
    _old_upp_index::Vector{Bool} = Bool[]
    _new_low_index::Vector{Bool} = Bool[]
    _new_upp_index::Vector{Bool} = Bool[]
    _obbt_variables::Vector{VI} = VI[]
    _obbt_variable_count::Int = 0
    _obbt_performed_flag::Bool = false

    # Buffers for fbbt, set in presolve, used in preprocess
    _lower_fbbt_buffer::Vector{Float64} = Float64[]
    _upper_fbbt_buffer::Vector{Float64} = Float64[]
    _cp_improvement::Float64 = 0.0
    _cp_evaluation_reverse::Bool = false

    _cut_iterations::Int = 0
    _cut_add_flag::Bool = false
    _node_repetitions::Int = 0

    _log::Log = Log()

    _affine_relax_ci::Vector{CI{SAF,LT}} = CI{SAF,LT}[]
    _affine_objective_cut_ci::Union{CI{VI,LT},CI{SAF,LT},Nothing} = nothing

    _relaxed_variable_number::Int = 0
    _relaxed_variable_index::Vector{VI} = VI[]
    _relaxed_variable_et::Vector{Tuple{CI{VI, ET}, Int}} = Tuple{CI{VI, ET}, Int}[]
    _relaxed_variable_lt::Vector{Tuple{CI{VI, LT}, Int}} = Tuple{CI{VI, LT}, Int}[]
    _relaxed_variable_gt::Vector{Tuple{CI{VI, GT}, Int}} = Tuple{CI{VI, GT}, Int}[]
    _relaxed_variable_integer::Vector{CI{VI, MOI.Integer}} = CI{VI, MOI.Integer}[]

    _branch_variables::Vector{Bool} = Bool[]
    _nonbranching_int::Bool = false

    _new_eval_constraint::Bool = false
    _new_eval_objective::Bool = false

    _node_to_sv_leq_ci::Dict{Int,CI{VI,LT}} = Dict{Int,CI{VI,LT}}()
    _node_to_sv_geq_ci::Dict{Int,CI{VI,GT}} = Dict{Int,CI{VI,GT}}()
    _nonlinear_evaluator_created::Bool = false

    _branch_cost::BranchCostStorage{Float64} = BranchCostStorage{Float64}()
    _branch_variable_sparsity::SparseMatrixCSC{Bool,Int} = spzeros(Bool,1,1)
    _constraint_infeasiblity::Vector{Float64} = Float64[]
end

const EAGO_OPTIMIZER_ATTRIBUTES = Symbol[:relaxed_optimizer, :upper_optimizer,
                                         :enable_optimize_hook, :ext, :_parameters]
const EAGO_MODEL_STRUCT_ATTRIBUTES = Symbol[:_stack, :_log, :_current_node, :_working_problem, :_input_problem, :_branch_cost]
const EAGO_MODEL_NOT_STRUCT_ATTRIBUTES = setdiff(fieldnames(GlobalOptimizer), union(EAGO_OPTIMIZER_ATTRIBUTES, EAGO_MODEL_STRUCT_ATTRIBUTES))

function MOI.empty!(m::GlobalOptimizer{R,S,Q}) where {R,S,Q}

    # create a new empty optimizer and copy fields to m
    new_optimizer = GlobalOptimizer{R,S,Q}(_subsolvers = m._subsolvers,
                                           _parameters = m._parameters,
                                           _input_problem = m._input_problem,
                                           _working_problem = m._working_problem,
                                           ext = nothing)

    MOI.empty!(new_optimizer._subsolvers)
    MOI.empty!(new_optimizer._input_problem)
    MOI.empty!(new_optimizer._working_problem)
    for field in union(EAGO_MODEL_STRUCT_ATTRIBUTES, EAGO_MODEL_NOT_STRUCT_ATTRIBUTES)
        setfield!(m, field, getfield(new_optimizer, field))
    end

    return nothing
end

function MOI.is_empty(m::GlobalOptimizer{R,S,Q}) where {R,S,Q}

    is_empty_flag = uninitialized(_current_node(m))
    is_empty_flag &= isempty(m._stack)
    is_empty_flag &= isempty(m._log)
    is_empty_flag &= isempty(m._input_problem)
    is_empty_flag &= isempty(m._working_problem)

    # compare every remaining scalar field against a freshly constructed optimizer
    new_optimizer = GlobalOptimizer{R,S,Q}(_subsolvers = m._subsolvers,
                                           _parameters = m._parameters,
                                           _input_problem = m._input_problem,
                                           _working_problem = m._working_problem,
                                           ext = nothing)
    for field in EAGO_MODEL_NOT_STRUCT_ATTRIBUTES
        if getfield(m, field) != getfield(new_optimizer, field)
            is_empty_flag = false
            break
        end
    end

    return is_empty_flag
end

# Bound/value queries flip sign when the original input was a MAX problem.
MOI.get(m::GlobalOptimizer, ::MOI.ObjectiveBound) = _is_input_min(m) ? m._global_lower_bound : -m._global_upper_bound
MOI.get(m::GlobalOptimizer, ::MOI.ObjectiveValue) = _is_input_min(m) ? m._global_upper_bound : -m._global_lower_bound

_relaxed_optimizer(m::GlobalOptimizer{R,S,Q}) where {R,S,Q} = m._subsolvers.relaxed_optimizer
_upper_optimizer(m::GlobalOptimizer{R,S,Q}) where {R,S,Q} = m._subsolvers.upper_optimizer
_ext(m::GlobalOptimizer{R,S,Q}) where {R,S,Q} = m._subsolvers.ext

@inline function _is_input_min(m::GlobalOptimizer)
    return m._obj_mult == 1.0
end
@inline _branch_cost(m::GlobalOptimizer) = m._branch_cost.cost
@inline _cost_offset_β(m::GlobalOptimizer) = m._branch_cost.β
@inline _branch_cvx_α(m::GlobalOptimizer) = m._parameters.branch_cvx_factor
@inline _branch_offset_β(m::GlobalOptimizer) = m._parameters.branch_offset
@inline _branch_pseudocost_on(m::GlobalOptimizer) = m._parameters.branch_pseudocost_on
@inline _cut_ϵ_abs(m::GlobalOptimizer) = m._parameters.cut_tolerance_abs
@inline _cut_ϵ_rel(m::GlobalOptimizer) = m._parameters.cut_tolerance_rel
@inline _cut_max_iterations(m::GlobalOptimizer) = m._parameters.cut_max_iterations

@inline _integer_abs_tol(m::GlobalOptimizer) = m._parameters.integer_abs_tol
@inline _integer_rel_tol(m::GlobalOptimizer) = m._parameters.integer_rel_tol

@inline _absolute_tol(m::GlobalOptimizer) = m._parameters.absolute_tolerance
@inline _relative_tol(m::GlobalOptimizer) = m._parameters.relative_tolerance
@inline _constraint_tol(m::GlobalOptimizer) = m._parameters.absolute_constraint_feas_tolerance
+ +@inline _fbbt_lp_depth(m::GlobalOptimizer) = m._parameters.fbbt_lp_depth +@inline _fbbt_lp_repetitions(m::GlobalOptimizer) = m._parameters.fbbt_lp_repetitions + +@inline _obbt_depth(m::GlobalOptimizer) = m._parameters.obbt_depth +@inline _obbt_repetitions(m::GlobalOptimizer) = m._parameters.obbt_repetitions +@inline _obbt_tolerance(m::GlobalOptimizer) = m._parameters.obbt_repetitions +@inline _obbt_aggressive_on(m::GlobalOptimizer) = m._parameters.obbt_aggressive_on +@inline _obbt_aggressive_max_iteration(m::GlobalOptimizer) = m._parameters.obbt_aggressive_max_iteration + +@inline _user_solver_config(m::GlobalOptimizer) = m._parameters.user_solver_config +@inline _verbosity(m::GlobalOptimizer) = m._parameters.verbosity + +@inline _cp_depth(m::GlobalOptimizer) = m._parameters.cp_depth +@inline _cp_repetitions(m::GlobalOptimizer) = m._parameters.cp_repetitions + +@inline _iteration_count(m::GlobalOptimizer) = m._iteration_count +@inline _obj_var_slack_added(m::GlobalOptimizer) = m._obj_var_slack_added + +@inline _bvi(m::GlobalOptimizer, i::Int) = m._branch_to_sol_map[i] +@inline _svi(m::GlobalOptimizer, i::Int) = m._sol_to_branch_map[i] + +@inline _is_branch_var(m::GlobalOptimizer, i) = m._branch_variables[i] + +@inline _current_node(m::GlobalOptimizer) = m._current_node +@inline _variable_info(m::GlobalOptimizer, i::Int) = m._input_problem._variable_info[i] +@inline _working_variable_info(m::GlobalOptimizer, i::Int) = m._working_problem._variable_info[i] + +@inline _sparsity(::BranchVar, m::GlobalOptimizer, i::Int) = view(m._branch_variable_sparsity,1,:) + +@inline _variable_num(::BranchVar, m::GlobalOptimizer) = m._branch_variable_count +@inline _variable_num(::FullVar, m::GlobalOptimizer) = m._working_problem._variable_count + +@inline is_integer(::BranchVar, m::GlobalOptimizer, i::Int) = is_integer(_current_node(m), i) +@inline function is_integer(::FullVar, m::GlobalOptimizer, i::Int) + if _is_branch_var(m,i) + return is_integer(_current_node(m), _svi(m, i)) 
+ end + is_integer(_working_variable_info(m,i)) +end + +@inline _lower_bound(::BranchVar, m::GlobalOptimizer, i::Int) = lower_variable_bounds(_current_node(m), i) +@inline function _lower_bound(::FullVar, m::GlobalOptimizer, i::Int) + if _is_branch_var(m,i) + return lower_variable_bounds(_current_node(m), _svi(m, i)) + end + lower_bound(_working_variable_info(m,i)) +end + +@inline _upper_bound(::BranchVar, m::GlobalOptimizer, i::Int) = upper_variable_bounds(_current_node(m), i) +@inline function _upper_bound(::FullVar, m::GlobalOptimizer, i::Int) + if _is_branch_var(m,i) + return upper_variable_bounds(_current_node(m), _svi(m, i)) + end + upper_bound(_working_variable_info(m,i)) +end + +@inline _mid(::BranchVar, m::GlobalOptimizer, i::Int) = mid(_current_node(m), i) +@inline function _mid(::FullVar, m::GlobalOptimizer, i::Int) + if _is_branch_var(m,i) + return mid(_current_node(m), _svi(m, i)) + end + mid(_working_variable_info(m,i)) +end + +@inline _diam(::BranchVar, m::GlobalOptimizer, i::Int) = diam(_current_node(m), i) +@inline function _diam(::FullVar, m::GlobalOptimizer, i::Int) + if _is_branch_var(m,i) + return diam(_current_node(m), _svi(m, i)) + end + diam(_working_variable_info(m,i)) +end + +@inline _lower_solution(::BranchVar, m::GlobalOptimizer, i::Int) = m._lower_solution[_bvi(m, i)] +@inline _lower_solution(::FullVar, m::GlobalOptimizer, i::Int) = m._lower_solution[i] + +@inline function _set_lower_solution!(::BranchVar, m::GlobalOptimizer, v::Float64, i::Int) + m._lower_solution[_bvi(m, i)] = v + return +end +@inline function _set_lower_solution!(::FullVar, m::GlobalOptimizer, v::Float64, i::Int) + m._lower_solution[i] = v + return +end + +_constraint_primal(m::GlobalOptimizer, ::Type{F}, ::Type{S}) where {F,S} = _constraint_primal(m._input_problem, F, S) \ No newline at end of file diff --git a/src/eago_optimizer/types/incremental.jl b/src/eago_optimizer/types/incremental.jl new file mode 100644 index 00000000..e9efbe73 --- /dev/null +++ 
b/src/eago_optimizer/types/incremental.jl @@ -0,0 +1,176 @@
# Copyright (c) 2018: Matthew Wilhelm & Matthew Stuber.
# This code is licensed under MIT license (see LICENSE.md for full details)
#############################################################################
# EAGO
# A development environment for robust and global optimization
# See https://github.com/PSORLab/EAGO.jl
#############################################################################
# src/eago_optimizer/types/incremental.jl
# A type-stable wrapper for optimizers used by EAGO to enable bridging and
# incremental loading. This is tailored to the internal routines used by EAGO.jl
# so methods may be specialized by optimizer types and error checking is often
# avoided.
#############################################################################

#=
mutable struct IncrementalCache{S <: MOI.AbstractOptimizer} <: MOI.AbstractOptimizer
end
=#

"""
    Incremental{S}

A type-stable wrapper for an optimizer that enables incremental
modification of solvers that don't inherently support this. Explicitly checks
support of MOI functionality used in EAGO. When `_is_incremental(S)` is `true`,
the subsolver supports incremental loading and is used directly; otherwise the
subsolver is accessed through a caching layer.
"""
+""" +mutable struct Incremental{S <: MOI.AbstractOptimizer} <: MOI.AbstractOptimizer + optimizer::MOIB.LazyBridgeOptimizer{S} + cache::MOIB.LazyBridgeOptimizer{MOIU.CachingOptimizer{S,MOIU.Model{Float64}}} +end +function Incremental(m::S) where {S <: MOI.AbstractOptimizer} + b = MOIB.full_bridge_optimizer(m, Float64) + cache = MOIB.full_bridge_optimizer(MOIU.CachingOptimizer(MOIU.Model{Float64}(), m), Float64) + return Incremental{S}(b, cache) +end + +function MOI.copy_to(model::Incremental{S}, src::MOI.ModelLike) where S <: MOI.AbstractOptimizer + return MOI.Utilities.default_copy_to(model, src, copy_names) +end + +_is_incremental(x) = false +_get_storage(d::Incremental{S}) where S = _is_incremental(S) ? d.optimizer : d.cache + +# Set attributes +MOI.set(d::Incremental, ::MOI.ObjectiveFunction{VI}, f::VI) = (MOI.set(_get_storage(d), MOI.ObjectiveFunction{VI}(), f); nothing) +MOI.set(d::Incremental, ::MOI.ObjectiveFunction{SAF}, f::SAF) = (MOI.set(_get_storage(d), MOI.ObjectiveFunction{SAF}(), f); nothing) +MOI.set(d::Incremental, ::MOI.ObjectiveFunction{SQF}, f::SQF) = (MOI.set(_get_storage(d), MOI.ObjectiveFunction{SQF}(), f); nothing) + +function MOI.set(d::Incremental, ::MOI.ObjectiveSense, s) + MOI.set(_get_storage(d), MOI.ObjectiveSense(), s) + return +end +function MOI.set(d::Incremental{S}, ::MOI.Silent, s) where S <: MOI.AbstractOptimizer + if _is_incremental(S) + MOI.set(d.optimizer, MOI.Silent(), s) + else + MOI.set(d.cache.model.optimizer, MOI.Silent(), s) + end + return +end + +function MOI.set(d::Incremental, ::MOI.VariablePrimalStart, v::VI, x) + MOI.set(_get_storage(d), MOI.VariablePrimalStart(), v, x) + return +end + +MOI.set(d::Incremental, ::MOI.NLPBlock, ::Nothing) = nothing +function MOI.set(d::Incremental, ::MOI.NLPBlock, s) + MOI.set(_get_storage(d), MOI.NLPBlock(), s) + return +end + +function MOI.set(d::Incremental, p::MOI.RawOptimizerAttribute, s) + MOI.set(_get_storage(d), p, s) + return +end + +# Add variable/constraint +function 
MOI.add_variable(d::Incremental) + MOI.add_variable(_get_storage(d))::VI +end + +MOI.add_constraint(d::Incremental, f::VI, s::LT) = MOI.add_constraint(_get_storage(d), f, s)::CI{VI,LT} +MOI.add_constraint(d::Incremental, f::VI, s::GT) = MOI.add_constraint(_get_storage(d), f, s)::CI{VI,GT} +MOI.add_constraint(d::Incremental, f::VI, s::ET) = MOI.add_constraint(_get_storage(d), f, s)::CI{VI,ET} +MOI.add_constraint(d::Incremental, f::VI, s::IT) = MOI.add_constraint(_get_storage(d), f, s)::CI{VI,IT} +MOI.add_constraint(d::Incremental, f::VI, s::ZO) = MOI.add_constraint(_get_storage(d), f, s)::CI{VI,ZO} +MOI.add_constraint(d::Incremental, f::VI, s::MOI.Integer) = MOI.add_constraint(_get_storage(d), f, s)::CI{VI,MOI.Integer} + +MOI.add_constraint(d::Incremental, f::SAF, s::LT) = MOI.add_constraint(_get_storage(d), f, s)::CI{SAF,LT} +MOI.add_constraint(d::Incremental, f::SAF, s::GT) = MOI.add_constraint(_get_storage(d), f, s)::CI{SAF,GT} +MOI.add_constraint(d::Incremental, f::SAF, s::ET) = MOI.add_constraint(_get_storage(d), f, s)::CI{SAF,ET} +MOI.add_constraint(d::Incremental, f::SAF, s::IT) = MOI.add_constraint(_get_storage(d), f, s)::CI{SAF,IT} + +MOI.add_constraint(d::Incremental, f::SQF, s::LT) = MOI.add_constraint(_get_storage(d), f, s)::CI{SQF,LT} +MOI.add_constraint(d::Incremental, f::SQF, s::GT) = MOI.add_constraint(_get_storage(d), f, s)::CI{SQF,GT} +MOI.add_constraint(d::Incremental, f::SQF, s::ET) = MOI.add_constraint(_get_storage(d), f, s)::CI{SQF,ET} +MOI.add_constraint(d::Incremental, f::SQF, s::IT) = MOI.add_constraint(_get_storage(d), f, s)::CI{SQF,IT} + +# Delete +function MOI.delete(d::Incremental, ci::CI{VI,T}) where T <: Union{LT,GT,ET,IT,MOI.Integer} + MOI.delete(_get_storage(d), ci) + return +end +function MOI.delete(d::Incremental, ci::CI{SAF,LT}) + MOI.delete(_get_storage(d), ci) + return +end + +# Set modifications +function MOI.set(d::Incremental, ::MOI.ConstraintSet, ci::CI{VI,T}, s::T) where T <: Union{LT,GT,ET,IT} + MOI.set(_get_storage(d), 
MOI.ConstraintSet(), ci, s) + return +end + +# Get attributes +function MOI.get(d::Incremental{S}, ::MOI.TerminationStatus) where S + MOI.get(d.optimizer, MOI.TerminationStatus())::MOI.TerminationStatusCode +end +function MOI.get(d::Incremental{S}, ::MOI.PrimalStatus) where S + MOI.get(d.optimizer, MOI.PrimalStatus())::MOI.ResultStatusCode +end +function MOI.get(d::Incremental{S}, ::MOI.DualStatus) where S + MOI.get(d.optimizer, MOI.DualStatus())::MOI.ResultStatusCode +end +function MOI.get(d::Incremental{S}, ::MOI.RawStatusString) where S + MOI.get(d.optimizer, MOI.RawStatusString())::MOI.String +end + +function MOI.get(d::Incremental{S}, ::MOI.ObjectiveBound) where S + MOI.get(d.optimizer, MOI.ObjectiveBound())::Float64 +end +function MOI.get(d::Incremental{S}, ::MOI.ObjectiveValue) where S + MOI.get(d.optimizer, MOI.ObjectiveValue())::Float64 +end +function MOI.get(d::Incremental{S}, ::MOI.DualObjectiveValue) where S + MOI.get(d.optimizer, MOI.DualObjectiveValue())::Float64 +end + +function MOI.get(d::Incremental{S}, ::MOI.VariablePrimal, vi::VI) where S + MOI.get(d.optimizer, MOI.VariablePrimal(), vi)::Float64 +end + +const SAF_CI_TYPES = Union{CI{SAF,LT},CI{SAF,GT},CI{SAF,ET}} +function MOI.get(d::Incremental{S}, ::MOI.ConstraintPrimal, ci::SAF_CI_TYPES) where S + MOI.get(d.optimizer, MOI.ConstraintPrimal(), ci)::Float64 +end + +const SQF_CI_TYPES = Union{CI{SQF,LT},CI{SQF,GT},CI{SQF,ET}} +function MOI.get(d::Incremental{S}, ::MOI.ConstraintPrimal, ci::SQF_CI_TYPES) where S + MOI.get(d.optimizer, MOI.ConstraintPrimal(), ci)::Float64 +end + +function MOI.get(d::Incremental{S}, ::MOI.ConstraintDual, ci::Union{CI{VI,LT},CI{VI,GT}}) where S + MOI.get(d.optimizer, MOI.ConstraintDual(), ci)::Float64 +end +function MOI.get(d::Incremental{S}, ::MOI.ResultCount) where S + MOI.get(d.optimizer, MOI.ResultCount())::Int +end +function MOI.get(d::Incremental{S}, n::MOI.SolverName) where S + MOI.get(d.optimizer, n)::String +end + +# define optimize! 
+function MOI.optimize!(d::Incremental{S}) where S + MOI.optimize!(_get_storage(d)) + return +end + +function MOI.empty!(d::Incremental{S}) where S + MOI.empty!(_get_storage(d)) + return +end + +MOI.is_empty(d::Incremental{S}) where S = MOI.is_empty(_get_storage(d)) \ No newline at end of file diff --git a/src/eago_optimizer/logging/log.jl b/src/eago_optimizer/types/log.jl similarity index 100% rename from src/eago_optimizer/logging/log.jl rename to src/eago_optimizer/types/log.jl diff --git a/src/eago_optimizer/types/node_bb.jl b/src/eago_optimizer/types/node_bb.jl new file mode 100644 index 00000000..7286f44e --- /dev/null +++ b/src/eago_optimizer/types/node_bb.jl @@ -0,0 +1,127 @@ +# Copyright (c) 2018: Matthew Wilhelm & Matthew Stuber. +# This code is licensed under MIT license (see LICENSE.md for full details) +############################################################################# +# EAGO +# A development environment for robust and global optimization +# See https://github.com/PSORLab/EAGO.jl +############################################################################# +# src/eago_optimizer/node_bb.jl +# Defines storage for a node in the B&B tree & utilities functions +############################################################################# + +@enum(BranchDirection, BD_NONE, BD_NEG, BD_POS) + +# Used internally to set & get variables in full problem space or just branch variables +struct FullVar end +struct BranchVar end +Base.Broadcast.broadcastable(d::FullVar) = Ref(d) +Base.Broadcast.broadcastable(d::BranchVar) = Ref(d) + +""" +$(TYPEDEF) + +Stores information associated with each node in Branch & Bound tree. + +$(TYPEDFIELDS) +""" +struct NodeBB + "Lower bounds of variable box." + lower_variable_bounds::Vector{Float64} + "Upper bounds of variable box." 
+ upper_variable_bounds::Vector{Float64} + "Is dimension integer valued" + is_integer::BitVector + "Are all dimensions continuous or fixed" + continuous::Bool + "Lower bound of problem solution on nodeBB" + lower_bound::Float64 + "Upper bound of problem solution on nodeBB" + upper_bound::Float64 + "Depth of node in B&B tree." + depth::Int + "Depth of first parent in B&B tree that was continuously valued" + cont_depth::Int + "Unique id for each node." + id::Int + "Whether last branch was negative or positive in direction" + branch_direction::BranchDirection + "Dimension of last branch" + last_branch::Int + "Extent of last branch (used for pseudocost calculation)" + branch_extent::Float64 +end + +# Constructors +function NodeBB(l::Vector{Float64}, u::Vector{Float64}, d::BitVector) + NodeBB(l, u, d, any(d), -Inf, Inf, 1, -1, 1, BD_NONE, -1, 0.0) +end +NodeBB() = NodeBB(Float64[], Float64[], BitVector(), false, -Inf, Inf, 0, -1, 1, BD_NONE, -1, 0.0) +NodeBB(x::NodeBB) = NodeBB(copy(x.lower_variable_bounds), copy(x.upper_variable_bounds), copy(x.is_integer), + x.continuous, x.lower_bound, x.upper_bound, x.depth, x.cont_depth, x.id, + x.branch_direction, x.last_branch, x.branch_extent) + +# Copy utilities +Base.copy(x::NodeBB) = NodeBB(copy(x.lower_variable_bounds), copy(x.upper_variable_bounds), copy(x.is_integer), + x.continuous, x.lower_bound, x.upper_bound, x.depth, x.cont_depth, x.id, + x.branch_direction, x.last_branch, x.branch_extent) + +# using alternative name so as not to interfere with ordering... 
+function uninitialized(x::NodeBB) + flag = isempty(x.lower_variable_bounds) + flag &= isempty(x.upper_variable_bounds) + flag &= isempty(x.is_integer) + flag &= x.lower_bound == -Inf + flag &= x.upper_bound == Inf + flag &= x.depth == 0 + flag &= x.cont_depth == -1 + flag &= x.id == 1 + return flag +end + +# Access functions for broadcasting data easily +@inline lower_variable_bounds(x::NodeBB) = x.lower_variable_bounds +@inline upper_variable_bounds(x::NodeBB) = x.upper_variable_bounds +@inline lower_variable_bounds(x::NodeBB, i::Int) = x.lower_variable_bounds[i] +@inline upper_variable_bounds(x::NodeBB, i::Int) = x.upper_variable_bounds[i] +@inline lower_variable_bounds(x::NodeBB, id::Int, nid::Int) = x.lower_variable_bounds[id:nid] +@inline upper_variable_bounds(x::NodeBB, id::Int, nid::Int) = x.upper_variable_bounds[id:nid] +@inline is_integer(x::NodeBB) = x.is_integer +@inline is_integer(x::NodeBB, id::Int) = x.is_integer[id] +@inline continuous(x::NodeBB) = x.continuous +@inline lower_bound(x::NodeBB) = x.lower_bound +@inline upper_bound(x::NodeBB) = x.upper_bound +@inline depth(x::NodeBB) = x.depth +@inline cont_depth(x::NodeBB) = x.cont_depth + +# Iteration functions +Base.isless(x::NodeBB, y::NodeBB) = x.lower_bound < y.lower_bound +Base.length(x::NodeBB) = length(x.lower_variable_bounds) +function Base.isempty(x::NodeBB) + for i = 1:length(x) + lower = @inbounds x.lower_variable_bounds[i] + upper = @inbounds x.upper_variable_bounds[i] + (lower > upper) && (return true) + end + return false +end + +""" +$(FUNCTIONNAME) + +Checks that nodes `x` and `y` have equal domains within an absolute tolerance `r`. 
+""" +function same_box(x::NodeBB, y::NodeBB, r::Float64) + (isempty(x.lower_variable_bounds) ⊻ isempty(y.lower_variable_bounds)) && (return false) + (isempty(x.upper_variable_bounds) ⊻ isempty(y.upper_variable_bounds)) && (return false) + for i = 1:length(x) + ~isapprox(x.lower_variable_bounds[i], y.lower_variable_bounds[i], atol=r) && (return false) + ~isapprox(x.upper_variable_bounds[i], y.upper_variable_bounds[i], atol=r) && (return false) + end + return true +end + +# Compute middle & diameter +@inline diam(x::NodeBB) = upper_variable_bounds(x) - lower_variable_bounds(x) +@inline diam(x::NodeBB, i::Int) = upper_variable_bounds(x,i) - lower_variable_bounds(x,i) +@inline mid(x::NodeBB) = 0.5*(upper_variable_bounds(x) + lower_variable_bounds(x)) +@inline mid(x::NodeBB, i::Int) = 0.5*(upper_variable_bounds(x,i) + lower_variable_bounds(x,i)) \ No newline at end of file diff --git a/src/eago_optimizer/types/subsolver_block.jl b/src/eago_optimizer/types/subsolver_block.jl new file mode 100644 index 00000000..32a52727 --- /dev/null +++ b/src/eago_optimizer/types/subsolver_block.jl @@ -0,0 +1,38 @@ +""" + +A structure containing the +""" +mutable struct SubSolvers{Q<:MOI.AbstractOptimizer, S<:MOI.AbstractOptimizer, T<:ExtensionType} + relaxed_optimizer::Q + upper_optimizer::S + ext::T +end +SubSolvers{Q,S,T}(; r::Q = Cbc.Optimizer(), u::S = Ipopt.Optimizer(), t::T = DefaultExt()) where {Q,S,T} = SubSolvers{Q,S,T}(r, u, t) +SubSolvers(; r::Q = Cbc.Optimizer(), u::S = Ipopt.Optimizer(), t::T = DefaultExt()) where {Q,S,T} = SubSolvers{Q,S,T}(r,u,t) + +function _relaxed_optimizer(d::SubSolvers{Q,S,T}) where {Q <: MOI.AbstractOptimizer, + S <: MOI.AbstractOptimizer, + T <: ExtensionType} + return d.relaxed_optimizer +end + +function _upper_optimizer(d::SubSolvers{Q,S,T}) where {Q <: MOI.AbstractOptimizer, + S <: MOI.AbstractOptimizer, + T <: ExtensionType} + return d.upper_optimizer +end + +function _ext(d::SubSolvers{Q,S,T}) where {Q <: MOI.AbstractOptimizer, + S <: 
MOI.AbstractOptimizer, + T <: ExtensionType} + return d.ext +end + +function isempty(d::SubSolvers{Q,S,T}) where {Q,S,T} + MOI.is_empty(d.relaxed_optimizer) && MOI.is_empty(d.upper_optimizer) +end +function MOI.empty!(d::SubSolvers{Q,S,T}) where {Q,S,T} + MOI.empty!(d.relaxed_optimizer) + MOI.empty!(d.upper_optimizer) + return +end \ No newline at end of file diff --git a/src/eago_optimizer/types/variable_info.jl b/src/eago_optimizer/types/variable_info.jl new file mode 100644 index 00000000..349ad11a --- /dev/null +++ b/src/eago_optimizer/types/variable_info.jl @@ -0,0 +1,193 @@ +""" +$(TYPEDEF) + +A structure used to store information related to the bounds assigned to each +variable. + +$(TYPEDFIELDS) +""" +Base.@kwdef struct VariableInfo{T<:AbstractFloat} + "Is the variable integer valued?" + is_integer::Bool = false + "Boolean indicating whether finite lower bound exists." + has_lower_bound::Bool = false + "Boolean indicating whether finite upper bound exists." + has_upper_bound::Bool = false + "Boolean indicating variable is fixed to a finite value." + is_fixed::Bool = false + "Indicates that constraints have been set" + has_constraints::Bool = false + "Lower bounds. May be -Inf." + lower_bound::T = typemin(T) + "Upper bounds. May be Inf." 
+ upper_bound::T = typemax(T) +end +is_integer(x::VariableInfo) = x.is_integer +has_lower_bound(x::VariableInfo) = x.has_lower_bound +has_upper_bound(x::VariableInfo) = x.has_upper_bound +lower_bound(x::VariableInfo{T}) where {T <: AbstractFloat} = x.lower_bound +upper_bound(x::VariableInfo{T}) where {T <: AbstractFloat} = x.upper_bound + +is_fixed(x::VariableInfo) = x.is_fixed +function is_less_than(x::VariableInfo) + flag = x.has_upper_bound + flag &= !x.has_lower_bound + return flag +end +function is_greater_than(x::VariableInfo) + flag = x.has_lower_bound + flag &= !x.has_upper_bound + return flag +end +function is_interval(x::VariableInfo) + flag = x.has_lower_bound + flag &= x.has_upper_bound + return flag +end +function is_int_interval(x::VariableInfo) + flag = x.has_lower_bound + flag &= x.has_upper_bound + flag &= x.is_integer + return flag +end +function is_real_interval(x::VariableInfo) + flag = x.has_lower_bound + flag &= x.has_upper_bound + flag &= !x.is_integer + return flag +end +function is_zero_one(x::VariableInfo{T}) where {T <: AbstractFloat} + flag = iszero(x.lower_bound) + flag &= isone(x.upper_bound) + flag &= x.is_integer + return flag +end + +mid(x::VariableInfo{T}) where {T <: AbstractFloat} = 0.5*(upper_bound(x) + lower_bound(x)) +diam(x::VariableInfo{T}) where {T <: AbstractFloat} = upper_bound(x) - lower_bound(x) +empty_variable_info(::Type{T}) where T = VariableInfo{T}(lower_bound = Inf, + upper_bound = -Inf) + +Base.isempty(v::VariableInfo{T}) where {T <: AbstractFloat} = lower_bound(v) > upper_bound(v) +function check_isempty(l, u, b) + flag = l <= u + if b + flag &= (l <= 0.0) | (u >= 1.0) + end + return !flag +end + +function VariableInfo(::Type{T}, ::ZO) where {T <: AbstractFloat} + return VariableInfo{T}(is_integer = true, + has_lower_bound = true, + has_upper_bound = true, + has_constraints = true, + lower_bound = zero(T), + upper_bound = one(T)) +end + +function VariableInfo(it::MOI.Interval{T}) where {T <: AbstractFloat} + 
VariableInfo{T}(has_lower_bound = !isinf(it.lower), + has_upper_bound = !isinf(it.upper), + has_constraints = !isinf(it.lower) | !isinf(it.upper), + is_fixed = it.lower == it.upper, + lower_bound = it.lower, + upper_bound = it.upper) +end + +function VariableInfo(v::VariableInfo{T}, ::ZO) where {T <: AbstractFloat} + isempty(v) && (return v) + l = max(zero(T), lower_bound(v)) + u = min(one(T), upper_bound(v)) + check_isempty(l, u, is_integer(v)) && return empty_variable_info(T) + return VariableInfo{T}(is_integer = true, + has_lower_bound = true, + has_upper_bound = true, + has_constraints = true, + is_fixed = l == u, + lower_bound = l, + upper_bound = u) +end + +function VariableInfo(v::VariableInfo{T}, it::MOI.Interval{T}) where {T <: AbstractFloat} + isempty(v) && return v + l = max(it.lower, lower_bound(v)) + u = min(it.upper, upper_bound(v)) + check_isempty(l, u, is_integer(v)) && return empty_variable_info(T) + return VariableInfo(is_integer = is_integer(v), + has_lower_bound = !isinf(l), + has_upper_bound = !isinf(u), + has_constraints = !isinf(l) | !isinf(u), + is_fixed = l == u, + lower_bound = l, + upper_bound = u) +end + +function VariableInfo(v::VariableInfo{T}, et::MOI.EqualTo{T}) where {T <: AbstractFloat} + isempty(v) && return v + l = max(et.value, lower_bound(v)) + u = min(et.value, upper_bound(v)) + check_isempty(l, u, is_integer(v)) && return empty_variable_info(T) + out = VariableInfo(is_integer = is_integer(v), + has_lower_bound = !isinf(l), + has_upper_bound = !isinf(u), + has_constraints = !isinf(l) | !isinf(u), + is_fixed = true, + lower_bound = l, + upper_bound = u) + return VariableInfo(is_integer = is_integer(v), + has_lower_bound = !isinf(l), + has_upper_bound = !isinf(u), + has_constraints = !isinf(l) | !isinf(u), + is_fixed = true, + lower_bound = l, + upper_bound = u) +end + +function VariableInfo(v::VariableInfo{T}, gt::MOI.GreaterThan{T}) where {T <: AbstractFloat} + isempty(v) && return v + l = max(gt.lower, lower_bound(v)) + u = 
upper_bound(v) + check_isempty(l, u, is_integer(v)) && return empty_variable_info(T) + return VariableInfo(is_integer = is_integer(v), + has_lower_bound = !isinf(l), + has_upper_bound = !isinf(u), + has_constraints = !isinf(l) | !isinf(u), + is_fixed = l == u, + lower_bound = l, + upper_bound = u) +end +function VariableInfo(v::VariableInfo{T}, lt::MOI.LessThan{T}) where {T <: AbstractFloat} + isempty(v) && return v + l = lower_bound(v) + u = min(lt.upper, upper_bound(v)) + check_isempty(l, u, is_integer(v)) && return empty_variable_info(T) + return VariableInfo(is_integer = is_integer(v), + has_lower_bound = !isinf(l), + has_upper_bound = !isinf(u), + has_constraints = !isinf(l) | !isinf(u), + is_fixed = l == u, + lower_bound = l, + upper_bound = u) +end + +function VariableInfo(v::VariableInfo{T}, s::MOI.Integer) where {T <: AbstractFloat} + isempty(v) && return v + l = lower_bound(v) + u = upper_bound(v) + check_isempty(l, u, true) && return empty_variable_info(T) + return VariableInfo(is_integer = true, + has_lower_bound = has_lower_bound(v), + has_upper_bound = has_upper_bound(v), + has_constraints = v.has_constraints, + is_fixed = is_fixed(v), + lower_bound = l, + upper_bound = u) +end + +ZO(v::VariableInfo) = MOI.ZeroOne() +ET(v::VariableInfo{T}) where {T <: AbstractFloat} = MOI.EqualTo{T}(v.lower_bound) +IT(v::VariableInfo{T}) where {T <: AbstractFloat} = MOI.Interval{T}(v.lower_bound,v.upper_bound) +GT(v::VariableInfo{T}) where {T <: AbstractFloat} = MOI.GreaterThan{T}(v.lower_bound) +LT(v::VariableInfo{T}) where {T <: AbstractFloat} = MOI.LessThan{T}(v.upper_bound) +INT(v::VariableInfo{T}) where {T <: AbstractFloat} = MOI.Semiinteger{T}(v.lower_bound, v.upper_bound) diff --git a/src/eago_optimizer/unsafe_utilities.jl b/src/eago_optimizer/unsafe_utilities.jl deleted file mode 100644 index 57d9e483..00000000 --- a/src/eago_optimizer/unsafe_utilities.jl +++ /dev/null @@ -1,29 +0,0 @@ -# Copyright (c) 2018: Matthew Wilhelm & Matthew Stuber. 
-# This code is licensed under MIT license (see LICENSE.md for full details) -############################################################################# -# EAGO -# A development environment for robust and global optimization -# See https://github.com/PSORLab/EAGO.jl -############################################################################# -# src/eago_optimizer/unsafe_utilities.jl -# Inbounds and non-allocating versions of simple Julia functions. -############################################################################# - -""" -$(TYPEDSIGNATURES) - -Performs `map!(f, y, x)` in an unsafe manner if y[i] is true, else no-op. -Assumes `n == length(x) == length(y)`. About 2x faster for small arrays (n < 1000). -""" -function unsafe_check_fill!(f, y::Vector{T}, x::T, n::Int) where T - i = 1 - m = n + 1 - while i < m - flag = f(@inbounds y[i]) - if flag - @inbounds y[i] = x - end - i += 1 - end - nothing -end diff --git a/src/eago_optimizer/utilities.jl b/src/eago_optimizer/utilities.jl new file mode 100644 index 00000000..ac5af70f --- /dev/null +++ b/src/eago_optimizer/utilities.jl @@ -0,0 +1,60 @@ +# Copyright (c) 2018: Matthew Wilhelm & Matthew Stuber. +# This code is licensed under MIT license (see LICENSE.md for full details) +############################################################################# +# EAGO +# A development environment for robust and global optimization +# See https://github.com/PSORLab/EAGO.jl +############################################################################# +# src/eago_optimizer/utilities.jl +# Inbounds and non-allocating versions of simple Julia functions. +############################################################################# + +""" +$(TYPEDSIGNATURES) + +Performs `map!(f, y, x)` in an unsafe manner if y[i] is true, else no-op. +Assumes `n == length(x) == length(y)`. About 2x faster for small arrays (n < 1000). 
+""" +function unsafe_check_fill!(f, y::Vector{T}, x::T, n::Int) where T + i = 1 + m = n + 1 + while i < m + flag = f(@inbounds y[i]) + if flag + @inbounds y[i] = x + end + i += 1 + end + nothing +end + +_rf_findmax((fm, m), (fx, x)) = Base.isless(fm, fx) ? (fx, x) : (fm, m) +map_findmax(f, itr) = mapfoldl(((k, v),) -> (f(v), k), _rf_findmax, pairs(itr)) +map_argmax(f, itr) = map_findmax(f, itr)[2] + +argmax(f, domain) = mapfoldl(x -> (f(x), x), _rf_findmax, domain)[2] + +relative_gap(L::Float64, U::Float64) = ((L > -Inf) && (U < Inf)) ? abs(U - L)/(max(abs(L), abs(U))) : Inf +relative_tolerance(L::Float64, U::Float64, tol::Float64) = relative_gap(L, U) > tol || ~(L > -Inf) + +""" + gershgorin_λmin + +Computes a lower bound on the smallest eigenvalue of `x` by means of Gershgorin's +Circle Theorem. See the link provided for information +https://mathworld.wolfram.com/GershgorinCircleTheorem.html. +""" +function gershgorin_λmin(x::AbstractMatrix{T}) where T + xsum = -sum(i -> abs(@inbounds x[i]), diagind(x)) + xmin = typemax(T) + @inbounds for k in diagind(x) + xk = x[k] + xadd = abs(xk) + xk + xsum += xadd + if xsum < xmin + xmin = xsum + end + xsum -= xadd + end + return xmin +end diff --git a/src/eago_optimizer/variables.jl b/src/eago_optimizer/variables.jl deleted file mode 100644 index b34526e8..00000000 --- a/src/eago_optimizer/variables.jl +++ /dev/null @@ -1,101 +0,0 @@ -# Copyright (c) 2018: Matthew Wilhelm & Matthew Stuber. -# This code is licensed under MIT license (see LICENSE.md for full details) -############################################################################# -# EAGO -# A development environment for robust and global optimization -# See https://github.com/PSORLab/EAGO.jl -############################################################################# -# src/eago_optimizer/variables.jl -# Defines single variable constraints supported by optimizer and how to store. 
-############################################################################# - -##### Access variable information from MOI variable index -has_upper_bound(m::Optimizer, vi::MOI.VariableIndex) = m._input_problem._variable_info[vi.value].has_upper_bound -has_lower_bound(m::Optimizer, vi::MOI.VariableIndex) = m._input_problem._variable_info[vi.value].has_lower_bound -is_fixed(m::Optimizer, vi::MOI.VariableIndex) = m._input_problem._variable_info[vi.value].is_fixed -is_integer(m::Optimizer, i::Int64) = is_integer(m._input_problem._variable_info[i]) - -##### Add unconstrained variables -function MOI.add_variable(m::Optimizer) - m._input_problem._variable_count += 1 - push!(m._input_problem._variable_info, VariableInfo()) - return VI(m._input_problem._variable_count) -end -MOI.add_variables(m::Optimizer, n::Int) = [MOI.add_variable(m) for i in 1:n] - -##### Supports function and add_constraint for single variable functions -const INEQ_SETS = Union{LT, GT, ET} -MOI.supports_constraint(::Optimizer, ::Type{SV}, ::Type{S}) where {S <: INEQ_SETS} = true - -#= -function MOI.add_constraint(m::Optimizer, v::SV, zo::ZO) - vi = v.variable - check_inbounds!(m, vi) - has_upper_bound(m, vi) && error("Upper bound on variable $vi already exists.") - has_lower_bound(m, vi) && error("Lower bound on variable $vi already exists.") - is_fixed(m, vi) && error("Variable $vi is fixed. 
Cannot also set upper bound.") - m._input_problem._variable_info[vi.value].lower_bound = 0.0 - m._input_problem._variable_info[vi.value].upper_bound = 1.0 - m._input_problem._variable_info[vi.value].has_lower_bound = true - m._input_problem._variable_info[vi.value].has_upper_bound = true - m._input_problem._variable_info[vi.value].is_integer = true - return CI{SV, ZO}(vi.value) -end -=# - -function MOI.add_constraint(m::Optimizer, v::SV, lt::LT) - vi = v.variable - check_inbounds!(m, vi) - if isnan(lt.upper) - error("Invalid upper bound value $(lt.upper).") - end - if has_upper_bound(m, vi) - error("Upper bound on variable $vi already exists.") - end - if is_fixed(m, vi) - error("Variable $vi is fixed. Cannot also set upper bound.") - end - m._input_problem._variable_info[vi.value].upper_bound = lt.upper - m._input_problem._variable_info[vi.value].has_upper_bound = true - return CI{SV, LT}(vi.value) -end - -function MOI.add_constraint(m::Optimizer, v::SV, gt::GT) - vi = v.variable - check_inbounds!(m, vi) - if isnan(gt.lower) - error("Invalid lower bound value $(gt.lower).") - end - if has_lower_bound(m, vi) - error("Lower bound on variable $vi already exists.") - end - if is_fixed(m, vi) - error("Variable $vi is fixed. Cannot also set lower bound.") - end - m._input_problem._variable_info[vi.value].lower_bound = gt.lower - m._input_problem._variable_info[vi.value].has_lower_bound = true - return CI{SV, GT}(vi.value) -end - -function MOI.add_constraint(m::Optimizer, v::SV, eq::ET) - vi = v.variable - check_inbounds!(m, vi) - if isnan(eq.value) - error("Invalid fixed value $(gt.lower).") - end - if has_lower_bound(m, vi) - error("Variable $vi has a lower bound. Cannot be fixed.") - end - if has_upper_bound(m, vi) - error("Variable $vi has an upper bound. 
Cannot be fixed.") - end - if is_fixed(m, vi) - error("Variable $vi is already fixed.") - end - m._input_problem._variable_info[vi.value].lower_bound = eq.value - m._input_problem._variable_info[vi.value].upper_bound = eq.value - m._input_problem._variable_info[vi.value].has_lower_bound = true - m._input_problem._variable_info[vi.value].has_upper_bound = true - m._input_problem._variable_info[vi.value].is_fixed = true - return CI{SV, ET}(vi.value) -end diff --git a/src/eago_script/loader.jl b/src/eago_script/loader.jl index 9f38997a..45e2b1ab 100644 --- a/src/eago_script/loader.jl +++ b/src/eago_script/loader.jl @@ -21,7 +21,7 @@ function tape_to_list(tape::Tape) node_count = 1 while !isempty(queue) - (active_node_num, prior_prt) = popfirst!(queue) + (active_node_num, _) = popfirst!(queue) @inbounds active_node = tape.nd[active_node_num] @inbounds active_node_child1 = active_node.children[1] @@ -59,7 +59,7 @@ function remove_subexpr_children!(expr::_NonlinearExprData) node_count = 1 while !isempty(queue) - (node_num, prior_prt) = popfirst!(queue) + (node_num, _) = popfirst!(queue) @inbounds active_node = nd[node_num] if (active_node.nodetype !== SUBEXPRESSION && active_node.nodetype !== MOIVARIABLE && @@ -177,7 +177,6 @@ function udf_loader!(x::AbstractOptimizer) x._nlp_data = NLPBlockData(x._nlp_data.constraint_bounds, evaluator, x._nlp_data.has_objective) # reinitialize evaluator - features = features_available(x._nlp_data.evaluator) init_feat = Symbol[:Grad, :Hess] num_nlp_constraints = length(x._nlp_data.constraint_bounds) num_nlp_constraints > 0 && push!(init_feat, :Jac) diff --git a/src/eago_script/script.jl b/src/eago_script/script.jl index c93e92f3..018e9205 100644 --- a/src/eago_script/script.jl +++ b/src/eago_script/script.jl @@ -22,12 +22,13 @@ module Script USER_OPERATOR_ID_START, USER_UNIVAR_OPERATOR_ID_START, UserOperatorRegistry, adjmat import Cassette: @context, Cassette.overdub, Cassette.prehook + import Cassette #import CodeTransformation: 
addmethod! TODO: add this later import ForwardDiff: derivative, gradient! export dag_flattening! include("codetransformation.jl") # TODO: delete this later - include("scrubber.jl") + #include("scrubber.jl") include("substitute.jl") include("patterns.jl") include("tracer.jl") diff --git a/src/eago_script/scrubber.jl b/src/eago_script/scrubber.jl index a591cd7d..25431c0e 100644 --- a/src/eago_script/scrubber.jl +++ b/src/eago_script/scrubber.jl @@ -13,8 +13,8 @@ @context ScrubCtx # Cassette specific functions for scrubbing udfs of objects that interfere with overloading -overdub(ctx::ScrubCtx, ::typeof(typeassert), x::Real, type::Type) = x -function overdub(ctx::ScrubCtx, ::typeof(zeros), t, dims...) +Cassette.overdub(ctx::ScrubCtx, ::typeof(typeassert), x::Real, type::Type) = x +function Cassette.overdub(ctx::ScrubCtx, ::typeof(zeros), t, dims...) if t <: AbstractFloat return zeros(Real, dims...) elseif t <: Integer @@ -23,14 +23,14 @@ function overdub(ctx::ScrubCtx, ::typeof(zeros), t, dims...) return zeros(t, dims...) end #overdub(ctx::ScrubCtx, ::typeof(zero), type::Type) = zero(type) -function overdub(ctx::ScrubCtx, ::typeof(hcat), A...) +function Cassette.overdub(ctx::ScrubCtx, ::typeof(hcat), A...) vA = hcat(A...) sz = size(vA) vR = zeros(Real, sz...) vR[:] = vA[:] return vR end -function overdub(ctx::ScrubCtx, ::typeof(vcat), A...) +function Cassette.overdub(ctx::ScrubCtx, ::typeof(vcat), A...) vA = vcat(A...) sz = size(vA) vR = zeros(Real, sz...) 
diff --git a/src/eago_script/substitute.jl b/src/eago_script/substitute.jl index fcde2685..af6f9bac 100644 --- a/src/eago_script/substitute.jl +++ b/src/eago_script/substitute.jl @@ -153,8 +153,6 @@ function is_match(pattern::Template_Graph, indx::Int, nd::Vector{NodeData}, dag_ const_values::Vector{Float64}, parameter_values::Vector{Float64}) match_flag = true match_dict = Dict{Int,Int}() - pattern_length = pattern.ndlen - dag_length = pattern.daglen pattern_adj = pattern.adj pat_children_arr = rowvals(pattern_adj) dag_children_arr = rowvals(dag_adj) @@ -282,7 +280,7 @@ function substitute!(match_num::Int, node_num::Int, prior_prt::Int, nd::Vector{N queue = Tuple{Int,Int,Int}[(prior_prt, 1, -1)] # node_num, prior_prt, dag_num, dag_prt inner_node_count = node_count while ~isempty(queue) - (num_prt, num_sub, num_sub_prt) = popfirst!(queue) + (num_prt, num_sub, _) = popfirst!(queue) @inbounds active_node = subs_template.nd[num_sub] active_type = active_node.type if active_type === :op diff --git a/src/eago_script/tracer.jl b/src/eago_script/tracer.jl index 13286684..4a15e9ff 100644 --- a/src/eago_script/tracer.jl +++ b/src/eago_script/tracer.jl @@ -81,78 +81,67 @@ for i in (abs, sin, cos, tan, sec, csc, cot, asin, acos, atan, asec, acsc, acsch, coth, acoth, sqrt, log, log2, log10, log1p, exp, exp2, expm1, +, -, inv) id = univariate_operator_to_id[Symbol(i)] - @eval function overdub(ctx::TraceCtx, ::typeof($i), x::SetTrace) - node = NodeInfo(CALLUNIVAR, $id, [val(x)]) - add_set_node!(ctx.metadata, node) + @eval function Cassette.overdub(ctx::TraceCtx, ::typeof($i), x::SetTrace) + add_set_node!(ctx.metadata, NodeInfo(CALLUNIVAR, $id, [val(x)])) return SetTrace(ctx.metadata.set_trace_count) end - @eval overdub(ctx::TraceCtx, ::typeof($i), x::Real) = ($i)(x) + @eval Cassette.overdub(ctx::TraceCtx, ::typeof($i), x::Real) = ($i)(x) end # defines primitives for bivariate CALL operators (NEED TO ADD ^) -for i in (+, -, *, ^, /, max, min) +for i in (+, -, *, ^, /) # TODO ADD 
:max, :min id = operator_to_id[Symbol(i)] - @eval function overdub(ctx::TraceCtx, ::typeof($i), x::SetTrace, y::SetTrace) - node = NodeInfo(CALL, $id, [val(x), val(y)]) - add_set_node!(ctx.metadata, node) + @eval function Cassette.overdub(ctx::TraceCtx, ::typeof($i), x::SetTrace, y::SetTrace) + add_set_node!(ctx.metadata, NodeInfo(CALL, $id, [val(x), val(y)])) SetTrace(ctx.metadata.set_trace_count) end for j in (Int16,Int32,Int64,Float16,Float32,Float64,Irrational) - @eval function overdub(ctx::TraceCtx, ::typeof($i), x::SetTrace, y::($j)) - c_val = add_constant(ctx.metadata, y) - node = NodeInfo(CALL, $id, [val(x),c_val]) - add_set_node!(ctx.metadata, node) + @eval function Cassette.overdub(ctx::TraceCtx, ::typeof($i), x::SetTrace, y::($j)) + add_set_node!(ctx.metadata, NodeInfo(CALL, $id, [val(x), add_constant(ctx.metadata, y)])) SetTrace(ctx.metadata.set_trace_count) end - @eval function overdub(ctx::TraceCtx, ::typeof($i), x::($j), y::SetTrace) - c_val = add_constant(ctx.metadata, x) - node = NodeInfo(CALL, $id, [c_val,val(y)]) - add_set_node!(ctx.metadata, node) + @eval function Cassette.overdub(ctx::TraceCtx, ::typeof($i), x::($j), y::SetTrace) + add_set_node!(ctx.metadata, NodeInfo(CALL, $id, [add_constant(ctx.metadata, x),val(y)])) SetTrace(ctx.metadata.set_trace_count) end end - @eval overdub(ctx::TraceCtx, ::typeof($i), x::Real, y::Real) = ($i)(x,y) + @eval Cassette.overdub(ctx::TraceCtx, ::typeof($i), x::Real, y::Real) = ($i)(x,y) end # defines primitives for bivariate COMPARISON operators for i in (>,<,==,>=,<=) id = comparison_operator_to_id[Symbol(i)] - @eval function overdub(ctx::TraceCtx, ::typeof($i), x::SetTrace, y::SetTrace) - node = NodeInfo(COMPARISON, $id, [val(x), val(y)]) - add_set_node!(ctx.metadata, node) + @eval function Cassette.overdub(ctx::TraceCtx, ::typeof($i), x::SetTrace, y::SetTrace) + add_set_node!(ctx.metadata, NodeInfo(COMPARISON, $id, [val(x), val(y)])) SetTrace(ctx.metadata.set_trace_count) end for j in 
(Int16,Int32,Int64,Float16,Float32,Float64,Irrational) - @eval function overdub(ctx::TraceCtx, ::typeof($i), x::SetTrace, y::($j)) - c_val = add_constant(ctx.metadata, y) - node = NodeInfo(COMPARISON, $id, [val(x),c_val]) - add_set_node!(ctx.metadata, node) + @eval function Cassette.overdub(ctx::TraceCtx, ::typeof($i), x::SetTrace, y::($j)) + add_set_node!(ctx.metadata, NodeInfo(COMPARISON, $id, [val(x),add_constant(ctx.metadata, y)])) SetTrace(ctx.metadata.set_trace_count) end - @eval function overdub(ctx::TraceCtx, ::typeof($i), x::($j), y::SetTrace) - c_val = add_constant(ctx.metadata, x) - node = NodeInfo(COMPARISON, $id, [c_val, val(y)]) - add_set_node!(ctx.metadata, node) + @eval function Cassette.overdub(ctx::TraceCtx, ::typeof($i), x::($j), y::SetTrace) + add_set_node!(ctx.metadata, NodeInfo(COMPARISON, $id, [add_constant(ctx.metadata, x), val(y)])) SetTrace(ctx.metadata.set_trace_count) end end - @eval overdub(ctx::TraceCtx, ::typeof($i), x::Real, y::Real) = ($i)(x, y) + @eval Cassette.overdub(ctx::TraceCtx, ::typeof($i), x::Real, y::Real) = ($i)(x, y) end # define primitives for associative terms -overdub(ctx::TraceCtx, ::typeof(*), x, y) = afoldl(x, y) -overdub(ctx::TraceCtx, ::typeof(afoldl), x, y, z) = afoldl(x, y, z) -overdub(ctx::TraceCtx, ::typeof(afoldl), a, b, c, d, e, f, g, h, i, j, k, l, m, n, o, p...) = afoldl(a, b, c, d, e, f, g, h, i, j, k, l, m, n, o, p...) +Cassette.overdub(ctx::TraceCtx, ::typeof(*), x, y) = afoldl(x, y) +Cassette.overdub(ctx::TraceCtx, ::typeof(afoldl), x, y, z) = afoldl(x, y, z) +Cassette.overdub(ctx::TraceCtx, ::typeof(afoldl), a, b, c, d, e, f, g, h, i, j, k, l, m, n, o, p...) = afoldl(a, b, c, d, e, f, g, h, i, j, k, l, m, n, o, p...) 
# conversion -overdub(ctx::TraceCtx, ::typeof(float), x) = x -overdub(ctx::TraceCtx, ::typeof(AbstractFloat), x) = x +Cassette.overdub(ctx::TraceCtx, ::typeof(float), x) = x +Cassette.overdub(ctx::TraceCtx, ::typeof(AbstractFloat), x) = x # primitive for array access -overdub(ctx::TraceCtx, ::typeof(getindex), A::Array, i::Int) = getindex(A,i) -overdub(ctx::TraceCtx, ::typeof(getindex), A::SetTraceSto, i::Int) = getindex(A,i) +Cassette.overdub(ctx::TraceCtx, ::typeof(getindex), A::Array, i::Int) = getindex(A,i) +Cassette.overdub(ctx::TraceCtx, ::typeof(getindex), A::SetTraceSto, i::Int) = getindex(A,i) -function overdub(ctx::TraceCtx, ::typeof(typeassert), x::Real, type::Type) +function Cassette.overdub(ctx::TraceCtx, ::typeof(typeassert), x::Real, type::Type) if !isa(x,SetTrace) typeassert(x, type) end @@ -160,7 +149,7 @@ function overdub(ctx::TraceCtx, ::typeof(typeassert), x::Real, type::Type) end # prehook for debugging mainly -function prehook(ctx::TraceCtx, f::Function, args...) +function Cassette.prehook(ctx::TraceCtx, f::Function, args...) #println(f, args) end diff --git a/src/eago_semiinfinite/nonconvex_algorithms/sip_res.jl b/src/eago_semiinfinite/nonconvex_algorithms/sip_res.jl index bd5715f8..66e3a506 100644 --- a/src/eago_semiinfinite/nonconvex_algorithms/sip_res.jl +++ b/src/eago_semiinfinite/nonconvex_algorithms/sip_res.jl @@ -97,6 +97,8 @@ function sip_solve!(t::ExtensionType, alg::SIPRes, buffer::SIPSubResult, prob::S buffer.eps_g ./= buffer.r_g end + @show "FINISHED ONE ITERATION..." 
+ # print iteration information and advance print_int!(verb, prob, result, buffer.r_g) result.iteration_number += 1 diff --git a/src/eago_semiinfinite/subproblems.jl b/src/eago_semiinfinite/subproblems.jl index 7fea5422..c645c62a 100644 --- a/src/eago_semiinfinite/subproblems.jl +++ b/src/eago_semiinfinite/subproblems.jl @@ -26,7 +26,7 @@ function build_model(t::DefaultExt, a::A, s::S, p::SIPProblem) where {A <: Abstr return model, v end function build_model(t::ExtensionType, a::A, s::S, p::SIPProblem) where {A <: AbstractSIPAlgo, S <: AbstractSubproblemType} - build_model(DefaultExt(), s, p) + build_model(DefaultExt(), a, s, p) end ### @@ -36,6 +36,8 @@ function set_tolerance_inner!(t::DefaultExt, alg, s, m::JuMP.Model, abs_tol::Flo optimizer_name = JuMP.solver_name(m) if optimizer_name === "EAGO: Easy Advanced Global Optimization" set_optimizer_attribute(m, "absolute_tolerance", abs_tol) + #set_optimizer_attribute(m, "constr_viol_tol", c_tol) + #set_optimizer_attribute(m, "relative_tolerance", rel_tol) elseif optimizer_name === "SCIP" set_optimizer_attribute(m, "limits/absgap", abs_tol) elseif optimizer_name === "Alpine" @@ -88,20 +90,18 @@ function add_uncertainty_constraint!(m::JuMP.Model, prob::SIPProblem) return nothing end -for (p, s) in ((:LowerLevel1,:lbd), (:LowerLevel2, :ubd), (:LowerLevel3,:res)) - @eval function get_xbar(t::DefaultExt, alg::A, s::$p, sr::SIPSubResult) where {A <: AbstractSIPAlgo} - sr.$s.sol - end -end +get_xbar(t::DefaultExt, alg::AbstractSIPAlgo, s::LowerLevel1, sr::SIPSubResult) = sr.lbd.sol +get_xbar(t::DefaultExt, alg::AbstractSIPAlgo, s::LowerLevel2, sr::SIPSubResult) = sr.ubd.sol +get_xbar(t::DefaultExt, alg::AbstractSIPAlgo, s::LowerLevel3, sr::SIPSubResult) = sr.lbd.res function llp_check(islocal::Bool, t::MOI.TerminationStatusCode, r::MOI.ResultStatusCode) - valid, feasible = is_globally_optimal(t, r) + flag = true if islocal && ((t != MOI.LOCALLY_SOLVED) && (t != MOI.ALMOST_LOCALLY_SOLVED)) error("Lower problem did not solve 
to local optimality.") - elseif !valid + elseif t !== MOI.OPTIMAL error("Error in lower level problem. Termination status = $t, primal status = $r.") end - return feasible + return flag end """ @@ -124,16 +124,15 @@ function sip_llp!(t::DefaultExt, alg::A, s::S, result::SIPResult, g(p...) = cb.gSIP[i](xbar, p) register(m, :g, prob.np, g, autodiff=true) if isone(prob.np) - nl_obj = :(-g($(p[1]))) + nl_obj = :(g($(p[1]))) else nl_obj = Expr(:call) push!(nl_obj.args, :g) for i in 1:prob.np push!(nl_obj.args, p[i]) end - nl_obj = :(-$nl_obj) end - set_NL_objective(m, MOI.MIN_SENSE, nl_obj) + set_NL_objective(m, MOI.MAX_SENSE, nl_obj) # add uncertainty constraints add_uncertainty_constraint!(m, prob) @@ -146,7 +145,7 @@ function sip_llp!(t::DefaultExt, alg::A, s::S, result::SIPResult, # fill buffer with subproblem result info psol = JuMP.value.(p) - load!(s, sr, feas, -JuMP.objective_value(m), -JuMP.objective_bound(m), psol) + load!(s, sr, feas, JuMP.objective_value(m), JuMP.objective_bound(m), psol) result.solution_time += MOI.get(m, MOI.SolveTime()) return nothing @@ -154,7 +153,7 @@ end function sip_llp!(t::ExtensionType, alg::A, s::S, result::SIPResult, sr::SIPSubResult, prob::SIPProblem, cb::SIPCallback, i::Int64) where {A <: AbstractSIPAlgo, S <: AbstractSubproblemType} - sip_llp!(DefaultSubproblem(), s, result, sr, prob, cb, i) + sip_llp!(DefaultSubproblem(), alg, s, result, sr, prob, cb, i) end """ @@ -303,7 +302,7 @@ algorithm `alg::AbstractSIPAlgo` in subproblem `s::AbstractSubproblemType` via t command `get_sip_optimizer(t::ExtensionType, alg::AbstractSIPAlgo, s::AbstractSubproblemType)`. 
""" function get_sip_optimizer(t::ExtensionType, alg::A, s::AbstractSubproblemType) where {A <: AbstractSIPAlgo} - return get_sip_optimizer(DefaultExt(), s) + return get_sip_optimizer(DefaultExt(), alg, s) end # Printing @@ -391,15 +390,14 @@ end get_bnds(s::Union{LowerLevel1,LowerLevel2,LowerLevel3}, p::SIPProblem) = p.p_l, p.p_u, p.np get_bnds(s::Union{LowerProblem,UpperProblem, ResProblem}, p::SIPProblem) = p.x_l, p.x_u, p.nx -function bnd_check(is_local::Bool, t::MOI.TerminationStatusCode, - r::MOI.ResultStatusCode) - valid_result, is_feasible = is_globally_optimal(t, r) - if !(valid_result && is_feasible) && !is_local +function bnd_check(is_local::Bool, t::MOI.TerminationStatusCode, r::MOI.ResultStatusCode) + if t === MOI.OPTIMAL + return true + elseif !is_local error("Lower problem did not solve to global optimality. Termination status = $t. Primal status = $r") - elseif !(valid_result && is_feasible) && is_local && - !((t == MOI.LOCALLY_SOLVED) || (t == MOI.ALMOST_LOCALLY_SOLVED)) + elseif is_local && !((t == MOI.LOCALLY_SOLVED) || (t == MOI.ALMOST_LOCALLY_SOLVED)) error("Lower problem did not solve to local optimality.") end - return is_feasible + return false end diff --git a/src/eago_semiinfinite/types.jl b/src/eago_semiinfinite/types.jl index 1a30b6ee..f5c4784a 100644 --- a/src/eago_semiinfinite/types.jl +++ b/src/eago_semiinfinite/types.jl @@ -139,13 +139,14 @@ end mutable struct SubProblemInfo sol::Vector{Float64} + res::Vector{Float64} obj_val::Float64 obj_bnd::Float64 feas::Bool tol::Vector{Float64} end function SubProblemInfo(nd::Int, ng::Int, tol::Float64) - SubProblemInfo(zeros(nd), 0.0, 0.0, false, fill(tol, ng)) + SubProblemInfo(zeros(nd), zeros(nd), 0.0, 0.0, false, fill(tol, ng)) end """ diff --git a/src/precompile.jl b/src/precompile.jl new file mode 100644 index 00000000..dd1a9647 --- /dev/null +++ b/src/precompile.jl @@ -0,0 +1,27 @@ +function _precompile_() + ccall(:jl_generating_output, Cint, ()) == 1 || return nothing + 
Base.precompile(Tuple{typeof(MathOptInterface.optimize!),Optimizer{Incremental{GLPK.Optimizer}, Incremental{Ipopt.Optimizer}, DefaultExt}}) # time: 11.415445 + Base.precompile(Tuple{typeof(reform_epigraph_min!),GlobalOptimizer{Incremental{GLPK.Optimizer}, Incremental{Ipopt.Optimizer}, DefaultExt},ParsedProblem,BufferedNonlinearFunction{1, NS}}) # time: 1.4057246 + Base.precompile(Tuple{typeof(forward_pass!),Evaluator,BufferedNonlinearFunction{2, NS}}) # time: 0.881403 + Base.precompile(Tuple{Type{Optimizer}}) # time: 0.5019401 + Base.precompile(Tuple{typeof(MathOptInterface.empty!),Optimizer{Incremental{GLPK.Optimizer}, Incremental{Ipopt.Optimizer}, DefaultExt}}) # time: 0.0625456 + Base.precompile(Tuple{typeof(initialize!),RelaxCache{1, NS},DirectedTree}) # time: 0.0522575 + Base.precompile(Tuple{Type{BufferedNonlinearFunction},JuMP._FunctionStorage,MathOptInterface.NLPBoundsPair,Dict{Int64, Vector{Int64}},Vector{JuMP._Derivatives.Linearity},OperatorRegistry,Vector{Float64},NS}) # time: 0.030106 + Base.precompile(Tuple{typeof(MathOptInterface.is_empty),Optimizer{Incremental{GLPK.Optimizer}, Incremental{Ipopt.Optimizer}, DefaultExt}}) # time: 0.0280181 + Base.precompile(Tuple{typeof(initialize!),RelaxCache{2, NS},DirectedTree}) # time: 0.0236066 + Base.precompile(Tuple{typeof(_propagate_constraint!),Evaluator,BufferedNonlinearFunction{1, NS}}) # time: 0.0191075 + Base.precompile(Tuple{typeof(rprop!),Relax,Evaluator,BufferedNonlinearFunction{2, NS}}) # time: 0.0188975 + isdefined(EAGO, Symbol("#157#160")) && Base.precompile(Tuple{getfield(EAGO, Symbol("#157#160")),BufferedNonlinearFunction{2, NS}}) # time: 0.0069586 + Base.precompile(Tuple{typeof(relax!),GlobalOptimizer{Incremental{GLPK.Optimizer}, Incremental{Ipopt.Optimizer}, DefaultExt},BufferedNonlinearFunction{1, NS},Int64,Bool}) # time: 0.005456 + Base.precompile(Tuple{typeof(MathOptInterface.add_variable),Optimizer{Incremental{GLPK.Optimizer}, Incremental{Ipopt.Optimizer}, DefaultExt}}) # time: 0.0048171 + 
Base.precompile(Tuple{typeof(MathOptInterface.add_constraint),Optimizer{Incremental{GLPK.Optimizer}, Incremental{Ipopt.Optimizer}, DefaultExt},MathOptInterface.SingleVariable,MathOptInterface.LessThan{Float64}}) # time: 0.0040936 + Base.precompile(Tuple{typeof(MathOptInterface.add_constraint),Optimizer{Incremental{GLPK.Optimizer}, Incremental{Ipopt.Optimizer}, DefaultExt},MathOptInterface.SingleVariable,MathOptInterface.GreaterThan{Float64}}) # time: 0.0038925 + Base.precompile(Tuple{typeof(MathOptInterface.set),Optimizer{Incremental{GLPK.Optimizer}, Incremental{Ipopt.Optimizer}, DefaultExt},MathOptInterface.RawParameter,Int64}) # time: 0.0033517 + Base.precompile(Tuple{typeof(MathOptInterface.set),Optimizer{Incremental{GLPK.Optimizer}, Incremental{Ipopt.Optimizer}, DefaultExt},MathOptInterface.NLPBlock,MathOptInterface.NLPBlockData}) # time: 0.0032988 + Base.precompile(Tuple{typeof(MathOptInterface.set),Optimizer{Incremental{GLPK.Optimizer}, Incremental{Ipopt.Optimizer}, DefaultExt},MathOptInterface.ObjectiveFunction{MathOptInterface.SingleVariable},MathOptInterface.SingleVariable}) # time: 0.0022907 + Base.precompile(Tuple{Type{RelaxCache{1, NS}}}) # time: 0.0020124 + Base.precompile(Tuple{Type{RelaxCache{2, NS}}}) # time: 0.0017719 + isdefined(EAGO, Symbol("#133#135")) && Base.precompile(Tuple{getfield(EAGO, Symbol("#133#135")),BufferedNonlinearFunction{2, NS}}) # time: 0.0014442 + Base.precompile(Tuple{typeof(MathOptInterface.set),Optimizer{Incremental{GLPK.Optimizer}, Incremental{Ipopt.Optimizer}, DefaultExt},MathOptInterface.ObjectiveSense,MathOptInterface.OptimizationSense}) # time: 0.0012987 + return +end diff --git a/src/subsolvers/cbc.jl b/src/subsolvers/cbc.jl new file mode 100644 index 00000000..3769edcc --- /dev/null +++ b/src/subsolvers/cbc.jl @@ -0,0 +1,9 @@ +_is_incremental(::Type{Cbc.Optimizer}) = false + +function set_default_config!(ext::ExtensionType, d::GlobalOptimizer, m::Incremental{Cbc.Optimizer}, local_solver::Bool) + #MOI.set(m, 
MOI.RawOptimizerAttribute("allowableGap"), _absolute_tol(d)*1E-2) + #MOI.set(m, MOI.RawOptimizerAttribute("ratioGap"), _absolute_tol(d)*1E-2) + #MOI.set(m, MOI.RawOptimizerAttribute("logLevel"), 0) + #MOI.set(m, MOI.RawOptimizerAttribute("threads"), Threads.nthreads()) + return +end \ No newline at end of file diff --git a/src/subsolvers/clp.jl b/src/subsolvers/clp.jl new file mode 100644 index 00000000..8a3ffedd --- /dev/null +++ b/src/subsolvers/clp.jl @@ -0,0 +1,14 @@ +_is_incremental(::Type{Clp.Optimizer}) = false + +function set_default_config!(ext::ExtensionType, d::GlobalOptimizer, m::Clp.Optimizer, local_solver::Bool) + MOI.set(m, MOI.RawOptimizerAttribute("PrimalTolerance"), _absolute_tol(d)*1E-2) + MOI.set(m, MOI.RawOptimizerAttribute("DualTolerance"), _absolute_tol(d)*1E-2) + MOI.set(m, MOI.RawOptimizerAttribute("DualObjectiveLimit"), 1e308) + MOI.set(m, MOI.RawOptimizerAttribute("MaximumIterations"), 2147483647) + MOI.set(m, MOI.RawOptimizerAttribute("PresolveType"), 0) + MOI.set(m, MOI.RawOptimizerAttribute("SolveType"), 5) + MOI.set(m, MOI.RawOptimizerAttribute("InfeasibleReturn"), 1) + MOI.set(m, MOI.RawOptimizerAttribute("Scaling"), 3) + MOI.set(m, MOI.RawOptimizerAttribute("Perturbation"), 100) + return +end \ No newline at end of file diff --git a/src/subsolvers/cplex.jl b/src/subsolvers/cplex.jl new file mode 100644 index 00000000..1d458057 --- /dev/null +++ b/src/subsolvers/cplex.jl @@ -0,0 +1 @@ +_is_incremental(::Type{CPLEX.Optimizer}) = false \ No newline at end of file diff --git a/src/subsolvers/glpk.jl b/src/subsolvers/glpk.jl new file mode 100644 index 00000000..91ec80cc --- /dev/null +++ b/src/subsolvers/glpk.jl @@ -0,0 +1,5 @@ +_is_incremental(::Type{GLPK.Optimizer}) = true + +function set_default_config!(ext::ExtensionType, d::GlobalOptimizer, m::Incremental{GLPK.Optimizer}, local_solver::Bool) + return +end \ No newline at end of file diff --git a/src/subsolvers/gurobi.jl b/src/subsolvers/gurobi.jl new file mode 100644 index 
00000000..2a496800 --- /dev/null +++ b/src/subsolvers/gurobi.jl @@ -0,0 +1 @@ +_is_incremental(::Type{Gurobi.Optimizer}) = true \ No newline at end of file diff --git a/src/subsolvers/hypatia.jl b/src/subsolvers/hypatia.jl new file mode 100644 index 00000000..84ff3b08 --- /dev/null +++ b/src/subsolvers/hypatia.jl @@ -0,0 +1 @@ +_is_incremental(::Type{Hypatia.Optimizer}) = false \ No newline at end of file diff --git a/src/subsolvers/ipopt.jl b/src/subsolvers/ipopt.jl new file mode 100644 index 00000000..e9bff34e --- /dev/null +++ b/src/subsolvers/ipopt.jl @@ -0,0 +1,20 @@ +_is_incremental(::Type{Ipopt.Optimizer}) = true + +function set_default_config!(ext::ExtensionType, d::GlobalOptimizer, m::Incremental{Ipopt.Optimizer}, local_solver::Bool) + c_tol = _constraint_tol(d)*1E-3 + MOI.set(m, MOI.RawOptimizerAttribute("tol"), _absolute_tol(d)*1E-3) + MOI.set(m, MOI.RawOptimizerAttribute("print_level"), 0) + MOI.set(m, MOI.RawOptimizerAttribute("constr_viol_tol"), c_tol) + if local_solver + MOI.set(m, MOI.RawOptimizerAttribute("max_iter"), 20000) + MOI.set(m, MOI.RawOptimizerAttribute("acceptable_tol"), 1E30) + MOI.set(m, MOI.RawOptimizerAttribute("acceptable_iter"), 10000) + MOI.set(m, MOI.RawOptimizerAttribute("acceptable_compl_inf_tol"), c_tol) + MOI.set(m, MOI.RawOptimizerAttribute("acceptable_dual_inf_tol"), 1.0) + MOI.set(m, MOI.RawOptimizerAttribute("acceptable_constr_viol_tol"), c_tol) + else + MOI.set(m, MOI.RawOptimizerAttribute("max_iter"), 1E5) + MOI.set(m, MOI.RawOptimizerAttribute("acceptable_iter"), 1E5+1) + end + return +end \ No newline at end of file diff --git a/src/subsolvers/knitro.jl b/src/subsolvers/knitro.jl new file mode 100644 index 00000000..e69de29b diff --git a/src/subsolvers/mosek.jl b/src/subsolvers/mosek.jl new file mode 100644 index 00000000..e69de29b diff --git a/src/subsolvers/xpress.jl b/src/subsolvers/xpress.jl new file mode 100644 index 00000000..e69de29b diff --git a/test/branch_bound.jl b/test/branch_bound.jl index 
4d39d27a..1a3d62d6 100644 --- a/test/branch_bound.jl +++ b/test/branch_bound.jl @@ -1,69 +1,72 @@ @testset "Test Stack Management Functions" begin - x = EAGO.Optimizer() + m = EAGO.Optimizer() + x = m._global_optimizer x._parameters.verbosity = 0 x._branch_variable_count = 2 x._branch_to_sol_map = [1; 2] x._fixed_variable = [false, false] x._parameters.branch_variable = [true, true] - x._working_problem._variable_info = [EAGO.VariableInfo(false,1.0,false,2.0,false,false,EAGO.BRANCH), - EAGO.VariableInfo(false,2.0,false,6.0,false,false,EAGO.BRANCH)] + x._working_problem._variable_info = [EAGO.VariableInfo(false, true, true, false, true, 1.0, 2.0), + EAGO.VariableInfo(false, true, true, false, true, 2.0, 6.0)] x._lower_solution = [1.4, 5.3] - y = EAGO.NodeBB(Float64[1.0,5.0], Float64[2.0,6.0], -Inf, Inf, 2, 1) + y = EAGO.NodeBB(Float64[1.0,5.0], Float64[2.0,6.0], Bool[false, false], true, -Inf, Inf, 2, 1,1, EAGO.BD_NONE, 1, 0.1) x._current_node = y EAGO.branch_node!(x) - EAGO.node_selection!(x.ext_type, x) + EAGO.node_selection!(x) @test isapprox(x._current_node.lower_variable_bounds[1], 1.0; atol = 1E-4) - @test isapprox(x._current_node.upper_variable_bounds[1], 1.475; atol = 1E-2) + @test isapprox(x._current_node.upper_variable_bounds[1], 1.45; atol = 1E-2) - EAGO.node_selection!(x.ext_type, x) - @test isapprox(x._current_node.lower_variable_bounds[1], 1.475; atol = 1E-2) + EAGO.node_selection!(x) + @test isapprox(x._current_node.lower_variable_bounds[1], 1.45; atol = 1E-2) @test isapprox(x._current_node.upper_variable_bounds[1], 2.0; atol = 1E-4) x._global_upper_bound = -4.5 empty!(x._stack) - push!(x._stack, EAGO.NodeBB(Float64[1.0,5.0], Float64[2.0,6.0], -4.0, 1.0, 2, 1)) - push!(x._stack, EAGO.NodeBB(Float64[2.0,5.0], Float64[5.0,6.0], -5.0, 4.0, 2, 1)) - push!(x._stack, EAGO.NodeBB(Float64[2.0,3.0], Float64[4.0,5.0], -2.0, 3.0, 2, 1)) + push!(x._stack, EAGO.NodeBB(Float64[1.0,5.0], Float64[2.0,6.0], Bool[false, false], true, -4.0, 1.0, 2, 1, 1, EAGO.BD_NONE, 
1, 0.1)) + push!(x._stack, EAGO.NodeBB(Float64[2.0,5.0], Float64[5.0,6.0], Bool[false, false], true, -5.0, 4.0, 2, 1, 1, EAGO.BD_NONE, 1, 0.1)) + push!(x._stack, EAGO.NodeBB(Float64[2.0,3.0], Float64[4.0,5.0], Bool[false, false], true, -2.0, 3.0, 2, 1, 1, EAGO.BD_NONE, 1, 0.1)) EAGO.node_selection!(x) @test x._current_node.lower_bound == -5.0 x._global_upper_bound = -4.5 - push!(x._stack, EAGO.NodeBB(Float64[1.0,5.0], Float64[2.0,6.0], -4.0, 1.0, 2, 1)) - push!(x._stack, EAGO.NodeBB(Float64[2.0,5.0], Float64[5.0,6.0], -5.0, 4.0, 2, 1)) - push!(x._stack, EAGO.NodeBB(Float64[2.0,3.0], Float64[4.0,5.0], -2.0, 3.0, 2, 1)) + push!(x._stack, EAGO.NodeBB(Float64[1.0,5.0], Float64[2.0,6.0], Bool[false, false], true, -4.0, 1.0, 2, 1, 1, EAGO.BD_NONE, 1, 0.1)) + push!(x._stack, EAGO.NodeBB(Float64[2.0,5.0], Float64[5.0,6.0], Bool[false, false], true, -5.0, 4.0, 2, 1, 1, EAGO.BD_NONE, 1, 0.1)) + push!(x._stack, EAGO.NodeBB(Float64[2.0,3.0], Float64[4.0,5.0], Bool[false, false], true, -2.0, 3.0, 2, 1, 1, EAGO.BD_NONE, 1, 0.1)) EAGO.set_global_lower_bound!(x) @test x._global_lower_bound == -5.0 empty!(x._stack) x._global_upper_bound = -4.5 - push!(x._stack, EAGO.NodeBB(Float64[1.0,5.0], Float64[2.0,6.0], -4.0, 1.0, 2, 1)) - push!(x._stack, EAGO.NodeBB(Float64[2.0,5.0], Float64[5.0,6.0], -5.0, 4.0, 2, 1)) - push!(x._stack, EAGO.NodeBB(Float64[2.0,3.0], Float64[4.0,5.0], -2.0, 3.0, 2, 1)) + push!(x._stack, EAGO.NodeBB(Float64[1.0,5.0], Float64[2.0,6.0], Bool[false, false], true, -4.0, 1.0, 2, 1, 1, EAGO.BD_NONE, 1, 0.1)) + push!(x._stack, EAGO.NodeBB(Float64[2.0,5.0], Float64[5.0,6.0], Bool[false, false], true, -5.0, 4.0, 2, 1, 1, EAGO.BD_NONE, 1, 0.1)) + push!(x._stack, EAGO.NodeBB(Float64[2.0,3.0], Float64[4.0,5.0], Bool[false, false], true, -2.0, 3.0, 2, 1, 1, EAGO.BD_NONE, 1, 0.1)) EAGO.fathom!(x) @test length(x._stack) == 1 @inferred EAGO.branch_node!(x) - @inferred EAGO.node_selection!(x.ext_type, x) + @inferred EAGO.node_selection!(EAGO._ext(x), x) @inferred 
EAGO.set_global_lower_bound!(x) @inferred EAGO.fathom!(x) - @test ~isless(EAGO.NodeBB(Float64[1.0,5.0], Float64[2.0,6.0], -4.0, 1.0, 2, 1), EAGO.NodeBB(Float64[2.0,5.0], Float64[5.0,6.0], -5.0, 4.0, 2, 1)) - @test length(EAGO.NodeBB(Float64[1.0,5.0], Float64[2.0,6.0], -4.0, 1.0, 2, 1)) == 2 - diamx = EAGO.diam(EAGO.NodeBB(Float64[1.0,5.0], Float64[2.0,7.0], -4.0, 1.0, 2, 1)) + @test ~isless(EAGO.NodeBB(Float64[1.0,5.0], Float64[2.0,6.0], Bool[false, false], true, -4.0, 1.0, 2, 1, 1, EAGO.BD_NONE, 1, 0.1), + EAGO.NodeBB(Float64[2.0,5.0], Float64[5.0,6.0], Bool[false, false], true, -5.0, 4.0, 2, 1, 1, EAGO.BD_NONE, 1, 0.1)) + @test length(EAGO.NodeBB(Float64[1.0,5.0], Float64[2.0,6.0], Bool[false, false], true, -4.0, 1.0, 2, 1, 1, EAGO.BD_NONE, 1, 0.1)) == 2 + diamx = EAGO.diam(EAGO.NodeBB(Float64[1.0,5.0], Float64[2.0,7.0], Bool[false, false], true, -4.0, 1.0, 2, 1, 1, EAGO.BD_NONE, 1, 0.1)) @test diamx[1] == 1.0 @test diamx[2] == 2.0 #create_initial_node!(x) end @testset "Test B&B Checks" begin - x = EAGO.Optimizer() + m = EAGO.Optimizer() + x = m._global_optimizer x._parameters.verbosity = 0 x._branch_variable_count = 2 - x._working_problem._variable_info = [EAGO.VariableInfo(false,1.0,false,2.0,false,false,EAGO.BRANCH), - EAGO.VariableInfo(false,2.0,false,6.0,false,false,EAGO.BRANCH)] - y = EAGO.NodeBB(Float64[1.0,5.0], Float64[2.0,6.0], -Inf, Inf, 2, 1) + x._working_problem._variable_info = [EAGO.VariableInfo(false, true, true, false, true, 1.0, 2.0), + EAGO.VariableInfo(false, true, true, false, true, 2.0, 6.0)] + y = EAGO.NodeBB(Float64[1.0,5.0], Float64[2.0,6.0], Bool[false, false], true, -Inf, Inf, 2, 1, 1, EAGO.BD_NONE, 1, 0.1) @test EAGO.repeat_check(EAGO.DefaultExt(), x) == false @@ -102,32 +105,11 @@ end x._run_time = 100.0 x._parameters.time_limit = 10.0 @test EAGO.termination_check(EAGO.DefaultExt(),x) - @test x._termination_status_code === MOI.TIME_LIMIT - - @test @inferred EAGO.is_feasible_solution(MOI.OPTIMAL, MOI.FEASIBLE_POINT) - @test 
EAGO.is_feasible_solution(MOI.LOCALLY_SOLVED, MOI.FEASIBLE_POINT) - @test EAGO.is_feasible_solution(MOI.ALMOST_LOCALLY_SOLVED, MOI.NEARLY_FEASIBLE_POINT) - @test ~EAGO.is_feasible_solution(MOI.INFEASIBLE, MOI.FEASIBLE_POINT) - - valid, feas = @inferred EAGO.is_globally_optimal(MOI.INFEASIBLE, MOI.INFEASIBILITY_CERTIFICATE) - @test (valid & ~feas) - valid, feas = EAGO.is_globally_optimal(MOI.INFEASIBLE, MOI.INFEASIBILITY_CERTIFICATE) - @test (valid & ~feas) - valid, feas = EAGO.is_globally_optimal(MOI.INFEASIBLE, MOI.NO_SOLUTION) - @test (valid & ~feas) - valid, feas = EAGO.is_globally_optimal(MOI.INFEASIBLE, MOI.UNKNOWN_RESULT_STATUS) - @test (valid & ~feas) - valid, feas = EAGO.is_globally_optimal(MOI.OPTIMAL, MOI.FEASIBLE_POINT) - @test (valid & feas) - valid, feas = EAGO.is_globally_optimal(MOI.INFEASIBLE_OR_UNBOUNDED, MOI.NO_SOLUTION) - @test (valid & ~feas) - valid, feas = EAGO.is_globally_optimal(MOI.SLOW_PROGRESS, MOI.REDUCTION_CERTIFICATE) - @test ~valid end @testset "Node Access Functions" begin - x = EAGO.NodeBB(Float64[1.0,5.0], Float64[2.0,6.0], -3.4, 2.1, 2, 1) - emptyx = EAGO.NodeBB(Float64[7.0,5.0], Float64[1.0,6.0], -3.4, 2.1, 2, 1) + x = EAGO.NodeBB(Float64[1.0,5.0], Float64[2.0,6.0], Bool[false, false], true, -3.4, 2.1, 2, 1, 1, EAGO.BD_NONE, 1, 0.1) + emptyx = EAGO.NodeBB(Float64[7.0,5.0], Float64[1.0,6.0], Bool[false, false], true, -3.4, 2.1, 2, 1, 1, EAGO.BD_NONE, 1, 0.1) @test isempty(emptyx) @@ -143,8 +125,8 @@ end @test_nowarn @inferred EAGO.upper_bound(x) @test_nowarn @inferred EAGO.depth(x) - x1 = EAGO.NodeBB(Float64[1.0,5.0], Float64[3.0,6.0], -3.4, 2.1, 2, 1) - x2 = EAGO.NodeBB(Float64[0.9,5.0], Float64[2.0,6.0], -3.4, 2.1, 2, 1) + x1 = EAGO.NodeBB(Float64[1.0,5.0], Float64[3.0,6.0], Bool[false, false], true, -3.4, 2.1, 2, 1, 1, EAGO.BD_NONE, 1, 0.1) + x2 = EAGO.NodeBB(Float64[0.9,5.0], Float64[2.0,6.0], Bool[false, false], true, -3.4, 2.1, 2, 1, 1, EAGO.BD_NONE, 1, 0.1) x3 = copy(x) x4 = EAGO.NodeBB(x) @test ~EAGO.same_box(x, x1, 0.0) 
diff --git a/test/domain_reduction.jl b/test/domain_reduction.jl index 021f2528..ac8e86f3 100644 --- a/test/domain_reduction.jl +++ b/test/domain_reduction.jl @@ -4,8 +4,8 @@ yupper = Float64[4.0, 4.0, 4.0, 4.0] ymult_lo = Float64[50.0, 0.0, 1.0, 0.0] ymult_hi = Float64[0.0, 0.0, 0.8, 3.0] - - n = EAGO.NodeBB(ylower, yupper, -Inf, Inf, 3, 2) + isint = Bool[false, false] + n = EAGO.NodeBB(ylower, yupper, isint, true, -Inf, Inf, 2, 1, 1, EAGO.BD_NONE, 1, 0.1) @inferred EAGO.variable_dbbt!(n, ymult_lo, ymult_hi, 1.0, 3.0, 4) lvb = n.lower_variable_bounds uvb = n.upper_variable_bounds diff --git a/test/minlp_tests.jl b/test/minlp_tests.jl new file mode 100644 index 00000000..0d246b94 --- /dev/null +++ b/test/minlp_tests.jl @@ -0,0 +1,109 @@ +using MINLPTests + +solver = JuMP.optimizer_with_attributes(EAGO.Optimizer, "relative_tolerance" => 1E-9) + +minlp_nlp_exclude = String[ + "001_010", # Unbounded box, check solution bad if not gradient-based.... + "002_010", # Unbounded box + "004_010", # Unbounded box + "004_011", # Unbounded box + "005_010", # Unbounded box + "006_010", # Unbounded box + "007_010", # Unbounded box + "008_010", # Unbounded box + "008_011", # Unbounded box + "005_011" # \ operator not in JuMP +] +MINLPTests.test_nlp(solver, exclude = minlp_nlp_exclude, + objective_tol = 1E-3, + #primal_tol = PRIMAL_TOL, + dual_tol = NaN, + termination_target = MINLPTests.TERMINATION_TARGET_GLOBAL, + primal_target = MINLPTests.PRIMAL_TARGET_GLOBAL) + +minlp_nlp_cvx_exclude = String[ + + "001_011", # convex quadratic objective... (linear unbounded...) + "002_011", # unbounded linear problem & convex quadratic objective + "101_010", # convex quadratic nl constraints... 
+ "101_011", + "101_012", + "102_010", + "102_011", + "102_012", + "102_013", + "102_014", + "103_010", + "103_011", + "103_012", + "103_013", + "103_014", + "104_010", + "105_010", + "105_011", + "105_012", + "105_013", + "106_010", # simple bounded domain + "106_011", # + "107_010", + "107_011", + "107_012", + "108_010", + "108_011", + "108_012", + "108_013", + "109_010", + "109_011", + "109_012", + "110_010", + "110_011", + "110_012", + "201_010", + "201_011", + "202_010", + "202_011", + "202_012", + "202_013", + "202_014", + "203_010", + "204_010", + "205_010", + "206_010", + "210_010", + "210_011", + "210_012", + "501_010", + "501_011" +] +MINLPTests.test_nlp_cvx(solver, exclude = minlp_nlp_cvx_exclude, + objective_tol = 1E-3, + #primal_tol = PRIMAL_TOL, + dual_tol = NaN, + termination_target = MINLPTests.TERMINATION_TARGET_GLOBAL, + primal_target = MINLPTests.PRIMAL_TARGET_GLOBAL) + +minlp_nlp_mi_exclude = String[ + "001_010", # no box constraints + "002_010", + + #"003_010", # TODO: Fix 003_010 - 003_016 + "003_011", # FAIL + "003_012", # FAIL + "003_013", # FAIL + "003_014", # FAIL Never converges... + "003_015", #FAIL + "003_016", + + "004_010", + "004_011", + "004_012", + "005_010", + "005_011", # \ operator not in JuMP + "006_010", + "007_010", + #"007_020" # no way of specifying +] +MINLPTests.test_nlp_mi(solver, exclude = minlp_nlp_mi_exclude, +objective_tol = 1E-3, +termination_target = MINLPTests.TERMINATION_TARGET_GLOBAL, +primal_target = MINLPTests.PRIMAL_TARGET_GLOBAL) \ No newline at end of file diff --git a/test/moit_tests.jl b/test/moit_tests.jl new file mode 100644 index 00000000..96ded055 --- /dev/null +++ b/test/moit_tests.jl @@ -0,0 +1,218 @@ +#= +const unit_excludes = String[ + "number_threads", # EAGO won't support number of threads in near future + "raw_status_string", # TODO: ADD internal status states to EAGO + "solve_unbounded_model", # CBC returns infeasible or unbounded for linear... 
+ "solve_zero_one_with_bounds_3", # GLPK has a non-standard return code + "solve_result_index", # TODO: Should throw error when querying for multiple results... (expected behavior?) + "solve_qcp_edge_cases", # Not box constrained NLP type problems... + "solve_qp_zero_offdiag", + "solve_qp_edge_cases", + "solve_affine_deletion_edge_cases" # TODO: Fix this +] + +const contlinear_excludes = String[ + "partial_start", # EAGO doesn't support VariablePrimalStart + "linear1", # TODO: Fix this + + #= + "linear13", + "linear8a", + "linear14", + "linear6", + "linear4", + "linear3", + "linear9", + "linear8c", + "linear2", + "linear12", + "linear7", + "linear8b", + "linear10b", + "linear10", + "linear15", + "linear5", + "linear11" + =# +] + +const intlinear_excludes = String[ + "indicator1", # doesn't currently support indicator sets + "indicator2", # can't check using Cbc until https://github.com/jump-dev/Cbc.jl/issues/151 is resolved + "indicator3", + "indicator4", + + "int2", # currently doesn't support sos1 or sos2 constraints + "int3", # TODO: Fix this + + # Passing + #"semiinttest", + #"semiconttest", + #"int1", + #"knapsack" +] + +const contconic_excludes = String[ + "dualexp", # Not directly bridged to common cones + "dualpow", + "logdet", + "rootdet", + "sdp", + "normnuc", + "exp", + "soc", + "normspec", + "relentr", + "rsoc", + "pow", + "geomean" +] + +const contquadratic_excludes = String[ + "ncqcp", + "qp", + "socp", + "qcp", +] + +function test_moi(T::Type{<:Real}; solver_options...) 
+ + optimizer = MOIU.CachingOptimizer(MOIU.UniversalFallback(MOIU.Model{T}()), EAGO.Optimizer()) + MOI.set(optimizer, MOI.RawParameter("verbosity"), 0) + + tol = 2sqrt(sqrt(eps(T))) + config = MOIT.Config(T; + atol = tol, + rtol = tol, + #solve = true, + #query = true, + #modify_lhs = false, + #duals = false, + #infeas_certificates = false, + ) + + @testset "unit tests" begin + MOIT.unittest(MOIB.full_bridge_optimizer(optimizer, T), config, unit_excludes) + end + + @testset "continuous linear tests" begin + MOIT.contlineartest(MOIB.full_bridge_optimizer(optimizer, T), config, contlinear_excludes) + end + + @testset "mixed-integer linear tests" begin + MOIT.intlineartest(MOIB.full_bridge_optimizer(optimizer, T), config, intlinear_excludes) + end + + @testset "continuous conic tests" begin + MOIT.contconictest(MOIB.full_bridge_optimizer(optimizer, T), config, contconic_excludes) + end + + @testset "continuous quadratic tests" begin + MOIT.contquadratictest(MOIB.full_bridge_optimizer(optimizer, T), config, contquadratic_excludes) + end +end + +# Test with mip_solver = Cbc as it supports SOS1 & SOS2 constraints +# TODO: Use bridges for SOS1 & SOS2 constraint if unsupported +# Need to test with GLPK as well to ensure subsolver supports constraint +# coefficient modification. 
+test_moi(Float64) +=# + +module TestEAGO + +import EAGO +using MathOptInterface +using Test + +const MOI = MathOptInterface +const OPTIMIZER = MOI.instantiate(MOI.OptimizerWithAttributes(EAGO.Optimizer, MOI.Silent() => true)) +const BRIDGED = MOI.instantiate(MOI.OptimizerWithAttributes(EAGO.Optimizer, MOI.Silent() => true), with_bridge_type = Float64) +const CONFIG = MOI.Test.Config(atol = 1e-3, rtol = 1e-3, optimal_status = MOI.OPTIMAL, + exclude = Any[MOI.DualObjectiveValue, MOI.ConstraintBasisStatus, MOI.VariableName, MOI.ConstraintName, MOI.delete, + MOI.ConstraintDual, MOI.ListOfModelAttributesSet, MOI.add_constrained_variables]) + +""" + runtests() + +This function runs all functions in the this Module starting with `test_`. +""" +function runtests() + for name in names(@__MODULE__; all = true) + if startswith("$(name)", "test_") + @testset "$(name)" begin + getfield(@__MODULE__, name)() + end + end + end +end + +""" + test_runtests() + +This function runs all the tests in MathOptInterface.Test. + +Pass arguments to `exclude` to skip tests for functionality that is not +implemented or that your solver doesn't support. 
+""" +function test_runtests() + MOI.Test.runtests(BRIDGED, CONFIG, + exclude = [# IPOPT Inherited test exclusions + "test_model_ScalarFunctionConstantNotZero", + "test_solve_TerminationStatus_DUAL_INFEASIBLE", + "test_linear_VectorAffineFunction_empty_row", + "test_solve_DualStatus_INFEASIBILITY_CERTIFICATE_", + "test_model_LowerBoundAlreadySet", + "test_model_UpperBoundAlreadySet", + "test_model_copy_to_UnsupportedAttribute", + "test_model_copy_to_UnsupportedConstraint", + "test_objective_set_via_modify", + "test_model_ModelFilter_ListOfConstraintIndices", + "test_model_ModelFilter_ListOfConstraintTypesPresent", + # Cbc default test exlucisons + "_Indicator_", + "test_linear_SOS1_integration", + "test_linear_SOS2_integration", + "test_solve_SOS2_add_and_delete", + "test_conic_NormInfinityCone_INFEASIBLE", + "test_conic_NormOneCone_INFEASIBLE", + "test_solve_TerminationStatus_DUAL_INFEASIBLE", + + # EAGO test exclusions + "test_attribute_NumberOfThreads", + "test_modification_", + "test_linear_integration_delete_variables", + "conic_NormOneCone_VectorAffineFunction", + "conic_NormOneCone_VectorOfVariables", + "conic_NormInfinityCone_VectorOfVariables", + "conic_NormInfinityCone_VectorAffineFunction", + "linear_integer_solve_twice", + "linear_integration", + "conic_linear_VectorAffineFunction", + "conic_linear_VectorAffineFunction_2", + + "test_quadratic_SecondOrderCone_basic", + + "test_quadratic_constraint_GreaterThan", + "test_quadratic_constraint_LessThan", + "test_quadratic_constraint_basic", + "test_quadratic_constraint_minimize", + "test_quadratic_duplicate_terms", + "test_quadratic_integration", + "test_quadratic_nonconvex_constraint_integration", + "test_quadratic_nonhomogeneous", + + "test_constraint_qcp_duplicate_diagonal", + "test_constraint_qcp_duplicate_off_diagonal", + "test_objective_get_ObjectiveFunction_ScalarAffineFunction", + "test_objective_qp_ObjectiveFunction_edge_cases", + "test_objective_qp_ObjectiveFunction_zero_ofdiag", + ], + 
exclude_tests_after = v"0.10.5") +end + +end + +@testset "MOI" begin + TestEAGO.runtests() +end \ No newline at end of file diff --git a/test/optimizer.jl b/test/optimizer.jl index dc5f5e04..43a5b6fa 100644 --- a/test/optimizer.jl +++ b/test/optimizer.jl @@ -10,7 +10,7 @@ end m = EAGO.Optimizer() @test MOI.get(m, MOI.SolverName()) === "EAGO: Easy Advanced Global Optimization" - m._maximum_node_id = 55 + m._node_count = 55 @test MOI.get(m, MOI.NodeCount()) === 55 m._result_status_code = MOI.FEASIBLE_POINT @@ -19,18 +19,9 @@ end m._result_status_code = MOI.OTHER_RESULT_STATUS @test MOI.get(m, MOI.ResultCount()) === 0 - m._global_lower_bound = 4.0 - m._global_upper_bound = 6.0 - m._input_problem._optimization_sense = MOI.MIN_SENSE - @test isapprox(MOI.get(m, MOI.RelativeGap()), 0.33333333, atol=1E-5) - - m._input_problem._optimization_sense = MOI.MAX_SENSE - @test MOI.get(m, MOI.RelativeGap()) === 0.5 - @test MOI.get(m, MOI.ObjectiveBound()) === -4.0 - m._parameters.verbosity = 2 m._parameters.log_on = true - MOI.set(m, MOI.Silent(), 1) + MOI.set(m, MOI.Silent(), true) @test m._parameters.verbosity === 0 @test m._parameters.log_on === false @@ -84,59 +75,42 @@ end @testset "Variable Bounds" begin model = EAGO.Optimizer() - @test MOI.supports_constraint(model, MOI.SingleVariable, MOI.LessThan{Float64}) - @test MOI.supports_constraint(model, MOI.SingleVariable, MOI.GreaterThan{Float64}) - @test MOI.supports_constraint(model, MOI.SingleVariable, MOI.EqualTo{Float64}) + @test MOI.supports_constraint(model, MOI.VariableIndex, MOI.LessThan{Float64}) + @test MOI.supports_constraint(model, MOI.VariableIndex, MOI.GreaterThan{Float64}) + @test MOI.supports_constraint(model, MOI.VariableIndex, MOI.EqualTo{Float64}) x = MOI.add_variables(model,3) z = MOI.add_variable(model) - @inferred MOI.add_constraint(model, MOI.SingleVariable(x[1]), MOI.GreaterThan(-1.0)) - @inferred MOI.add_constraint(model, MOI.SingleVariable(x[2]), MOI.LessThan(-1.0)) - @inferred MOI.add_constraint(model, 
MOI.SingleVariable(x[3]), MOI.EqualTo(2.0)) - - @test model._input_problem._variable_info[1].is_integer == false - @test model._input_problem._variable_info[1].lower_bound == -1.0 - @test model._input_problem._variable_info[1].has_lower_bound == true - @test model._input_problem._variable_info[1].upper_bound == Inf - @test model._input_problem._variable_info[1].has_upper_bound == false - @test model._input_problem._variable_info[1].is_fixed == false - - @test model._input_problem._variable_info[2].is_integer == false - @test model._input_problem._variable_info[2].lower_bound == -Inf - @test model._input_problem._variable_info[2].has_lower_bound == false - @test model._input_problem._variable_info[2].upper_bound == -1.0 - @test model._input_problem._variable_info[2].has_upper_bound == true - @test model._input_problem._variable_info[2].is_fixed == false - - @test model._input_problem._variable_info[3].is_integer == false - @test model._input_problem._variable_info[3].lower_bound == 2.0 - @test model._input_problem._variable_info[3].has_lower_bound == true - @test model._input_problem._variable_info[3].upper_bound == 2.0 - @test model._input_problem._variable_info[3].has_upper_bound == true - @test model._input_problem._variable_info[3].is_fixed == true - - @test_throws ErrorException MOI.add_constraint(model, MOI.SingleVariable(x[1]), MOI.GreaterThan(NaN)) - @test_throws ErrorException MOI.add_constraint(model, MOI.SingleVariable(x[1]), MOI.LessThan(NaN)) - - @test_throws ErrorException MOI.add_constraint(model, MOI.SingleVariable(x[1]), MOI.GreaterThan(-3.5)) - @test_throws ErrorException MOI.add_constraint(model, MOI.SingleVariable(x[1]), MOI.EqualTo(-3.5)) - #@test_throws ErrorException MOI.add_constraint(model, MOI.SingleVariable(x[1]), MOI.ZeroOne()) - - @test_throws ErrorException MOI.add_constraint(model, MOI.SingleVariable(x[2]), MOI.LessThan(-3.5)) - @test_throws ErrorException MOI.add_constraint(model, MOI.SingleVariable(x[2]), MOI.EqualTo(-3.5)) - 
#@test_throws ErrorException MOI.add_constraint(model, MOI.SingleVariable(x[2]), MOI.ZeroOne()) - - @test_throws ErrorException MOI.add_constraint(model, MOI.SingleVariable(x[3]), MOI.GreaterThan(-3.5)) - @test_throws ErrorException MOI.add_constraint(model, MOI.SingleVariable(x[3]), MOI.LessThan(-3.5)) - @test_throws ErrorException MOI.add_constraint(model, MOI.SingleVariable(x[3]), MOI.EqualTo(-3.5)) - #@test_throws ErrorException MOI.add_constraint(model, MOI.SingleVariable(x[3]), MOI.ZeroOne()) - - #MOI.add_constraint(model, MOI.SingleVariable(z), MOI.ZeroOne()) - #@test is_integer(model, 4) - - @test EAGO.lower_bound(EAGO.VariableInfo(false,2.0,false,6.0,false,false,EAGO.BRANCH)) == 2.0 - @test EAGO.upper_bound(EAGO.VariableInfo(false,2.0,false,6.0,false,false,EAGO.BRANCH)) == 6.0 + ci1 = @inferred MOI.add_constraint(model, x[1], MOI.GreaterThan(-1.0)) + ci2 = @inferred MOI.add_constraint(model, x[2], MOI.LessThan(-1.0)) + ci3 = @inferred MOI.add_constraint(model, x[3], MOI.EqualTo(2.0)) + + @test model._input_problem._vi_geq_constraints[ci1][1] == x[1] + @test model._input_problem._vi_geq_constraints[ci1][2] == MOI.GreaterThan(-1.0) + + @test model._input_problem._vi_leq_constraints[ci2][1] == x[2] + @test model._input_problem._vi_leq_constraints[ci2][2] == MOI.LessThan(-1.0) + + @test model._input_problem._vi_eq_constraints[ci3][1] == x[3] + @test model._input_problem._vi_eq_constraints[ci3][2] == MOI.EqualTo(2.0) + + + @test_nowarn MOI.add_constraint(model, x[1], MOI.GreaterThan(NaN)) + @test_nowarn MOI.add_constraint(model, x[1], MOI.LessThan(NaN)) + + @test_nowarn MOI.add_constraint(model, x[1], MOI.GreaterThan(-3.5)) + @test_nowarn MOI.add_constraint(model, x[1], MOI.EqualTo(-3.5)) + @test_nowarn MOI.add_constraint(model, x[1], MOI.ZeroOne()) + + @test_nowarn MOI.add_constraint(model, x[2], MOI.LessThan(-3.5)) + @test_nowarn MOI.add_constraint(model, x[2], MOI.EqualTo(-3.5)) + @test_nowarn MOI.add_constraint(model, x[2], MOI.ZeroOne()) + + @test_nowarn 
MOI.add_constraint(model, x[3], MOI.GreaterThan(-3.5)) + @test_nowarn MOI.add_constraint(model, x[3], MOI.LessThan(-3.5)) + @test_nowarn MOI.add_constraint(model, x[3], MOI.EqualTo(-3.5)) + @test_nowarn MOI.add_constraint(model, x[3], MOI.ZeroOne()) end @testset "Add Linear Constraint " begin @@ -157,28 +131,31 @@ end set2 = MOI.GreaterThan{Float64}(2.0) set3 = MOI.EqualTo{Float64}(3.0) - @inferred MOI.add_constraint(model, func1, set1) - @inferred MOI.add_constraint(model, func2, set2) - @inferred MOI.add_constraint(model, func3, set3) - - @test model._input_problem._linear_leq_constraints[1][1].constant == 2.0 - @test model._input_problem._linear_geq_constraints[1][1].constant == 2.1 - @test model._input_problem._linear_eq_constraints[1][1].constant == 2.2 - @test model._input_problem._linear_leq_constraints[1][1].terms[1].coefficient == 5.0 - @test model._input_problem._linear_geq_constraints[1][1].terms[1].coefficient == 4.0 - @test model._input_problem._linear_eq_constraints[1][1].terms[1].coefficient == 3.0 - @test model._input_problem._linear_leq_constraints[1][1].terms[2].coefficient == -2.3 - @test model._input_problem._linear_geq_constraints[1][1].terms[2].coefficient == -2.2 - @test model._input_problem._linear_eq_constraints[1][1].terms[2].coefficient == -3.3 - @test model._input_problem._linear_leq_constraints[1][1].terms[1].variable_index.value == 1 - @test model._input_problem._linear_geq_constraints[1][1].terms[1].variable_index.value == 2 - @test model._input_problem._linear_eq_constraints[1][1].terms[1].variable_index.value == 1 - @test model._input_problem._linear_leq_constraints[1][1].terms[2].variable_index.value == 2 - @test model._input_problem._linear_geq_constraints[1][1].terms[2].variable_index.value == 3 - @test model._input_problem._linear_eq_constraints[1][1].terms[2].variable_index.value == 3 - @test MOI.LessThan{Float64}(1.0) == model._input_problem._linear_leq_constraints[1][2] - @test MOI.GreaterThan{Float64}(2.0) == 
model._input_problem._linear_geq_constraints[1][2] - @test MOI.EqualTo{Float64}(3.0) == model._input_problem._linear_eq_constraints[1][2] + ci1 = @inferred MOI.add_constraint(model, func1, set1) + ci2 = @inferred MOI.add_constraint(model, func2, set2) + ci3 = @inferred MOI.add_constraint(model, func3, set3) + + @test model._input_problem._linear_leq_constraints[ci1][1].constant == 2.0 + @test model._input_problem._linear_leq_constraints[ci1][1].terms[1].coefficient == 5.0 + @test model._input_problem._linear_leq_constraints[ci1][1].terms[2].coefficient == -2.3 + @test model._input_problem._linear_leq_constraints[ci1][1].terms[1].variable.value == 1 + @test model._input_problem._linear_leq_constraints[ci1][1].terms[2].variable.value == 2 + + @test model._input_problem._linear_geq_constraints[ci2][1].constant == 2.1 + @test model._input_problem._linear_geq_constraints[ci2][1].terms[1].coefficient == 4.0 + @test model._input_problem._linear_geq_constraints[ci2][1].terms[2].coefficient == -2.2 + @test model._input_problem._linear_geq_constraints[ci2][1].terms[1].variable.value == 2 + @test model._input_problem._linear_geq_constraints[ci2][1].terms[2].variable.value == 3 + + @test model._input_problem._linear_eq_constraints[ci3][1].constant == 2.2 + @test model._input_problem._linear_eq_constraints[ci3][1].terms[1].coefficient == 3.0 + @test model._input_problem._linear_eq_constraints[ci3][1].terms[2].coefficient == -3.3 + @test model._input_problem._linear_eq_constraints[ci3][1].terms[1].variable.value == 1 + @test model._input_problem._linear_eq_constraints[ci3][1].terms[2].variable.value == 3 + + @test MOI.LessThan{Float64}(1.0) == model._input_problem._linear_leq_constraints[ci1][2] + @test MOI.GreaterThan{Float64}(2.0) == model._input_problem._linear_geq_constraints[ci2][2] + @test MOI.EqualTo{Float64}(3.0) == model._input_problem._linear_eq_constraints[ci3][2] end @testset "Add Quadratic Constraint " begin @@ -191,42 +168,45 @@ end x = MOI.add_variables(model,3) - 
func1 = MOI.ScalarQuadraticFunction{Float64}([MOI.ScalarAffineTerm{Float64}(5.0,x[1])], - [MOI.ScalarQuadraticTerm{Float64}(2.5,x[2],x[2])],2.0) - func2 = MOI.ScalarQuadraticFunction{Float64}([MOI.ScalarAffineTerm{Float64}(4.0,x[2])], - [MOI.ScalarQuadraticTerm{Float64}(2.2,x[1],x[2])],2.1) - func3 = MOI.ScalarQuadraticFunction{Float64}([MOI.ScalarAffineTerm{Float64}(3.0,x[3])], - [MOI.ScalarQuadraticTerm{Float64}(2.1,x[1],x[1])],2.2) + func1 = MOI.ScalarQuadraticFunction{Float64}([MOI.ScalarQuadraticTerm{Float64}(2.5,x[2],x[2])], + [MOI.ScalarAffineTerm{Float64}(5.0,x[1])], 2.0) + func2 = MOI.ScalarQuadraticFunction{Float64}([MOI.ScalarQuadraticTerm{Float64}(2.2,x[1],x[2])], + [MOI.ScalarAffineTerm{Float64}(4.0,x[2])], 2.1) + func3 = MOI.ScalarQuadraticFunction{Float64}([MOI.ScalarQuadraticTerm{Float64}(2.1,x[1],x[1])], + [MOI.ScalarAffineTerm{Float64}(3.0,x[3])], 2.2) set1 = MOI.LessThan{Float64}(1.0) set2 = MOI.GreaterThan{Float64}(2.0) set3 = MOI.EqualTo{Float64}(3.0) - @inferred MOI.add_constraint(model, func1, set1) - @inferred MOI.add_constraint(model, func2, set2) - @inferred MOI.add_constraint(model, func3, set3) - - @test model._input_problem._quadratic_leq_constraints[1][1].constant == 2.0 - @test model._input_problem._quadratic_geq_constraints[1][1].constant == 2.1 - @test model._input_problem._quadratic_eq_constraints[1][1].constant == 2.2 - @test model._input_problem._quadratic_leq_constraints[1][1].quadratic_terms[1].coefficient == 2.5 - @test model._input_problem._quadratic_geq_constraints[1][1].quadratic_terms[1].coefficient == 2.2 - @test model._input_problem._quadratic_eq_constraints[1][1].quadratic_terms[1].coefficient == 2.1 - @test model._input_problem._quadratic_leq_constraints[1][1].affine_terms[1].coefficient == 5.0 - @test model._input_problem._quadratic_geq_constraints[1][1].affine_terms[1].coefficient == 4.0 - @test model._input_problem._quadratic_eq_constraints[1][1].affine_terms[1].coefficient == 3.0 - @test 
model._input_problem._quadratic_leq_constraints[1][1].quadratic_terms[1].variable_index_1.value == 2 - @test model._input_problem._quadratic_geq_constraints[1][1].quadratic_terms[1].variable_index_1.value == 1 - @test model._input_problem._quadratic_eq_constraints[1][1].quadratic_terms[1].variable_index_1.value == 1 - @test model._input_problem._quadratic_leq_constraints[1][1].quadratic_terms[1].variable_index_2.value == 2 - @test model._input_problem._quadratic_geq_constraints[1][1].quadratic_terms[1].variable_index_2.value == 2 - @test model._input_problem._quadratic_eq_constraints[1][1].quadratic_terms[1].variable_index_2.value == 1 - @test model._input_problem._quadratic_leq_constraints[1][1].affine_terms[1].variable_index.value == 1 - @test model._input_problem._quadratic_geq_constraints[1][1].affine_terms[1].variable_index.value == 2 - @test model._input_problem._quadratic_eq_constraints[1][1].affine_terms[1].variable_index.value == 3 - @test MOI.LessThan{Float64}(1.0) == model._input_problem._quadratic_leq_constraints[1][2] - @test MOI.GreaterThan{Float64}(2.0) == model._input_problem._quadratic_geq_constraints[1][2] - @test MOI.EqualTo{Float64}(3.0) == model._input_problem._quadratic_eq_constraints[1][2] + ci1 = @inferred MOI.add_constraint(model, func1, set1) + ci2 = @inferred MOI.add_constraint(model, func2, set2) + ci3 = @inferred MOI.add_constraint(model, func3, set3) + + @test model._input_problem._quadratic_leq_constraints[ci1][1].constant == 2.0 + @test model._input_problem._quadratic_leq_constraints[ci1][1].quadratic_terms[1].coefficient == 2.5 + @test model._input_problem._quadratic_leq_constraints[ci1][1].affine_terms[1].coefficient == 5.0 + @test model._input_problem._quadratic_leq_constraints[ci1][1].quadratic_terms[1].variable_1.value == 2 + @test model._input_problem._quadratic_leq_constraints[ci1][1].quadratic_terms[1].variable_2.value == 2 + @test model._input_problem._quadratic_leq_constraints[ci1][1].affine_terms[1].variable.value == 1 + + 
@test model._input_problem._quadratic_geq_constraints[ci2][1].constant == 2.1 + @test model._input_problem._quadratic_geq_constraints[ci2][1].quadratic_terms[1].coefficient == 2.2 + @test model._input_problem._quadratic_geq_constraints[ci2][1].affine_terms[1].coefficient == 4.0 + @test model._input_problem._quadratic_geq_constraints[ci2][1].quadratic_terms[1].variable_1.value == 1 + @test model._input_problem._quadratic_geq_constraints[ci2][1].quadratic_terms[1].variable_2.value == 2 + @test model._input_problem._quadratic_geq_constraints[ci2][1].affine_terms[1].variable.value == 2 + + @test model._input_problem._quadratic_eq_constraints[ci3][1].constant == 2.2 + @test model._input_problem._quadratic_eq_constraints[ci3][1].quadratic_terms[1].coefficient == 2.1 + @test model._input_problem._quadratic_eq_constraints[ci3][1].affine_terms[1].coefficient == 3.0 + @test model._input_problem._quadratic_eq_constraints[ci3][1].quadratic_terms[1].variable_1.value == 1 + @test model._input_problem._quadratic_eq_constraints[ci3][1].quadratic_terms[1].variable_2.value == 1 + @test model._input_problem._quadratic_eq_constraints[ci3][1].affine_terms[1].variable.value == 3 + + @test MOI.LessThan{Float64}(1.0) == model._input_problem._quadratic_leq_constraints[ci1][2] + @test MOI.GreaterThan{Float64}(2.0) == model._input_problem._quadratic_geq_constraints[ci2][2] + @test MOI.EqualTo{Float64}(3.0) == model._input_problem._quadratic_eq_constraints[ci3][2] end @testset "Set Objective" begin @@ -243,44 +223,40 @@ end MOI.set(model, MOI.ObjectiveSense(), MOI.FEASIBILITY_SENSE) @test model._input_problem._optimization_sense == MOI.FEASIBILITY_SENSE - @test MOI.supports(model, MOI.ObjectiveFunction{MOI.SingleVariable}()) + @test MOI.supports(model, MOI.ObjectiveFunction{MOI.VariableIndex}()) @test MOI.supports(model, MOI.ObjectiveFunction{MOI.ScalarAffineFunction{Float64}}()) @test MOI.supports(model, MOI.ObjectiveFunction{MOI.ScalarQuadraticFunction{Float64}}()) x = 
MOI.add_variables(model,3) - MOI.set(model, MOI.ObjectiveFunction{MOI.SingleVariable}(), MOI.SingleVariable(MOI.VariableIndex(2))) - @test model._input_problem._objective_type == EAGO.SINGLE_VARIABLE - @test model._input_problem._objective_sv == MOI.SingleVariable(MOI.VariableIndex(2)) + MOI.set(model, MOI.ObjectiveFunction{MOI.VariableIndex}(), MOI.VariableIndex(2)) + @test model._input_problem._objective == MOI.VariableIndex(2) MOI.set(model, MOI.ObjectiveFunction{MOI.ScalarAffineFunction{Float64}}(), MOI.ScalarAffineFunction{Float64}(MOI.ScalarAffineTerm.(Float64[5.0,-2.3],[x[1],x[2]]),2.0)) - @test model._input_problem._objective_type == EAGO.SCALAR_AFFINE - @test model._input_problem._objective_saf.constant == 2.0 + @test model._input_problem._objective.constant == 2.0 MOI.set(model, MOI.ObjectiveFunction{MOI.ScalarQuadraticFunction{Float64}}(), - MOI.ScalarQuadraticFunction{Float64}([MOI.ScalarAffineTerm{Float64}(5.0,x[1])], - [MOI.ScalarQuadraticTerm{Float64}(2.5,x[2],x[2])],3.0)) - @test model._input_problem._objective_type == EAGO.SCALAR_QUADRATIC - @test model._input_problem._objective_sqf.constant == 3.0 + MOI.ScalarQuadraticFunction{Float64}([MOI.ScalarQuadraticTerm{Float64}(2.5,x[2],x[2])], [MOI.ScalarAffineTerm{Float64}(5.0,x[1])], + 3.0)) + @test model._input_problem._objective.constant == 3.0 end @testset "Empty/Isempty, EAGO Model, Single Storage, Optimize Hook " begin - model = EAGO.Optimizer() - @test @inferred MOI.is_empty(model) + x = EAGO.Optimizer() + m = x._global_optimizer + @test @inferred MOI.is_empty(x) t = EAGO.DefaultExt() - model._current_node = EAGO.NodeBB(Float64[1.0,5.0], Float64[2.0,6.0], -4.0, 1.0, 2, 1) - model._lower_objective_value = -3.0 - model._upper_objective_value = 0.0 - @test_nowarn EAGO.single_storage!(t, model) - new_node = pop!(model._stack) + m._current_node = EAGO.NodeBB(Float64[1.0,5.0], Float64[2.0,6.0], Bool[false, false], true, -4.0, 1.0, 2, 1, 1, EAGO.BD_NONE, 1, 0.1) + m._lower_objective_value = -3.0 + 
m._upper_objective_value = 0.0 + @test_nowarn EAGO.single_storage!(t, m) + new_node = pop!(m._stack) @test new_node.lower_bound == -3.0 @test new_node.upper_bound == 0.0 - @test_nowarn EAGO.single_storage!(model) - - @test_nowarn EAGO.optimize_hook!(EAGO.DefaultExt(), model) - @test_nowarn EAGO.throw_optimize_hook!(model) + @test_nowarn EAGO.single_storage!(m) + @test_nowarn EAGO.optimize_hook!(EAGO.DefaultExt(), m) end #= @testset "Fallback Interval Bounds" begin @@ -438,9 +414,8 @@ end @test isapprox(JuMP.value(y), 0.0, atol=1E-4) @test isapprox(JuMP.value(z), 0.0, atol=1E-4) @test isapprox(JuMP.value(q), 0.0, atol=1E-4) - @test isapprox(JuMP.objective_value(m), Inf, atol=1E-4) @test JuMP.termination_status(m) == MOI.INFEASIBLE - @test JuMP.primal_status(m) == MOI.INFEASIBILITY_CERTIFICATE + @test JuMP.primal_status(m) == MathOptInterface.NO_SOLUTION m = Model(optimizer_with_attributes(EAGO.Optimizer, "verbosity" => 0, "presolve_scrubber_flag" => false, @@ -530,28 +505,6 @@ end @test JuMP.primal_status(m) == MOI.FEASIBLE_POINT end - #= - m = Model(optimizer_with_attributes(EAGO.Optimizer, "verbosity" => 0)) - # ----- Variables ----- # - x_Idx = Any[2, 3, 4] - @variable(m, x[x_Idx]) - JuMP.set_lower_bound(x[2], 1.0e-6) - JuMP.set_upper_bound(x[2], 1.0) - JuMP.set_lower_bound(x[3], 1.0e-6) - JuMP.set_upper_bound(x[3], 1.0) - JuMP.set_lower_bound(x[4], 1.0e-6) - JuMP.set_upper_bound(x[4], 1.0) - - # ----- Constraints ----- # - @constraint(m, e2, x[2]+x[3]+x[4] == 1.0) - - # ----- Objective ----- # - @NLobjective(m, Min, 
((15.3261663216011*x[2]+23.2043471859416*x[3]+6.69678129464404*x[4])*log(2.1055*x[2]+3.1878*x[3]+0.92*x[4])+1.04055250396734*x[2]-2.24199441248417*x[3]+3.1618173099828*x[4]+6.4661663216011*x[2]*log(x[2]/(2.1055*x[2]+3.1878*x[3]+0.92*x[4]))+12.2043471859416*x[3]*log(x[3]/(2.1055*x[2]+3.1878*x[3]+0.92*x[4]))+0.696781294644034*x[4]*log(x[4]/(2.1055*x[2]+3.1878*x[3]+0.92*x[4]))+9.86*x[2]*log(x[2]/(1.972*x[2]+2.4*x[3]+1.4*x[4]))+12*x[3]*log(x[3]/(1.972*x[2]+2.4*x[3]+1.4*x[4]))+7*x[4]*log(x[4]/(1.972*x[2]+2.4*x[3]+1.4*x[4]))+(1.972*x[2]+2.4*x[3]+1.4*x[4])*log(1.972*x[2]+2.4*x[3]+1.4*x[4])+1.972*x[2]*log(x[2]/(1.972*x[2]+0.283910843616504*x[3]+3.02002220174195*x[4]))+2.4*x[3]*log(x[3]/(1.45991339466884*x[2]+2.4*x[3]+0.415073537580851*x[4]))+1.4*x[4]*log(x[4]/(0.602183324335333*x[2]+0.115623371371275*x[3]+1.4*x[4]))-17.2981663216011*x[2]*log(x[2])-25.6043471859416*x[3]*log(x[3])-8.09678129464404*x[4]*log(x[4]))) - - JuMP.optimize!(m) - @test isapprox(JuMP.objective_value(m), 0.000, atol=1E-3) - =# - @testset "NLP Problem #3" begin m = Model(EAGO.Optimizer) set_optimizer_attributes(m, "verbosity" => 0, @@ -667,28 +620,6 @@ end @test isapprox(JuMP.objective_value(m), 0.0018, atol=1E-3) end -@testset "Empty Evaluator" begin - x = EAGO.EmptyNLPEvaluator() - n = EAGO.NodeBB() - @test_nowarn EAGO.set_current_node!(x,n) - - fa = MOI.features_available(x) - @test fa[1] === :Grad - @test fa[2] === :Jac - @test fa[3] === :Hess - - MOI.initialize(x, [:Grad, :Jac, :Hess]) === nothing - @test MOI.eval_objective(x, 0.0) === NaN - - @test_throws AssertionError MOI.eval_constraint(x, [0.0], 0.0) - @test_throws AssertionError MOI.eval_constraint_jacobian(x, [0.0], 0.0) - MOI.eval_objective_gradient(x, [0.0], 0.0) === nothing - MOI.jacobian_structure(x) === nothing - MOI.hessian_lagrangian_structure(x) === nothing - @test_throws AssertionError MOI.eval_hessian_lagrangian(x, [0.0], 0.0, 0.0, 0.0) - MOI.eval_hessian_lagrangian(x, [], 0.0, 0.0, 0.0) === nothing -end - @testset "Register 
special expressions" begin raw_index(v::MOI.VariableIndex) = v.value @@ -773,21 +704,12 @@ end @testset "Display Testset" begin m = EAGO.Optimizer() - MOI.set(m, MOI.RawParameter(:verbosity), 2) - #MOI.set(m, MOI.ObjectiveSense(), MIN_SENSE) - @test_nowarn EAGO.print_solution!(m) - @test_nowarn EAGO.print_results!(m, true) - @test_nowarn EAGO.print_results!(m, false) - @test_nowarn EAGO.print_results_post_cut!(m) - @test_nowarn EAGO.print_solution!(m) - @test_nowarn EAGO.print_iteration!(m) - @test_nowarn EAGO.print_node!(m) - - #MOI.set(m, MOI.ObjectiveSense(), MAX_SENSE) - #@test_nowarn EAGO.print_results!(m, true) - #@test_nowarn EAGO.print_results!(m, false) - #@test_nowarn EAGO.print_results_post_cut!(m) - #@test_nowarn EAGO.print_iteration!(m) + MOI.set(m, MOI.RawParameter("verbosity"), 2) + @test_nowarn EAGO.print_solution!(m._global_optimizer) + @test_nowarn EAGO.print_results!(m._global_optimizer, true) + @test_nowarn EAGO.print_results!(m._global_optimizer, false) + @test_nowarn EAGO.print_iteration!(m._global_optimizer) + @test_nowarn EAGO.print_node!(m._global_optimizer) end #= diff --git a/test/runtests.jl b/test/runtests.jl index db31f6bf..8a11474f 100644 --- a/test/runtests.jl +++ b/test/runtests.jl @@ -1,13 +1,18 @@ #!/usr/bin/env julia -using Test -using EAGO, JuMP, MathOptInterface, Ipopt, ForwardDiff -using IntervalArithmetic, SpecialFunctions +using Test, Printf, EAGO, MathOptInterface, Cbc, JuMP, Ipopt + const MOI = MathOptInterface -using ForwardDiff: Dual, Partials +const MOIT = MOI.Test +const MOIU = MOI.Utilities +const MOIB = MOI.Bridges + + +include(joinpath(@__DIR__, "moit_tests.jl")) +include(joinpath(@__DIR__, "minlp_tests.jl")) -include("branch_bound.jl") -include("domain_reduction.jl") -include("optimizer.jl") -include("script_optimizer.jl") -include("semiinfinite.jl") +include(joinpath(@__DIR__, "branch_bound.jl")) +include(joinpath(@__DIR__, "domain_reduction.jl")) +include(joinpath(@__DIR__, "optimizer.jl")) 
+include(joinpath(@__DIR__, "script_optimizer.jl")) +include(joinpath(@__DIR__, "semiinfinite.jl")) diff --git a/test/semiinfinite.jl b/test/semiinfinite.jl index e297f2f9..96c19c5e 100644 --- a/test/semiinfinite.jl +++ b/test/semiinfinite.jl @@ -34,6 +34,7 @@ end @test isapprox(sip_result.xsol[2], -0.6184706298867955, atol = 1E-2) end +#= @testset "SIP Hybrid" begin # Define semi-infinite program f(x) = (1/3)*x[1]^2 + x[2]^2 + x[1]/2 @@ -51,3 +52,4 @@ end @test isapprox(sip_result.xsol[1], -0.7500000115038946, atol = 1E-2) @test isapprox(sip_result.xsol[2], -0.6184706298867955, atol = 1E-2) end +=# \ No newline at end of file