7 changes: 0 additions & 7 deletions .github/workflows/ci.yml
@@ -16,9 +16,6 @@ jobs:
           - version: '1'
             os: ubuntu-latest
             arch: x64
-          - version: '1.0'
-            os: ubuntu-latest
-            arch: x64
           - version: '1'
             os: windows-latest
             arch: x64
@@ -38,8 +35,6 @@ jobs:
             ${{ runner.os }}-test-${{ env.cache-name }}-
             ${{ runner.os }}-test-
             ${{ runner.os }}-
-      - shell: bash
-        run: julia --color=yes --project -e "using Pkg; Pkg.add(Pkg.PackageSpec(url=\"https://github.com/jump-dev/MatrixOptInterface.jl\", rev=\"4652d041f7b60e511c42494433db1f2198bdfb23\")); Pkg.instantiate()"
       - uses: julia-actions/julia-buildpkg@v1
       - uses: julia-actions/julia-runtest@v1
       - uses: julia-actions/julia-processcoverage@v1
@@ -57,8 +52,6 @@ jobs:
       - uses: julia-actions/setup-julia@v1
        with:
          version: '1'
-      - shell: bash
-        run: julia --color=yes --project=docs -e 'using Pkg; Pkg.add(Pkg.PackageSpec(url="https://github.com/jump-dev/MatrixOptInterface.jl",rev="4652d041f7b60e511c42494433db1f2198bdfb23")); Pkg.instantiate()'
       - shell: bash
         run: julia --project=docs -e "using Pkg; Pkg.develop(PackageSpec(path=pwd()))"
       - shell: bash
14 changes: 6 additions & 8 deletions Project.toml
@@ -12,29 +12,27 @@
 LazyArrays = "5078a376-72f3-5289-bfd5-ec5146d43c02"
 LinearAlgebra = "37e2e46d-f89d-539d-b4ee-838fcccc9c8e"
 MathOptInterface = "b8f27783-ece8-5eb3-8dc8-9495eed66fee"
 MathOptSetDistances = "3b969827-a86c-476c-9527-bb6f1a8fbad5"
-MatrixOptInterface = "2f4eb8e6-3e35-4ae4-8c7a-f3d7d9bf20ed"
 Random = "9a3f8284-a2c9-5f02-9a11-845980a1fd5c"
 SparseArrays = "2f01184e-e22b-5df5-ae63-d93ebab69eaf"
 
 [compat]
 BlockDiagonals = "0.1"
+IterativeSolvers = "0.9"
 ChainRulesCore = "1"
-IterativeSolvers = "0.8, 0.9"
-JuMP = "0.21"
-LazyArrays = "0.16, 0.21"
-MathOptInterface = "0.9"
+JuMP = "0.22"
+LazyArrays = "0.21"
+MathOptInterface = "0.10"
 MathOptSetDistances = "0.1"
-julia = "1"
+julia = "1.6"
 
 [extras]
 Clp = "e2554f3b-3117-50c0-817c-e040a3ddf72d"
 DelimitedFiles = "8bb1440f-4735-579b-a4ab-409b98df4dab"
 GLPK = "60bf3e95-4087-53dc-ae20-288a0d20c6a6"
 Ipopt = "b6b21f68-93f8-5de0-b562-5493be1d77c9"
-JuMP = "4076af6c-e467-56ae-b986-b466b2749572"
 OSQP = "ab2f91bb-94b4-55e3-9ba0-7f65df51de79"
 SCS = "c946c3f1-0d1f-5ce8-9dea-7daa1f7e2d13"
 Test = "8dfed614-e22c-5e08-85e1-65c5234f0b40"
 
 [targets]
-test = ["Test", "Clp", "DelimitedFiles", "SCS", "OSQP", "GLPK", "Ipopt", "JuMP"]
+test = ["Test", "Clp", "DelimitedFiles", "SCS", "OSQP", "GLPK", "Ipopt"]
3 changes: 2 additions & 1 deletion docs/src/chainrules_unit.md
@@ -171,7 +171,7 @@ function ChainRulesCore.frule(
     # setting the perturbation of the linear objective
     Δobj = sum(Δgen_costs ⋅ p[:,t] + Δnoload_costs ⋅ u[:,t] for t in size(p, 2))
     MOI.set(model, DiffOpt.ForwardInObjective(), Δobj)
-
+    optimize!(model)
     DiffOpt.forward(JuMP.backend(model))
     # querying the corresponding perturbation of the decision
     Δp = MOI.get.(model, DiffOpt.ForwardOutVariablePrimal(), p)
@@ -230,6 +230,7 @@ function ChainRulesCore.rrule(
     energy_balance_cons = model[:energy_balance_cons]
 
     MOI.set.(model, DiffOpt.BackwardInVariablePrimal(), p, pb)
+    optimize!(model)
     DiffOpt.backward(JuMP.backend(model))
 
     obj = MOI.get(model, DiffOpt.BackwardOutObjective())
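Both hunks insert the same fix: the model is (re)solved immediately before `DiffOpt.forward`/`DiffOpt.backward`, since setting attributes can invalidate JuMP's cached solution (the jump-dev/JuMP.jl#2797 workaround that recurs throughout this PR). One reviewer-level aside: the context line `for t in size(p, 2)` iterates over the single integer `size(p, 2)`; `1:size(p, 2)` was presumably intended. Stripped of the `frule`/`rrule` plumbing, the pattern reads roughly as below — a minimal sketch with a hypothetical one-variable program; only `diff_optimizer`, the attribute names, and the call order are taken from this diff:

```julia
using JuMP, SCS
using DiffOpt
import MathOptInterface
const MOI = MathOptInterface

# Hypothetical toy program: min 2x  s.t.  x >= 1.
model = Model(() -> diff_optimizer(SCS.Optimizer))
@variable(model, x >= 1)
@objective(model, Min, 2x)

# Set a perturbation of the objective (here: +1 on the coefficient of x),
MOI.set(model, DiffOpt.ForwardInObjective(), 1.0 * x)
# re-solve so a valid cached solution exists (JuMP#2797 workaround),
optimize!(model)
# then push the perturbation forward through the solution map.
DiffOpt.forward(JuMP.backend(model))
dx = MOI.get(model, DiffOpt.ForwardOutVariablePrimal(), x)
```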
24 changes: 14 additions & 10 deletions docs/src/examples/sensitivity-analysis-svm.jl
@@ -52,12 +52,12 @@ model = Model(() -> diff_optimizer(SCS.Optimizer))
 # Add the constraints.
 
 @constraint(
-    model,
+    model,
     1.0 * l ∈ MOI.Nonnegatives(N),
 )
 @constraint(
-    model,
-    cons,
+    model,
+    cons,
     y .* (X * w .+ b) + l .- 1 ∈ MOI.Nonnegatives(N),
 );
@@ -144,25 +144,27 @@ for Xi in 1:N
         model,
         DiffOpt.ForwardInConstraint(),
         cons,
-        MOI.Utilities.vectorize(dy .* MOI.SingleVariable(b)),
+        MOI.Utilities.vectorize(dy .* index(b)),
     )
 
+    optimize!(model) # FIXME Workaround for https://github.com/jump-dev/JuMP.jl/issues/2797
     DiffOpt.forward(model)
+
     dw = MOI.get.(
         model,
         DiffOpt.ForwardOutVariablePrimal(),
-        w
+        w,
     )
     db = MOI.get(
         model,
         DiffOpt.ForwardOutVariablePrimal(),
-        b
+        b,
     )
     push!(∇, norm(dw) + norm(db))
 
     dy[Xi] = 0.0 # reset the change made above
 end
 
 normalize!(∇);


Expand Down Expand Up @@ -193,26 +195,28 @@ for Xi in 1:N
model,
DiffOpt.ForwardInConstraint(),
cons,
MOI.Utilities.vectorize(dX[:,i] .* MOI.SingleVariable(w[i])),
MOI.Utilities.vectorize(dX[:,i] .* index(w[i])),
)
end

optimize!(model) # FIXME Workaround for https://github.com/jump-dev/JuMP.jl/issues/2797
DiffOpt.forward(model)

dw = MOI.get.(
model,
DiffOpt.ForwardOutVariablePrimal(),
w
w,
)
db = MOI.get(
model,
DiffOpt.ForwardOutVariablePrimal(),
b
b,
)
push!(∇, norm(dw) + norm(db))

dX[Xi, :] = zeros(D) # reset the change made ago
dX[Xi, :] = zeros(D) # reset the change made at the beginning of the loop
end

normalize!(∇);

# We can visualize point sensitivity with respect to the separating hyperplane. Note that the gradients are normalized.
Expand Down
14 changes: 7 additions & 7 deletions docs/src/manual.md
@@ -10,9 +10,9 @@ For `QPTH`/`OPTNET` style backend, the package supports following `Function-in-S
 
 | MOI Function | MOI Set |
 |:-------|:---------------|
-| `SingleVariable` | `GreaterThan` |
-| `SingleVariable` | `LessThan` |
-| `SingleVariable` | `EqualTo` |
+| `VariableIndex` | `GreaterThan` |
+| `VariableIndex` | `LessThan` |
+| `VariableIndex` | `EqualTo` |
 | `ScalarAffineFunction` | `GreaterThan` |
 | `ScalarAffineFunction` | `LessThan` |
 | `ScalarAffineFunction` | `EqualTo` |
@@ -21,7 +21,7 @@ and the following objective types:
 
 | MOI Function |
 |:-------:|
-| `SingleVariable` |
+| `VariableIndex` |
 | `ScalarAffineFunction` |
 | `ScalarQuadraticFunction` |
 
@@ -47,7 +47,7 @@ and the following objective types:
 
 | MOI Function |
 |:-------:|
-| `SingleVariable` |
+| `VariableIndex` |
 | `ScalarAffineFunction` |
 
 
@@ -94,7 +94,7 @@ variable
 
 In light of the above, DiffOpt differentiates program variables ``x``, ``s``, ``y`` w.r.t. perturbations/sensitivities in the problem data, i.e. ``dA``, ``db``, ``dc``. This is achieved via *implicit differentiation* and *matrix differential calculus*.
 
-> Note that the primal (P) and dual (D) are self-duals of each other. Similarly for the constraints we support, ``\mathcal{K}`` is same in format as ``\mathcal{K}^*``.
+> Note that the primal (P) and dual (D) are self-duals of each other. Similarly, for the constraints we support, ``\mathcal{K}`` is same in format as ``\mathcal{K}^*``.
 
 
 ### Reference articles
@@ -104,6 +104,6 @@ In the light of above, DiffOpt differentiates program variables ``x``, ``s``, ``
 - OptNet: Differentiable Optimization as a Layer in Neural Networks
 
 ### Backward Pass vector
-One possible point of confusion in finding jacobians is the role of the backward pass vector - above eqn (7), *OptNet: Differentiable Optimization as a Layer in Neural Networks*. While differentiating convex programs, it is often the case that we dont't want to find the acutal derivatives, rather we might be interested in computing the product of jacobians with a *backward pass vector*, often used in backprop in machine learing/automatic differentiation. This is what happens in scheme 1 of `DiffOpt` backend.
+One possible point of confusion in finding Jacobians is the role of the backward pass vector - above eqn (7), *OptNet: Differentiable Optimization as a Layer in Neural Networks*. While differentiating convex programs, it is often the case that we don't want to find the actual derivatives; rather, we might be interested in computing the product of Jacobians with a *backward pass vector*, as often used in backprop in machine learning/automatic differentiation. This is what happens in scheme 1 of the `DiffOpt` backend.
 
 But, for the conic system (scheme 2), we provide perturbations in conic data (`dA`, `db`, `dc`) to compute perturbations (`dx`, `dy`, `dz`) in the input variables. Unlike the quadratic case, these perturbations are actual derivatives, not the product with a backward pass vector. This is an important distinction between the two schemes of differential optimization.
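Side by side, the two schemes look as follows. This is a schematic sketch, not runnable as-is: it assumes an already-built and solved `model` with primal variables `x` and a vector-valued constraint index `ci`, plus placeholder data `dl_dx`, `dA`, `db`; only the `DiffOpt` attribute and method names are taken from this PR:

```julia
# Scheme 1 (QP backend): seed a backward-pass vector dl/dx on the primal
# variables and read back vector-Jacobian products, e.g. w.r.t. the objective.
MOI.set.(model, DiffOpt.BackwardInVariablePrimal(), x, dl_dx)  # dl_dx: placeholder seed
DiffOpt.backward(model)
dl_dobj = MOI.get(model, DiffOpt.BackwardOutObjective())  # a product, not a full Jacobian

# Scheme 2 (conic backend): push a concrete perturbation (dA, db) of one
# constraint forward and read back actual derivatives of the solution.
MOI.set(model, DiffOpt.ForwardInConstraint(), ci,  # ci, dA, db: placeholders
    MOI.Utilities.vectorize(dA * x + db))
DiffOpt.forward(model)
dx = MOI.get.(model, DiffOpt.ForwardOutVariablePrimal(), x)  # actual derivatives
```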
2 changes: 1 addition & 1 deletion docs/src/matrix-inversion-manual.md
@@ -151,7 +151,7 @@ for i in 1:n
     end
 end
 
-objective_function = MOI.ScalarQuadraticFunction(MOI.ScalarAffineTerm.(q, x),quad_terms,0.)
+objective_function = MOI.ScalarQuadraticFunction(quad_terms, MOI.ScalarAffineTerm.(q, x), 0.0)
 MOI.set(model, MOI.ObjectiveFunction{MOI.ScalarQuadraticFunction{Float64}}(), objective_function)
 MOI.set(model, MOI.ObjectiveSense(), MOI.MIN_SENSE)
 
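The only change here (and in the identical hunks in `docs/src/solve-QP.md` and `examples/solve-QP.jl` below) is argument order: since MathOptInterface 0.10, `ScalarQuadraticFunction` takes its quadratic terms first, then the affine terms, then the constant. A small self-contained sketch of the new order; the two-variable objective is illustrative, not from the PR:

```julia
import MathOptInterface
const MOI = MathOptInterface

model = MOI.Utilities.Model{Float64}()
x = MOI.add_variables(model, 2)

# Objective 1/2 x'Qx + q'x + 1 with Q = [2 1; 1 2] and q = [1, 1].
# MOI halves diagonal (x_i, x_i) terms, so a coefficient of 2.0 encodes x_i^2.
quad_terms = [
    MOI.ScalarQuadraticTerm(2.0, x[1], x[1]),
    MOI.ScalarQuadraticTerm(1.0, x[1], x[2]),
    MOI.ScalarQuadraticTerm(2.0, x[2], x[2]),
]
f = MOI.ScalarQuadraticFunction(quad_terms, MOI.ScalarAffineTerm.([1.0, 1.0], x), 1.0)
MOI.set(model, MOI.ObjectiveFunction{typeof(f)}(), f)
MOI.set(model, MOI.ObjectiveSense(), MOI.MIN_SENSE)
```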
4 changes: 2 additions & 2 deletions docs/src/solve-LP.md
@@ -55,11 +55,11 @@ constraint_indices = []
 
 # set constraints
 for i in 1:N
-    push!(constraint_indices, MOI.add_constraint(model,MOI.ScalarAffineFunction(MOI.ScalarAffineTerm.(A[i,:], x), 0.),MOI.LessThan(b[i])))
+    push!(constraint_indices, MOI.add_constraint(model, MOI.ScalarAffineFunction(MOI.ScalarAffineTerm.(A[i,:], x), 0.0), MOI.LessThan(b[i])))
 end
 
 for i in 1:D
-    push!(constraint_indices, MOI.add_constraint(model,MOI.SingleVariable(x[i]),MOI.GreaterThan(0.)))
+    push!(constraint_indices, MOI.add_constraint(model, x[i], MOI.GreaterThan(0.0)))
 end
 ```
2 changes: 1 addition & 1 deletion docs/src/solve-QP.md
@@ -52,7 +52,7 @@ for i in 1:n
     end
 end
 
-objective_function = MOI.ScalarQuadraticFunction(MOI.ScalarAffineTerm.(q, x),quad_terms,0.0)
+objective_function = MOI.ScalarQuadraticFunction(quad_terms, MOI.ScalarAffineTerm.(q, x), 0.0)
 MOI.set(model, MOI.ObjectiveFunction{MOI.ScalarQuadraticFunction{Float64}}(), objective_function)
 MOI.set(model, MOI.ObjectiveSense(), MOI.MIN_SENSE)
 ```
9 changes: 4 additions & 5 deletions docs/src/solve-conic-1.md
@@ -190,15 +190,14 @@ println("s -> ", round.(s_sol; digits=3))
 println("y -> ", round.(y_sol; digits=3))
 
 # perturbations in all the parameters
-fx = MOI.SingleVariable.(x)
 MOI.set(model,
-    DiffOpt.ForwardInConstraint(), c1, MOI.Utilities.vectorize(ones(1, 9) * fx + ones(1)))
+    DiffOpt.ForwardInConstraint(), c1, MOI.Utilities.vectorize(ones(1, 9) * x + ones(1)))
 MOI.set(model,
-    DiffOpt.ForwardInConstraint(), c2, MOI.Utilities.vectorize(ones(6, 9) * fx + ones(6)))
+    DiffOpt.ForwardInConstraint(), c2, MOI.Utilities.vectorize(ones(6, 9) * x + ones(6)))
 MOI.set(model,
-    DiffOpt.ForwardInConstraint(), c3, MOI.Utilities.vectorize(ones(3, 9) * fx + ones(3)))
+    DiffOpt.ForwardInConstraint(), c3, MOI.Utilities.vectorize(ones(3, 9) * x + ones(3)))
 MOI.set(model,
-    DiffOpt.ForwardInConstraint(), c4, MOI.Utilities.vectorize(ones(1, 9) * fx + ones(1)))
+    DiffOpt.ForwardInConstraint(), c4, MOI.Utilities.vectorize(ones(1, 9) * x + ones(1)))
 
 # differentiate and get the gradients
 DiffOpt.forward(model)
2 changes: 1 addition & 1 deletion docs/src/usage.md
@@ -51,7 +51,7 @@ grad_con = MOI.get.(model, DiffOpt.BackwardOutConstraint(), c)
 we can use the `forward` method with perturbations in matrices `A`, `b`, `c`
 ```julia
 using LinearAlgebra # for `⋅`
-MOI.set(model, DiffOpt.ForwardInObjective(), ones(2) ⋅ MOI.SingleVariable.(x))
+MOI.set(model, DiffOpt.ForwardInObjective(), ones(2) ⋅ x)
 DiffOpt.forward(model)
 grad_x = MOI.get.(model, DiffOpt.ForwardOutVariablePrimal(), x)
 ```
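The updated line perturbs only the objective (`dc`). The matching pattern for constraint data (`dA`, `db`) goes through `ForwardInConstraint`, as in the conic example earlier in this diff. A hedged continuation of the snippet above, assuming `c` here names one vector-valued constraint over the two variables in `x` (in the surrounding page it may be a collection, and the all-ones perturbation is illustrative):

```julia
# dA = ones(1, 2), db = ones(1): perturb every coefficient of constraint c by 1.
MOI.set(model, DiffOpt.ForwardInConstraint(), c,
    MOI.Utilities.vectorize(ones(1, 2) * x + ones(1)))
DiffOpt.forward(model)
grad_x = MOI.get.(model, DiffOpt.ForwardOutVariablePrimal(), x)
```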
3 changes: 3 additions & 0 deletions examples/chainrules.jl
@@ -93,12 +93,15 @@ function ChainRulesCore.frule((_, Δload1_demand, Δload2_demand, Δgen_costs,
         AffExpr[d1 + d2 for (d1, d2) in zip(Δload1_demand, Δload2_demand)],
     )
 
+
     p = model[:p]
     u = model[:u]
 
     # setting the perturbation of the linear objective
     Δobj = sum(Δgen_costs ⋅ p[:,t] + Δnoload_costs ⋅ u[:,t] for t in size(p, 2))
     MOI.set(model, DiffOpt.ForwardInObjective(), Δobj)
+    # FIXME Workaround for https://github.com/jump-dev/JuMP.jl/issues/2797
+    optimize!(model)
     DiffOpt.forward(JuMP.backend(model))
     # querying the corresponding perturbation of the decision
     Δp = MOI.get.(model, DiffOpt.ForwardOutVariablePrimal(), p)
2 changes: 1 addition & 1 deletion examples/solve-LP.jl
@@ -39,7 +39,7 @@ for i in 1:N
 end
 
 for i in 1:D
-    push!(constraint_indices, MOI.add_constraint(model,MOI.SingleVariable(x[i]),MOI.GreaterThan(0.)))
+    push!(constraint_indices, MOI.add_constraint(model, x[i], MOI.GreaterThan(0.)))
 end
 
 MOI.optimize!(model)
2 changes: 1 addition & 1 deletion examples/solve-QP.jl
@@ -29,7 +29,7 @@ for i in 1:n
     end
 end
 
-objective_function = MOI.ScalarQuadraticFunction(MOI.ScalarAffineTerm.(q, x),quad_terms,0.0)
+objective_function = MOI.ScalarQuadraticFunction(quad_terms, MOI.ScalarAffineTerm.(q, x), 0.0)
 MOI.set(model, MOI.ObjectiveFunction{MOI.ScalarQuadraticFunction{Float64}}(), objective_function)
 MOI.set(model, MOI.ObjectiveSense(), MOI.MIN_SENSE)
 
8 changes: 2 additions & 6 deletions src/DiffOpt.jl
@@ -16,15 +16,11 @@ const MOIU = MathOptInterface.Utilities
 using MathOptSetDistances
 const MOSD = MathOptSetDistances
 
-using MatrixOptInterface
-const MatOI = MatrixOptInterface
-
-
 const VI = MOI.VariableIndex
 const CI = MOI.ConstraintIndex
 
 const SUPPORTED_OBJECTIVES = Union{
-    MOI.SingleVariable,
+    MOI.VariableIndex,
     MOI.ScalarAffineFunction{Float64},
     MOI.ScalarQuadraticFunction{Float64}
 }
@@ -37,7 +33,7 @@ const SUPPORTED_SCALAR_SETS = Union{
 }
 
 const SUPPORTED_SCALAR_FUNCTIONS = Union{
-    MOI.SingleVariable,
+    MOI.VariableIndex,
     MOI.ScalarAffineFunction{Float64}
 }
 