-
-
Notifications
You must be signed in to change notification settings - Fork 394
New issue
Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.
By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.
Already on GitHub? Sign in to your account
Make things work for general AbstractArrays
#980
Changes from 1 commit
b87c015
6306e2c
ae06063
722fd26
553981a
035cfe9
5776b09
5c300c4
a8e0ab9
46b5548
2254e20
bded621
File filter
Filter by extension
Conversations
Jump to
Diff view
Diff view
There are no files selected for viewing
Original file line number | Diff line number | Diff line change |
---|---|---|
|
@@ -233,7 +233,7 @@ end | |
setobjective(m::Model, something::Any) = | ||
error("in setobjective: needs three arguments: model, objective sense (:Max or :Min), and expression.") | ||
|
||
setobjective(::Model, ::Symbol, x::Array) = | ||
setobjective(::Model, ::Symbol, x::AbstractArray) = | ||
error("in setobjective: array of size $(size(x)) passed as objective; only scalar objectives are allowed") | ||
|
||
function setsolver(m::Model, solver::MathProgBase.AbstractMathProgSolver) | ||
|
@@ -477,7 +477,7 @@ Base.zero(::Variable) = zero(Variable) | |
Base.one(::Type{Variable}) = AffExpr(Variable[],Float64[],1.0) | ||
Base.one(::Variable) = one(Variable) | ||
|
||
function verify_ownership(m::Model, vec::Vector{Variable}) | ||
function verify_ownership(m::Model, vec::AbstractVector{Variable}) | ||
n = length(vec) | ||
@inbounds for i in 1:n | ||
vec[i].m !== m && return false | ||
|
@@ -487,7 +487,7 @@ end | |
|
||
Base.copy(v::Variable, new_model::Model) = Variable(new_model, v.col) | ||
Base.copy(x::Void, new_model::Model) = nothing | ||
function Base.copy(v::Array{Variable}, new_model::Model) | ||
function Base.copy(v::AbstractArray{Variable}, new_model::Model) | ||
ret = similar(v, Variable, size(v)) | ||
for I in eachindex(v) | ||
ret[I] = Variable(new_model, v[I].col) | ||
|
@@ -529,7 +529,7 @@ type SDConstraint <: AbstractConstraint | |
end | ||
|
||
# Special-case X ≥ 0, which is often convenient | ||
function SDConstraint(lhs::Matrix, rhs::Number) | ||
function SDConstraint(lhs::AbstractMatrix, rhs::Number) | ||
rhs == 0 || error("Cannot construct a semidefinite constraint with nonzero scalar bound $rhs") | ||
SDConstraint(lhs) | ||
end | ||
|
@@ -741,12 +741,12 @@ function setRHS(c::LinConstrRef, rhs::Number) | |
end | ||
|
||
Variable(m::Model,lower::Number,upper::Number,cat::Symbol,objcoef::Number, | ||
constraints::JuMPArray,coefficients::Vector{Float64}, name::AbstractString="", value::Number=NaN) = | ||
constraints::JuMPArray,coefficients::AbstractVector{Float64}, name::AbstractString="", value::Number=NaN) = | ||
Variable(m, lower, upper, cat, objcoef, constraints.innerArray, coefficients, name, value) | ||
|
||
# add variable to existing constraints | ||
function Variable(m::Model,lower::Number,upper::Number,cat::Symbol,objcoef::Number, | ||
constraints::Vector,coefficients::Vector{Float64}, name::AbstractString="", value::Number=NaN) | ||
constraints::AbstractVector,coefficients::AbstractVector{Float64}, name::AbstractString="", value::Number=NaN) | ||
for c in constraints | ||
if !isa(c, LinConstrRef) | ||
error("Unexpected constraint of type $(typeof(c)). Column-wise modeling only supported for linear constraints") | ||
|
@@ -909,9 +909,9 @@ include("print.jl") | |
# Deprecations | ||
include("deprecated.jl") | ||
|
||
getvalue{T<:JuMPTypes}(arr::Array{T}) = map(getvalue, arr) | ||
getvalue{T<:JuMPTypes}(arr::AbstractArray{T}) = map(getvalue, arr) | ||
There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. Not needed, use There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. So delete the method? There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. It's been previously discussed. The conclusion for now is to allow |
||
|
||
function setvalue{T<:AbstractJuMPScalar}(set::Array{T}, val::Array) | ||
function setvalue{T<:AbstractJuMPScalar}(set::AbstractArray{T}, val::AbstractArray) | ||
There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. Also not needed, use |
||
promote_shape(size(set), size(val)) # Check dimensions match | ||
for I in eachindex(set) | ||
setvalue(set[I], val[I]) | ||
|
Original file line number | Diff line number | Diff line change |
---|---|---|
|
@@ -10,7 +10,7 @@ immutable JuMPArray{T,N,NT} <: JuMPContainer{T,N} | |
meta::Dict{Symbol,Any} | ||
end | ||
|
||
@generated function JuMPArray{T,N}(innerArray::Array{T,N}, indexsets::NTuple{N,Any}) | ||
@generated function JuMPArray{T,N}(innerArray::AbstractArray{T,N}, indexsets::NTuple{N,Any}) | ||
There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. I don't think this is needed |
||
dicttuple = Expr(:tuple) | ||
for i in 1:N | ||
inner = quote | ||
|
Original file line number | Diff line number | Diff line change |
---|---|---|
|
@@ -181,10 +181,10 @@ function addconstraint(m::Model, c::LinearConstraint) | |
end | ||
return LinConstrRef(m,length(m.linconstr)) | ||
end | ||
addconstraint(m::Model, c::Array{LinearConstraint}) = | ||
addconstraint(m::Model, c::AbstractArray{LinearConstraint}) = | ||
error("The operators <=, >=, and == can only be used to specify scalar constraints. If you are trying to add a vectorized constraint, use the element-wise dot comparison operators (.<=, .>=, or .==) instead") | ||
|
||
function addVectorizedConstraint(m::Model, v::Array{LinearConstraint}) | ||
function addVectorizedConstraint(m::Model, v::AbstractArray{LinearConstraint}) | ||
There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. This is an internal function, it's unclear how it could be called with an There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. I'm somewhat uncertain if this method should be changed. Is there a compelling reason to pass a sparse matrix of linear constraints, for example? There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. Without changing this method, the following fails: m = Model()
v = @variable(m, [1:3])
x = OffsetArray(v, -3)
@constraint(m, x .== 0) because there is no There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. This doesn't seem like the right fix. I think There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. Since this also doesn't work: t = OffsetArray(rand(3), -3)
t .== 0 I'm not sure this is desirable behavior. There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. OK, will change. @joehuchette, that seems like a bug in Base. There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. Done. |
||
ret = Array{LinConstrRef}(size(v)) | ||
for I in eachindex(v) | ||
ret[I] = addconstraint(m, v[I]) | ||
|
Original file line number | Diff line number | Diff line change |
---|---|---|
|
@@ -59,7 +59,7 @@ function addinfocallback(m::Model, f::Function; when::Symbol = _unspecifiedstate | |
push!(m.callbacks, InfoCallback(f, when)) | ||
end | ||
|
||
function lazycallback(d::MathProgBase.MathProgCallbackData, m::Model, cbs::Vector{LazyCallback}) | ||
function lazycallback(d::MathProgBase.MathProgCallbackData, m::Model, cbs::AbstractVector{LazyCallback}) | ||
There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. Internal, won't be called with |
||
state = MathProgBase.cbgetstate(d) | ||
@assert state == :MIPSol || state == :MIPNode | ||
anyfrac = mapreduce(|, cbs) do cb | ||
|
@@ -92,7 +92,7 @@ function lazycallback(d::MathProgBase.MathProgCallbackData, m::Model, cbs::Vecto | |
:Continue | ||
end | ||
|
||
function attach_callbacks(m::Model, cbs::Vector{LazyCallback}) | ||
function attach_callbacks(m::Model, cbs::AbstractVector{LazyCallback}) | ||
There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. Internal, won't be called with |
||
cb = d -> lazycallback(d,m,cbs) | ||
if applicable(MathProgBase.setlazycallback!, m.internalModel, cb) | ||
MathProgBase.setlazycallback!(m.internalModel, cb) | ||
|
@@ -101,7 +101,7 @@ function attach_callbacks(m::Model, cbs::Vector{LazyCallback}) | |
end | ||
end | ||
|
||
function cutcallback(d::MathProgBase.MathProgCallbackData, m::Model, cbs::Vector{CutCallback}) | ||
function cutcallback(d::MathProgBase.MathProgCallbackData, m::Model, cbs::AbstractVector{CutCallback}) | ||
There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. Internal, won't be called with |
||
state = MathProgBase.cbgetstate(d) | ||
@assert state == :MIPNode | ||
MathProgBase.cbgetlpsolution(d,m.colVal) | ||
|
@@ -119,7 +119,7 @@ function cutcallback(d::MathProgBase.MathProgCallbackData, m::Model, cbs::Vector | |
:Continue | ||
end | ||
|
||
function attach_callbacks(m::Model, cbs::Vector{CutCallback}) | ||
function attach_callbacks(m::Model, cbs::AbstractVector{CutCallback}) | ||
There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. Internal, won't be called with |
||
cb = d -> cutcallback(d,m,cbs) | ||
if applicable(MathProgBase.setcutcallback!, m.internalModel, cb) | ||
MathProgBase.setcutcallback!(m.internalModel, cb) | ||
|
@@ -129,7 +129,7 @@ function attach_callbacks(m::Model, cbs::Vector{CutCallback}) | |
end | ||
|
||
|
||
function heurcallback(d::MathProgBase.MathProgCallbackData, m::Model, cbs::Vector{HeuristicCallback}) | ||
function heurcallback(d::MathProgBase.MathProgCallbackData, m::Model, cbs::AbstractVector{HeuristicCallback}) | ||
There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. Internal, won't be called with |
||
state = MathProgBase.cbgetstate(d) | ||
@assert state == :MIPNode | ||
MathProgBase.cbgetlpsolution(d,m.colVal) | ||
|
@@ -147,7 +147,7 @@ function heurcallback(d::MathProgBase.MathProgCallbackData, m::Model, cbs::Vecto | |
:Continue | ||
end | ||
|
||
function attach_callbacks(m::Model, cbs::Vector{HeuristicCallback}) | ||
function attach_callbacks(m::Model, cbs::AbstractVector{HeuristicCallback}) | ||
There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. Internal, won't be called with |
||
cb = d -> heurcallback(d,m,cbs) | ||
if applicable(MathProgBase.setheuristiccallback!, m.internalModel, cb) | ||
MathProgBase.setheuristiccallback!(m.internalModel, cb) | ||
|
@@ -156,7 +156,7 @@ function attach_callbacks(m::Model, cbs::Vector{HeuristicCallback}) | |
end | ||
end | ||
|
||
function infocallback(d::MathProgBase.MathProgCallbackData, m::Model, cbs::Vector{InfoCallback}) | ||
function infocallback(d::MathProgBase.MathProgCallbackData, m::Model, cbs::AbstractVector{InfoCallback}) | ||
There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. Internal, won't be called with |
||
state = MathProgBase.cbgetstate(d) | ||
if state == :MIPSol | ||
MathProgBase.cbgetmipsolution(d,m.colVal) | ||
|
@@ -179,7 +179,7 @@ function infocallback(d::MathProgBase.MathProgCallbackData, m::Model, cbs::Vecto | |
:Continue | ||
end | ||
|
||
function attach_callbacks(m::Model, cbs::Vector{InfoCallback}) | ||
function attach_callbacks(m::Model, cbs::AbstractVector{InfoCallback}) | ||
cb = d -> infocallback(d,m,cbs) | ||
if applicable(MathProgBase.setinfocallback!, m.internalModel, cb) | ||
MathProgBase.setinfocallback!(m.internalModel, cb) | ||
|
Original file line number | Diff line number | Diff line change |
---|---|---|
|
@@ -169,7 +169,7 @@ type NLPEvaluator <: MathProgBase.AbstractNLPEvaluator | |
end | ||
end | ||
|
||
function simplify_expression(nd::Vector{NodeData}, const_values, subexpression_linearity, fixed_variables, parameter_values, x_values, subexpression_values) | ||
function simplify_expression(nd::AbstractVector{NodeData}, const_values, subexpression_linearity, fixed_variables, parameter_values, x_values, subexpression_values) | ||
There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. None of the changes in this file are needed |
||
|
||
adj = adjmat(nd) | ||
forward_storage = zeros(length(nd)) | ||
|
@@ -180,7 +180,7 @@ function simplify_expression(nd::Vector{NodeData}, const_values, subexpression_l | |
return nd_new, forward_storage[1], linearity[1] | ||
end | ||
|
||
function FunctionStorage(nd::Vector{NodeData}, const_values,numVar, coloring_storage, want_hess::Bool, subexpr::Vector{Vector{NodeData}}, dependent_subexpressions, subexpression_linearity, subexpression_edgelist, subexpression_variables, fixed_variables) | ||
function FunctionStorage(nd::AbstractVector{NodeData}, const_values,numVar, coloring_storage, want_hess::Bool, subexpr::AbstractVector{Vector{NodeData}}, dependent_subexpressions, subexpression_linearity, subexpression_edgelist, subexpression_variables, fixed_variables) | ||
|
||
adj = adjmat(nd) | ||
forward_storage = zeros(length(nd)) | ||
|
@@ -215,7 +215,7 @@ function FunctionStorage(nd::Vector{NodeData}, const_values,numVar, coloring_sto | |
|
||
end | ||
|
||
function SubexpressionStorage(nd::Vector{NodeData}, const_values,numVar, fixed_variables,subexpression_linearity) | ||
function SubexpressionStorage(nd::AbstractVector{NodeData}, const_values,numVar, fixed_variables,subexpression_linearity) | ||
|
||
adj = adjmat(nd) | ||
forward_storage = zeros(length(nd)) | ||
|
@@ -229,7 +229,7 @@ function SubexpressionStorage(nd::Vector{NodeData}, const_values,numVar, fixed_v | |
|
||
end | ||
|
||
function MathProgBase.initialize(d::NLPEvaluator, requested_features::Vector{Symbol}) | ||
function MathProgBase.initialize(d::NLPEvaluator, requested_features::AbstractVector{Symbol}) | ||
for feat in requested_features | ||
if !(feat in MathProgBase.features_available(d)) | ||
error("Unsupported feature $feat") | ||
|
@@ -1081,7 +1081,7 @@ function Base.show(io::IO,s::SubexpressionPrintWrapper) | |
end | ||
|
||
# we splat in the subexpressions (for now) | ||
function tapeToExpr(m::Model, k, nd::Vector{NodeData}, adj, const_values, parameter_values, subexpressions::Vector{Any},user_operators::ReverseDiffSparse.UserOperatorRegistry, generic_variable_names::Bool, splat_subexpressions::Bool, print_mode=REPLMode) | ||
function tapeToExpr(m::Model, k, nd::AbstractVector{NodeData}, adj, const_values, parameter_values, subexpressions::AbstractVector{Any},user_operators::ReverseDiffSparse.UserOperatorRegistry, generic_variable_names::Bool, splat_subexpressions::Bool, print_mode=REPLMode) | ||
|
||
children_arr = rowvals(adj) | ||
|
||
|
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
This change is not covered by tests.
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
Fixed.