From 38a78c1627cf6f3ad42f5dc02e10fadecac42217 Mon Sep 17 00:00:00 2001 From: Neven Sajko Date: Thu, 5 Jan 2023 12:37:53 +0100 Subject: [PATCH 1/3] MutableArithmetics for IPM/HSD Use the `BigFloat` dot product from MutableArithmetics in HSD code. Helps with the performance of the `BigFloat` arithmetic. The change shouldn't affect other arithmetics, and it's coded so it'd be easy to extend it to another mutable arithmetic apart from just `BigFloat`, if necessary, and if such a type will support MutableArithmetics. Apart from improving performance, this change could possibly also benefit LP problems with numerical issues (when using `BigFloat`), because the MA dot product uses a summation algorithm that's more accurate than naive summation. A performance experiment is presented in the commit message of the following commit. The conclusion is that this commit improves performance only by a tiny bit, likewise with allocation. --- Project.toml | 2 + src/IPM/HSD/HSD.jl | 55 ++++++++++++++---- src/IPM/HSD/dot_for_mutable.jl | 102 +++++++++++++++++++++++++++++++++ src/IPM/HSD/step.jl | 66 ++++++++++++++------- src/Tulip.jl | 1 + 5 files changed, 194 insertions(+), 32 deletions(-) create mode 100644 src/IPM/HSD/dot_for_mutable.jl diff --git a/Project.toml b/Project.toml index 62531f7b..049da3c3 100644 --- a/Project.toml +++ b/Project.toml @@ -12,6 +12,7 @@ LinearAlgebra = "37e2e46d-f89d-539d-b4ee-838fcccc9c8e" LinearOperators = "5c8ed15e-5a4c-59e4-a42b-c7e8811fb125" Logging = "56ddb016-857b-54e1-b83d-db4d58db5568" MathOptInterface = "b8f27783-ece8-5eb3-8dc8-9495eed66fee" +MutableArithmetics = "d8a4904e-b15c-11e9-3269-09a3773c0cb0" Printf = "de0858da-6303-5e67-8744-51eddeeeb8d7" QPSReader = "10f199a5-22af-520b-b891-7ce84a7b1bd0" SparseArrays = "2f01184e-e22b-5df5-ae63-d93ebab69eaf" @@ -27,6 +28,7 @@ Krylov = "0.8, 0.9" LDLFactorizations = "0.8, 0.9, 0.10" LinearOperators = "2.0" MathOptInterface = "1" +MutableArithmetics = "1.2" QPSReader = "0.2" TimerOutputs = "0.5.6" julia = "1.6" diff --git a/src/IPM/HSD/HSD.jl b/src/IPM/HSD/HSD.jl index e2681a11..f08dd902 100644 --- a/src/IPM/HSD/HSD.jl +++ b/src/IPM/HSD/HSD.jl @@ -64,6 +64,8 @@ mutable struct HSD{T, Tv, Tb, Ta, Tk} <: AbstractIPMOptimizer{T} end +include("dot_for_mutable.jl") + include("step.jl") @@ -101,13 +103,22 @@ function compute_residuals!(hsd::HSD{T} mul!(res.rd, transpose(dat.A), pt.y, -one(T), one(T)) @. 
res.rd += pt.zu .* dat.uflag - pt.zl .* dat.lflag + dot_buf = buffer_for_dot_weighted_sum(T) + # Gap residual # rg = c'x - (b'y + l'zl - u'zu) + k - res.rg = pt.κ + (dot(dat.c, pt.x) - ( - dot(dat.b, pt.y) - + dot(dat.l .* dat.lflag, pt.zl) - - dot(dat.u .* dat.uflag, pt.zu) - )) + res.rg = pt.κ + buffered_dot_weighted_sum!!( + dot_buf, + ( + (dat.c, pt.x), + (dat.b, pt.y), + (dat.l .* dat.lflag, pt.zl), + (dat.u .* dat.uflag, pt.zu), + ), + ( + 1, -1, -1, 1, + ), + ) # Residuals norm res.rp_nrm = norm(res.rp, Inf) @@ -117,11 +128,17 @@ function compute_residuals!(hsd::HSD{T} res.rg_nrm = norm(res.rg, Inf) # Compute primal and dual bounds - hsd.primal_objective = dot(dat.c, pt.x) / pt.τ + dat.c0 - hsd.dual_objective = ( - dot(dat.b, pt.y) - + dot(dat.l .* dat.lflag, pt.zl) - - dot(dat.u .* dat.uflag, pt.zu) + hsd.primal_objective = buffered_dot_product!!(dot_buf.dot, dat.c, pt.x) / pt.τ + dat.c0 + hsd.dual_objective = buffered_dot_weighted_sum!!( + dot_buf, + ( + (dat.b, pt.y), + (dat.l .* dat.lflag, pt.zl), + (dat.u .* dat.uflag, pt.zu), + ), + ( + 1, 1, -1, + ), ) / pt.τ + dat.c0 return nothing @@ -168,12 +185,15 @@ function update_solver_status!(hsd::HSD{T}, ϵp::T, ϵd::T, ϵg::T, ϵi::T) wher return nothing end + dot_buf = buffer_for_dot_weighted_sum(T) + # Check for infeasibility certificates if max( norm(dat.A * pt.x, Inf), norm((pt.x .- pt.xl) .* dat.lflag, Inf), norm((pt.x .+ pt.xu) .* dat.uflag, Inf) - ) * (norm(dat.c, Inf) / max(1, norm(dat.b, Inf))) < - ϵi * dot(dat.c, pt.x) + ) * (norm(dat.c, Inf) / max(1, norm(dat.b, Inf))) < + -ϵi * buffered_dot_product!!(dot_buf.dot, dat.c, pt.x) # Dual infeasible, i.e., primal unbounded hsd.primal_status = Sln_InfeasibilityCertificate hsd.solver_status = Trm_DualInfeasible @@ -185,7 +205,18 @@ function update_solver_status!(hsd::HSD{T}, ϵp::T, ϵd::T, ϵg::T, ϵi::T) wher norm(dat.l .* dat.lflag, Inf), norm(dat.u .* dat.uflag, Inf), norm(dat.b, Inf) - ) / (max(one(T), norm(dat.c, Inf))) < (dot(dat.b, pt.y) + dot(dat.l .* dat.lflag, pt.zl)- dot(dat.u .* dat.uflag, pt.zu)) * ϵi + ) / (max(one(T), norm(dat.c, Inf))) < buffered_dot_weighted_sum!!( + dot_buf, + ( + (dat.b, pt.y), + (dat.l .* dat.lflag, pt.zl), + (dat.u .* dat.uflag, pt.zu), + ), + ( + 1, 1, -1, + ), + ) * ϵi + # Primal infeasible hsd.dual_status = Sln_InfeasibilityCertificate hsd.solver_status = Trm_PrimalInfeasible diff --git a/src/IPM/HSD/dot_for_mutable.jl b/src/IPM/HSD/dot_for_mutable.jl new file mode 100644 index 00000000..0bece3d4 --- /dev/null +++ b/src/IPM/HSD/dot_for_mutable.jl @@ -0,0 +1,102 @@ +# Right now this is just `BigFloat`, but in principle it could be expanded to a whitelist +# that would include other mutable types. 
+const SupportedMutableArithmetics = BigFloat + +buffer_for_dot_product(::Type{V}) where {V <: AbstractVector{<:Real}} = + buffer_for(LinearAlgebra.dot, V, V) + +buffer_for_dot_product(::Type{F}) where {F <: Real} = + buffer_for_dot_product(Vector{F}) + +buffered_dot_product_to!( + buf::B, + result::F, + x::V, + y::V, +) where {B <: Any, F <: SupportedMutableArithmetics, V <: AbstractVector{F}} = + buffered_operate_to!(buf, result, LinearAlgebra.dot, x, y) + +function buffered_dot_product!!( + buf::B, + x::V, + y::V, +) where {B <: Any, F <: SupportedMutableArithmetics, V <: AbstractVector{F}} + ret = zero(F) + ret = buffered_dot_product_to!(buf, ret, x, y) + return ret +end + +buffered_dot_product!!(::Nothing, x::V, y::V) where {F <: Real, V <: AbstractVector{F}} = + dot(x, y) + +struct DotWeightedSumBuffer{F <: Real, DotBuffer <: Any} + tmp::F + dot::DotBuffer + + function DotWeightedSumBuffer{F}() where {F <: Real} + dot_buffer = buffer_for_dot_product(F) + return new{F, typeof(dot_buffer)}(zero(F), dot_buffer) + end +end + +struct DotWeightedSumBufferDummy + dot::Nothing + + DotWeightedSumBufferDummy() = new(nothing) +end + +buffer_for_dot_weighted_sum(::Type{F}) where {F <: SupportedMutableArithmetics} = + DotWeightedSumBuffer{F}() + +buffer_for_dot_weighted_sum(::Type{F}) where {F <: Real} = + DotWeightedSumBufferDummy() + +function buffered_dot_weighted_sum_to_inner!( + buf::DotWeightedSumBuffer{F}, + sum::F, + vecs::NTuple{n, NTuple{2, <:AbstractVector{F}}}, + weights::NTuple{n, <:Real}, +) where {n, F <: SupportedMutableArithmetics} + sum = zero!!(sum) + + for i in 1:n + weight = weights[i] + (x, y) = vecs[i] + + buffered_dot_product_to!(buf.dot, buf.tmp, x, y) + mul!!(buf.tmp, weight) + + sum = add!!(sum, buf.tmp) + end + + return sum +end + +buffered_dot_weighted_sum_to!( + buf::DotWeightedSumBuffer{F}, + sum::F, + vecs::NTuple{n, NTuple{2, <:AbstractVector{F}}}, + weights::NTuple{n, Int}) where {n, F <: SupportedMutableArithmetics} = + # It seems like the specialization + # *(x::BigFloat, c::Int8) + # could be more efficient than + # *(x::BigFloat, c::Int) + # MPFR has separate functions for those, and Julia uses them, + # there must be a good (performance) reason for that. + buffered_dot_weighted_sum_to_inner!(buf, sum, vecs, map(Int8, weights)) + +function buffered_dot_weighted_sum!!( + buf::DotWeightedSumBuffer{F}, + vecs::NTuple{n, NTuple{2, <:AbstractVector{F}}}, + weights::NTuple{n, Int}, +) where {n, F <: SupportedMutableArithmetics} + ret = zero(F) + ret = buffered_dot_weighted_sum_to!(buf, ret, vecs, weights) + return ret +end + +buffered_dot_weighted_sum!!( + buf::DotWeightedSumBufferDummy, + vecs::NTuple{n, NTuple{2, <:AbstractVector{F}}}, + weights::NTuple{n, Int}) where {n, F <: Real} = + mapreduce((vec2, weight) -> weight*dot(vec2...), +, vecs, weights, init = zero(F)) diff --git a/src/IPM/HSD/step.jl b/src/IPM/HSD/step.jl index cc7b7abc..f37ee2ee 100644 --- a/src/IPM/HSD/step.jl +++ b/src/IPM/HSD/step.jl @@ -61,17 +61,23 @@ function compute_step!(hsd::HSD{T, Tv}, params::IPMOptions{T}) where{T, Tv<:Abst ξ_ = @. (dat.c - ((pt.zl / pt.xl) * dat.l) * dat.lflag - ((pt.zu / pt.xu) * dat.u) * dat.uflag) KKT.solve!(hx, hy, hsd.kkt, dat.b, ξ_) + dot_buf = buffer_for_dot_weighted_sum(T) + # Recover h0 = ρg + κ / τ - c'hx + b'hy - u'hz # Some of the summands may take large values, # so care must be taken for numerical stability - h0 = ( - dot(dat.l .* dat.lflag, (dat.l .* θl) .* dat.lflag) - + dot(dat.u .* dat.uflag, (dat.u .* θu) .* dat.uflag) - - dot((@. 
(c + (θl * dat.l) * dat.lflag + (θu * dat.u) * dat.uflag)), hx) - + dot(b, hy) - + pt.κ / pt.τ - + hsd.regG - ) + h0 = buffered_dot_weighted_sum!!( + dot_buf, + ( + (dat.l .* dat.lflag, (dat.l .* θl) .* dat.lflag), + (dat.u .* dat.uflag, (dat.u .* θu) .* dat.uflag), + ((@. (c + (θl * dat.l) * dat.lflag + (θu * dat.u) * dat.uflag)), hx), + (b, hy), + ), + ( + 1, 1, -1, 1, + ), + ) + pt.κ / pt.τ + hsd.regG # Affine-scaling direction @timeit hsd.timer "Newton" solve_newton_system!(Δ, hsd, hx, hy, h0, @@ -211,22 +217,42 @@ function solve_newton_system!(Δ::Point{T, Tv}, end @timeit hsd.timer "KKT" KKT.solve!(Δ.x, Δ.y, hsd.kkt, ξp, ξd_) + dot_buf = buffer_for_dot_weighted_sum(T) + # II. Recover Δτ, Δx, Δy # Compute Δτ - @timeit hsd.timer "ξg_" ξg_ = (ξg + ξtk / pt.τ - - dot((ξxzl ./ pt.xl) .* dat.lflag, dat.l .* dat.lflag) # l'(Xl)^-1 * ξxzl - + dot((ξxzu ./ pt.xu) .* dat.uflag, dat.u .* dat.uflag) - - dot(((pt.zl ./ pt.xl) .* ξl) .* dat.lflag, dat.l .* dat.lflag) - - dot(((pt.zu ./ pt.xu) .* ξu) .* dat.uflag, dat.u .* dat.uflag) # - ) + @timeit hsd.timer "ξg_" ξg_ = ξg + ξtk / pt.τ + + buffered_dot_weighted_sum!!( + dot_buf, + ( + ((ξxzl ./ pt.xl) .* dat.lflag, dat.l .* dat.lflag), # l'(Xl)^-1 * ξxzl + ((ξxzu ./ pt.xu) .* dat.uflag, dat.u .* dat.uflag), + (((pt.zl ./ pt.xl) .* ξl) .* dat.lflag, dat.l .* dat.lflag), + (((pt.zu ./ pt.xu) .* ξu) .* dat.uflag, dat.u .* dat.uflag), + ), + ( + -1, 1, -1, -1, + ), + ) @timeit hsd.timer "Δτ" Δ.τ = ( - ξg_ - + dot((@. (dat.c - + ((pt.zl / pt.xl) * dat.l) * dat.lflag - + ((pt.zu / pt.xu) * dat.u) * dat.uflag)) - , Δ.x) - - dot(dat.b, Δ.y) + ξg_ + + buffered_dot_weighted_sum!!( + dot_buf, + ( + ( + (@. ( + dat.c + + ((pt.zl / pt.xl) * dat.l) * dat.lflag + + ((pt.zu / pt.xu) * dat.u) * dat.uflag)), + Δ.x, + ), + (dat.b, Δ.y), + ), + ( + 1, -1, + ), + ) ) / h0 diff --git a/src/Tulip.jl b/src/Tulip.jl index ac52b5cb..224640e9 100644 --- a/src/Tulip.jl +++ b/src/Tulip.jl @@ -2,6 +2,7 @@ module Tulip using LinearAlgebra using Logging +using MutableArithmetics using Printf using SparseArrays using TOML From 78bd3cc7d8d8ec8cc5d3676ad96a8bf03c790e80 Mon Sep 17 00:00:00 2001 From: Neven Sajko Date: Tue, 10 Jan 2023 18:05:48 +0100 Subject: [PATCH 2/3] IPM/HSD: use logical slicing instead of elementwise multiplication We now do basically this for a logical vector l: `dot(a[l], b[l])`, instead of `dot(a .* l, b .* l) as before. This is as suggested here: https://github.com/ds4dm/Tulip.jl/issues/122#issuecomment-1105193770 Helps with the performance of the BigFloat arithmetic. A Julia script and a Unix shell script were used to conduct an experiment for assesing the impact of this commit and the previous commit on performance. The scripts and the resulting CSV file follow. The benchmark experiment is conducted and the CSV created by removing sources of system load on a computer and running the shell script four times: once with the `init_csv` command and three times with the `run` command, each time checking out a different commit in the Tulip git repo. 
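Before the scripts, a minimal self-contained sketch of the slicing change described above, using hypothetical vectors `a`, `b` and a logical mask `l` (these names and values are illustrative only and do not appear in the patch). The point is that the masked dot product is now taken over logical slices instead of over elementwise products, so masked-out entries are never multiplied and no full-length `BigFloat` temporaries are created for them:

```julia
# Illustrative sketch, not code from the patch.
using LinearAlgebra

setprecision(BigFloat, 256)

a = BigFloat[1.25, 2.5, 3.75, 5.0, 6.25, 7.5]
b = BigFloat[0.5, 1.5, 2.5, 3.5, 4.5, 5.5]
l = [true, false, true, true, false, true]   # logical mask, playing the role of e.g. dat.lflag

before = dot(a .* l, b .* l)  # old style: full-length temporaries, masked entries multiplied by zero
after  = dot(a[l], b[l])      # new style: only the selected entries are copied and multiplied

before == after  # true: the masked-out terms were exact zeros, so the value is unchanged
```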
Unix (Bourne) shell script: ```sh set -u command=$1 julia_opts='-O3 --min-optlevel=3 --heap-size-hint=5G --depwarn=error --warn-overwrite=yes' script=tulip_benchmark.jl case "$command" in init_csv) printf '%s,%s,%s,%s\n' 'Tulip version' estimator 'measurement type' value ;; run) tulip_version=$2 $PATH_TO_JULIA_BIN $julia_opts "$script" "$tulip_version" ;; *) printf '%s\n' error 2>&1 exit 1 ;; esac ``` Julia script: ```julia const benchmark_seconds = 500 const polynomial_degree = 20 setprecision(BigFloat, 12 * 2^7) using BenchmarkTools import FindMinimaxPolynomial, # v0.2.3 Tulip, MathOptInterface const FMP = FindMinimaxPolynomial const MMX = FMP.Minimax const PPTI = FMP.PolynomialPassingThroughIntervals const NE = FMP.NumericalErrorTypes const to_poly = FMP.ToSparsePolynomial.to_sparse_polynomial const mmx = MMX.minimax_polynomial const error_type_relative = NE.RelativeError() const MOI = MathOptInterface const itv_max_err = FMP.ApproximateInfinityNorm.interval_max_err function make_lp() lp = Tulip.Optimizer{BigFloat}() # Remove iteration limit just in case MOI.set(lp, MOI.RawOptimizerAttribute("IPM_IterationsLimit"), 2000) # Disable presolve, speeds things up #MOI.set(lp, MOI.RawOptimizerAttribute("Presolve_Level"), 0) lp end const itv = (-big"2.0"^-3, big"45.0") odd_monomials(n::Int) = 1:2:n sind_mmx(n::Int) = mmx( make_lp, sind, (itv,), odd_monomials(n), # Small factor to have less variance in the results initial_perturb_factor = 1//(2^20), # We're benchmarking LP, so disable other stuff worst_segments_density = 5, worst_segments_breadth_limit = 2, worst_segments_depth_ratio = 1/2, # Exit right after the first step exit_condition = true, ) function report(estimator; benchmark, benchmark_name) b = estimator(benchmark) println("$benchmark_name,$estimator,time,$(b.time)") println("$benchmark_name,$estimator,gctime,$(b.gctime)") println("$benchmark_name,$estimator,memory,$(b.memory)") println("$benchmark_name,$estimator,allocs,$(b.allocs)") end function report(;benchmark, benchmark_name) for quantile in (minimum, median, maximum) report( quantile, benchmark = benchmark, benchmark_name = benchmark_name, ) end end report( benchmark = (@benchmark sind_mmx(polynomial_degree) seconds=benchmark_seconds), benchmark_name = first(ARGS), ) ``` CSV results: ```csv Tulip version,estimator,measurement type,value v0.9.5,minimum,time,2.63759609e8 v0.9.5,minimum,gctime,3.4505713e7 v0.9.5,minimum,memory,495299296 v0.9.5,minimum,allocs,3629874 v0.9.5,median,time,4.2594161e8 v0.9.5,median,gctime,1.3250321e8 v0.9.5,median,memory,495299296 v0.9.5,median,allocs,3629874 v0.9.5,maximum,time,4.55935021e8 v0.9.5,maximum,gctime,1.40426286e8 v0.9.5,maximum,memory,495299296 v0.9.5,maximum,allocs,3629874 MutableArithmetics for IPM/HSD,minimum,time,2.57993117e8 MutableArithmetics for IPM/HSD,minimum,gctime,2.9403466e7 MutableArithmetics for IPM/HSD,minimum,memory,442052896 MutableArithmetics for IPM/HSD,minimum,allocs,3238720 MutableArithmetics for IPM/HSD,median,time,4.22323273e8 MutableArithmetics for IPM/HSD,median,gctime,1.282365305e8 MutableArithmetics for IPM/HSD,median,memory,442052896 MutableArithmetics for IPM/HSD,median,allocs,3238720 MutableArithmetics for IPM/HSD,maximum,time,4.56330849e8 MutableArithmetics for IPM/HSD,maximum,gctime,1.57061172e8 MutableArithmetics for IPM/HSD,maximum,memory,442052896 MutableArithmetics for IPM/HSD,maximum,allocs,3238720 IPM/HSD: use logical slicing ...,minimum,time,2.40996648e8 IPM/HSD: use logical slicing ...,minimum,gctime,2.5335783e7 IPM/HSD: use logical slicing 
...,minimum,memory,386588512 IPM/HSD: use logical slicing ...,minimum,allocs,2833356 IPM/HSD: use logical slicing ...,median,time,3.76039574e8 IPM/HSD: use logical slicing ...,median,gctime,1.06930941e8 IPM/HSD: use logical slicing ...,median,memory,386588512 IPM/HSD: use logical slicing ...,median,allocs,2833356 IPM/HSD: use logical slicing ...,maximum,time,4.00347376e8 IPM/HSD: use logical slicing ...,maximum,gctime,1.27260987e8 IPM/HSD: use logical slicing ...,maximum,memory,386588512 IPM/HSD: use logical slicing ...,maximum,allocs,2833356 ``` Fixes #122 --- src/IPM/HSD/HSD.jl | 12 ++++++------ src/IPM/HSD/step.jl | 12 ++++++------ 2 files changed, 12 insertions(+), 12 deletions(-) diff --git a/src/IPM/HSD/HSD.jl b/src/IPM/HSD/HSD.jl index f08dd902..45a16a69 100644 --- a/src/IPM/HSD/HSD.jl +++ b/src/IPM/HSD/HSD.jl @@ -112,8 +112,8 @@ function compute_residuals!(hsd::HSD{T} ( (dat.c, pt.x), (dat.b, pt.y), - (dat.l .* dat.lflag, pt.zl), - (dat.u .* dat.uflag, pt.zu), + (dat.l[dat.lflag], pt.zl[dat.lflag]), + (dat.u[dat.uflag], pt.zu[dat.uflag]), ), ( 1, -1, -1, 1, @@ -133,8 +133,8 @@ function compute_residuals!(hsd::HSD{T} dot_buf, ( (dat.b, pt.y), - (dat.l .* dat.lflag, pt.zl), - (dat.u .* dat.uflag, pt.zu), + (dat.l[dat.lflag], pt.zl[dat.lflag]), + (dat.u[dat.uflag], pt.zu[dat.uflag]), ), ( 1, 1, -1, @@ -209,8 +209,8 @@ function update_solver_status!(hsd::HSD{T}, ϵp::T, ϵd::T, ϵg::T, ϵi::T) wher dot_buf, ( (dat.b, pt.y), - (dat.l .* dat.lflag, pt.zl), - (dat.u .* dat.uflag, pt.zu), + (dat.l[dat.lflag], pt.zl[dat.lflag]), + (dat.u[dat.uflag], pt.zu[dat.uflag]), ), ( 1, 1, -1, diff --git a/src/IPM/HSD/step.jl b/src/IPM/HSD/step.jl index f37ee2ee..5649f7e2 100644 --- a/src/IPM/HSD/step.jl +++ b/src/IPM/HSD/step.jl @@ -69,8 +69,8 @@ function compute_step!(hsd::HSD{T, Tv}, params::IPMOptions{T}) where{T, Tv<:Abst h0 = buffered_dot_weighted_sum!!( dot_buf, ( - (dat.l .* dat.lflag, (dat.l .* θl) .* dat.lflag), - (dat.u .* dat.uflag, (dat.u .* θu) .* dat.uflag), + (dat.l[dat.lflag], (dat.l .* θl)[dat.lflag]), + (dat.u[dat.uflag], (dat.u .* θu)[dat.uflag]), ((@. 
(c + (θl * dat.l) * dat.lflag + (θu * dat.u) * dat.uflag)), hx), (b, hy), ), @@ -225,10 +225,10 @@ function solve_newton_system!(Δ::Point{T, Tv}, buffered_dot_weighted_sum!!( dot_buf, ( - ((ξxzl ./ pt.xl) .* dat.lflag, dat.l .* dat.lflag), # l'(Xl)^-1 * ξxzl - ((ξxzu ./ pt.xu) .* dat.uflag, dat.u .* dat.uflag), - (((pt.zl ./ pt.xl) .* ξl) .* dat.lflag, dat.l .* dat.lflag), - (((pt.zu ./ pt.xu) .* ξu) .* dat.uflag, dat.u .* dat.uflag), + ((ξxzl ./ pt.xl)[dat.lflag], dat.l[dat.lflag]), # l'(Xl)^-1 * ξxzl + ((ξxzu ./ pt.xu)[dat.uflag], dat.u[dat.uflag]), + (((pt.zl ./ pt.xl) .* ξl)[dat.lflag], dat.l[dat.lflag]), + (((pt.zu ./ pt.xu) .* ξu)[dat.uflag], dat.u[dat.uflag]), ), ( -1, 1, -1, -1, From 9bbe799dcb7c99ea0fdede735d97e466e68d02a3 Mon Sep 17 00:00:00 2001 From: Neven Sajko Date: Thu, 20 Apr 2023 21:44:21 +0200 Subject: [PATCH 3/3] exclude some tests temporarily --- test/Interfaces/MOI_wrapper.jl | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/test/Interfaces/MOI_wrapper.jl b/test/Interfaces/MOI_wrapper.jl index 9b3d08df..17154b49 100644 --- a/test/Interfaces/MOI_wrapper.jl +++ b/test/Interfaces/MOI_wrapper.jl @@ -32,6 +32,8 @@ const CONFIG = MOIT.Config(Float64, atol=1e-6, rtol=1e-6, # Tulip not compliant with MOI convention for primal/dual infeasible models # See expected behavior at https://jump.dev/MathOptInterface.jl/dev/background/infeasibility_certificates/ "test_unbounded", + # Tulip is not compliant with the MOI.ListOfModelAttributesSet attribute + "_in_ListOfModelAttributesSet", ] ) @@ -66,6 +68,8 @@ end # Tulip not compliant with MOI convention for primal/dual infeasible models # See expected behavior at https://jump.dev/MathOptInterface.jl/dev/background/infeasibility_certificates/ "test_unbounded", + # Tulip is not compliant with the MOI.ListOfModelAttributesSet attribute + "_in_ListOfModelAttributesSet", ], ) end
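As a closing note for reviewers, here is a minimal standalone sketch of the MutableArithmetics buffered-dot pattern that `dot_for_mutable.jl` from the first patch is built around. The vector contents, length, and variable names are illustrative assumptions rather than code from the patches; the two entry points shown (`buffer_for` and `buffered_operate_to!`) are the same MutableArithmetics calls the new file uses:

```julia
# Standalone sketch (not from the patches) of the buffered BigFloat dot product.
using LinearAlgebra, MutableArithmetics

setprecision(BigFloat, 12 * 2^7)  # same precision as the benchmark script above

x = BigFloat.(1:1000)
y = BigFloat.(1000:-1:1)

# Allocate the scratch buffer once, as buffer_for_dot_product does in the patch ...
buf = buffer_for(LinearAlgebra.dot, typeof(x), typeof(y))
acc = zero(BigFloat)

# ... then reuse it for every subsequent dot product, mutating `acc` in place
# instead of allocating a fresh chain of BigFloat temporaries on each call.
acc = buffered_operate_to!(buf, acc, LinearAlgebra.dot, x, y)

acc ≈ dot(x, y)  # same value up to summation order, with far fewer allocations
```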