diff --git a/Project.toml b/Project.toml index 54fb479a6..9263bb7d8 100644 --- a/Project.toml +++ b/Project.toml @@ -23,6 +23,7 @@ Preferences = "21216c6a-2e73-6563-6e65-726566657250" RecursiveArrayTools = "731186ca-8d62-57ce-b412-fbd966d074cd" Reexport = "189a3867-3050-52da-a836-e630ba90ab69" SciMLBase = "0bca4576-84f4-4d90-8ffe-ffa030f20462" +SciMLLogging = "a6db7da4-7206-11f0-1eab-35f2a5dbe1d1" SciMLOperators = "c0aeaf25-5076-4817-a8d5-81caf7dfa961" Setfield = "efcf1570-3423-57d1-acb7-fd33fddbac46" StaticArraysCore = "1e83bf80-4336-4d27-bf5d-d5a4f845583c" @@ -124,6 +125,7 @@ Reexport = "1.2.2" SafeTestsets = "0.1" SciMLBase = "2.70" SciMLOperators = "1.7.1" +SciMLLogging = "1.1.0" Setfield = "1.1.1" SparseArrays = "1.10" Sparspak = "0.3.9" diff --git a/docs/src/advanced/developing.md b/docs/src/advanced/developing.md index 31b7a5d1f..ea70d32e1 100644 --- a/docs/src/advanced/developing.md +++ b/docs/src/advanced/developing.md @@ -19,7 +19,7 @@ struct MyLUFactorization{P} <: LinearSolve.SciMLLinearSolveAlgorithm end function LinearSolve.init_cacheval( alg::MyLUFactorization, A, b, u, Pl, Pr, maxiters::Int, abstol, reltol, - verbose::Bool, assump::LinearSolve.OperatorAssumptions) + verbose::LinearVerbosity, assump::LinearSolve.OperatorAssumptions) lu!(convert(AbstractMatrix, A)) end diff --git a/docs/src/basics/common_solver_opts.md b/docs/src/basics/common_solver_opts.md index 80c994621..e8e534c82 100644 --- a/docs/src/basics/common_solver_opts.md +++ b/docs/src/basics/common_solver_opts.md @@ -26,3 +26,92 @@ solve completely. Error controls only apply to iterative solvers. - `maxiters`: The number of iterations allowed. Defaults to `length(prob.b)` - `Pl,Pr`: The left and right preconditioners, respectively. For more information, see [the Preconditioners page](@ref prec). 
+ +## Verbosity Controls + +The verbosity system in LinearSolve.jl provides fine-grained control over the diagnostic messages, warnings, and errors that are displayed during the solution of linear systems. + +The verbosity system is organized hierarchically into three main categories: + +1. Error Control - Messages related to fallbacks and error handling +2. Performance - Messages related to performance considerations +3. Numerical - Messages related to numerical solvers and iterations + +Each category can be configured independently, and individual settings can be adjusted to suit your needs. + +### Verbosity Levels +The following verbosity levels are available: + +#### Individual Settings +These levels apply to individual message toggles within a category. They can also be passed at the group level to set every toggle in that group to the same value. +- SciMLLogging.None() - Suppress all messages +- SciMLLogging.Info() - Show messages as log messages at the info level +- SciMLLogging.Warn() - Show warnings (default for most settings) +- SciMLLogging.Error() - Throw errors instead of warnings +- SciMLLogging.Level(n) - Show messages with a log level setting of n + +#### Group Settings +These settings are meant for controlling a group of settings. 
+- SciMLLogging.Default() - Use the default settings +- SciMLLogging.All() - Show all possible messages + +### Basic Usage + +#### Global Verbosity Control + +```julia +using LinearSolve + +# Suppress all messages +verbose = LinearVerbosity(SciMLLogging.None()) +prob = LinearProblem(A, b) +sol = solve(prob; verbose=verbose) + +# Show all messages +verbose = LinearVerbosity(SciMLLogging.All()) +sol = solve(prob; verbose=verbose) + +# Use default settings +verbose = LinearVerbosity(SciMLLogging.Default()) +sol = solve(prob; verbose=verbose) +``` + +#### Group Level Control + +```julia +# Customize by category +verbose = LinearVerbosity( + error_control = SciMLLogging.Warn(), # Show warnings for error control related issues + performance = SciMLLogging.None(), # Suppress performance messages + numerical = SciMLLogging.Info() # Show all numerical related log messages at info level +) + +sol = solve(prob; verbose=verbose) +``` + +#### Fine-grained Control +The constructor for `LinearVerbosity` allows you to set verbosity for each specific message toggle, giving you fine-grained control. +The verbosity settings for the toggles are automatically passed to the group objects. 
+```julia +# Set specific message types +verbose = LinearVerbosity( + default_lu_fallback = SciMLLogging.Info(), # Show info when LU fallback is used + KrylovJL_verbosity = SciMLLogging.Warn(), # Show warnings from KrylovJL + no_right_preconditioning = SciMLLogging.None(), # Suppress right preconditioning messages + KrylovKit_verbosity = SciMLLogging.Level(KrylovKit.WARN_LEVEL) # Set KrylovKit verbosity level using KrylovKit's own verbosity levels +) + +sol = solve(prob; verbose=verbose) + +``` + +#### Message Toggles and Defaults +##### Error Control Settings +- default_lu_fallback: Controls messages when falling back to LU factorization (default: Warn) +##### Performance Settings +- no_right_preconditioning: Controls messages when right preconditioning is not used (default: Warn) +##### Numerical Settings +- using_IterativeSolvers: Controls messages when using the IterativeSolvers.jl package (default: Warn) +- IterativeSolvers_iterations: Controls messages about iteration counts from IterativeSolvers.jl (default: Warn) +- KrylovKit_verbosity: Controls messages from the KrylovKit.jl package (default: Warn) +- KrylovJL_verbosity: Controls verbosity of the Krylov.jl solvers used via the KrylovJL algorithms (default: None) \ No newline at end of file diff --git a/ext/LinearSolveAMDGPUExt.jl b/ext/LinearSolveAMDGPUExt.jl index 4fad3d9f3..2b9dae94d 100644 --- a/ext/LinearSolveAMDGPUExt.jl +++ b/ext/LinearSolveAMDGPUExt.jl @@ -2,7 +2,7 @@ module LinearSolveAMDGPUExt using AMDGPU using LinearSolve: LinearSolve, LinearCache, AMDGPUOffloadLUFactorization, - AMDGPUOffloadQRFactorization, init_cacheval, OperatorAssumptions + AMDGPUOffloadQRFactorization, init_cacheval, OperatorAssumptions, LinearVerbosity using LinearSolve.LinearAlgebra, LinearSolve.SciMLBase # LU Factorization @@ -25,7 +25,7 @@ function SciMLBase.solve!(cache::LinearSolve.LinearCache, alg::AMDGPUOffloadLUFa end function LinearSolve.init_cacheval(alg::AMDGPUOffloadLUFactorization, A, b, u, Pl, Pr, - maxiters::Int, abstol, reltol, verbose::Bool, + 
maxiters::Int, abstol, reltol, verbose::LinearVerbosity, assumptions::OperatorAssumptions) AMDGPU.rocSOLVER.getrf!(AMDGPU.ROCArray(A)) end @@ -57,7 +57,7 @@ function SciMLBase.solve!(cache::LinearSolve.LinearCache, alg::AMDGPUOffloadQRFa end function LinearSolve.init_cacheval(alg::AMDGPUOffloadQRFactorization, A, b, u, Pl, Pr, - maxiters::Int, abstol, reltol, verbose::Bool, + maxiters::Int, abstol, reltol, verbose::LinearVerbosity, assumptions::OperatorAssumptions) A_gpu = AMDGPU.ROCArray(A) tau = AMDGPU.ROCVector{eltype(A_gpu)}(undef, min(size(A_gpu)...)) diff --git a/ext/LinearSolveBLISExt.jl b/ext/LinearSolveBLISExt.jl index 8215750c0..c1de81437 100644 --- a/ext/LinearSolveBLISExt.jl +++ b/ext/LinearSolveBLISExt.jl @@ -9,7 +9,8 @@ using LinearSolve using LinearAlgebra: BlasInt, LU using LinearAlgebra.LAPACK: require_one_based_indexing, chkfinite, chkstride1, @blasfunc, chkargsok -using LinearSolve: ArrayInterface, BLISLUFactorization, @get_cacheval, LinearCache, SciMLBase +using LinearSolve: ArrayInterface, BLISLUFactorization, @get_cacheval, LinearCache, SciMLBase, LinearVerbosity, get_blas_operation_info, blas_info_msg +using SciMLLogging: SciMLLogging, @SciMLMessage using SciMLBase: ReturnCode const global libblis = blis_jll.blis @@ -204,13 +205,13 @@ const PREALLOCATED_BLIS_LU = begin end function LinearSolve.init_cacheval(alg::BLISLUFactorization, A::Matrix{Float64}, b, u, Pl, Pr, - maxiters::Int, abstol, reltol, verbose::Bool, + maxiters::Int, abstol, reltol, verbose::LinearVerbosity, assumptions::OperatorAssumptions) PREALLOCATED_BLIS_LU end function LinearSolve.init_cacheval(alg::BLISLUFactorization, A::AbstractMatrix{<:Union{Float32,ComplexF32,ComplexF64}}, b, u, Pl, Pr, - maxiters::Int, abstol, reltol, verbose::Bool, + maxiters::Int, abstol, reltol, verbose::LinearVerbosity, assumptions::OperatorAssumptions) A = rand(eltype(A), 0, 0) ArrayInterface.lu_instance(A), Ref{BlasInt}() @@ -220,12 +221,45 @@ function SciMLBase.solve!(cache::LinearCache, 
alg::BLISLUFactorization; kwargs...) A = cache.A A = convert(AbstractMatrix, A) + verbose = cache.verbose if cache.isfresh cacheval = @get_cacheval(cache, :BLISLUFactorization) res = getrf!(A; ipiv = cacheval[1].ipiv, info = cacheval[2]) fact = LU(res[1:3]...), res[4] cache.cacheval = fact + info_value = res[3] + + if info_value != 0 + if !isa(verbose.blas_info, SciMLLogging.Silent) || !isa(verbose.blas_errors, SciMLLogging.Silent) || + !isa(verbose.blas_invalid_args, SciMLLogging.Silent) + op_info = get_blas_operation_info(:dgetrf, A, cache.b, condition = !isa(verbose.condition_number, SciMLLogging.Silent)) + @SciMLMessage(cache.verbose, :condition_number) do + if op_info[:condition_number] === nothing + return "Matrix condition number calculation failed." + else + return "Matrix condition number: $(round(op_info[:condition_number], sigdigits=4)) for $(size(A, 1))×$(size(A, 2)) matrix in dgetrf" + end + end + verb_option, message = blas_info_msg( + :dgetrf, info_value; extra_context = op_info) + @SciMLMessage(message, verbose, verb_option) + end + else + @SciMLMessage(cache.verbose, :blas_success) do + op_info = get_blas_operation_info(:dgetrf, A, cache.b, + condition = !isa(verbose.condition_number, SciMLLogging.Silent)) + @SciMLMessage(cache.verbose, :condition_number) do + if op_info[:condition_number] === nothing + return "Matrix condition number calculation failed." 
+ else + return "Matrix condition number: $(round(op_info[:condition_number], sigdigits=4)) for $(size(A, 1))×$(size(A, 2)) matrix in dgetrf" + end + end + return "BLAS LU factorization (dgetrf) completed successfully for $(op_info[:matrix_size]) matrix" + end + end + if !LinearAlgebra.issuccess(fact[1]) return SciMLBase.build_linear_solution( alg, cache.u, nothing, cache; retcode = ReturnCode.Failure) diff --git a/ext/LinearSolveBandedMatricesExt.jl b/ext/LinearSolveBandedMatricesExt.jl index deb85e25a..376556202 100644 --- a/ext/LinearSolveBandedMatricesExt.jl +++ b/ext/LinearSolveBandedMatricesExt.jl @@ -3,7 +3,7 @@ module LinearSolveBandedMatricesExt using BandedMatrices, LinearAlgebra, LinearSolve import LinearSolve: defaultalg, do_factorization, init_cacheval, DefaultLinearSolver, - DefaultAlgorithmChoice + DefaultAlgorithmChoice, LinearVerbosity # Defaults for BandedMatrices function defaultalg(A::BandedMatrix, b, oa::OperatorAssumptions{Bool}) @@ -41,14 +41,14 @@ for alg in (:SVDFactorization, :MKLLUFactorization, :DiagonalFactorization, :AppleAccelerateLUFactorization, :CholeskyFactorization) @eval begin function init_cacheval(::$(alg), ::BandedMatrix, b, u, Pl, Pr, maxiters::Int, - abstol, reltol, verbose::Bool, assumptions::OperatorAssumptions) + abstol, reltol, verbose::LinearVerbosity, assumptions::OperatorAssumptions) return nothing end end end function init_cacheval(::LUFactorization, A::BandedMatrix{T}, b, u, Pl, Pr, maxiters::Int, - abstol, reltol, verbose::Bool, assumptions::OperatorAssumptions) where {T} + abstol, reltol, verbose::LinearVerbosity, assumptions::OperatorAssumptions) where {T} (T <: BigFloat) && return qr(similar(A, 0, 0)) return lu(similar(A, 0, 0)) end @@ -61,7 +61,7 @@ for alg in (:SVDFactorization, :MKLLUFactorization, :DiagonalFactorization, :AppleAccelerateLUFactorization, :QRFactorization, :LUFactorization) @eval begin function init_cacheval(::$(alg), ::Symmetric{<:Number, <:BandedMatrix}, b, u, Pl, - Pr, maxiters::Int, 
abstol, reltol, verbose::Bool, + Pr, maxiters::Int, abstol, reltol, verbose::LinearVerbosity, assumptions::OperatorAssumptions) return nothing end diff --git a/ext/LinearSolveCUDAExt.jl b/ext/LinearSolveCUDAExt.jl index 96174ef8e..20dc22604 100644 --- a/ext/LinearSolveCUDAExt.jl +++ b/ext/LinearSolveCUDAExt.jl @@ -7,7 +7,7 @@ using LinearSolve: LinearSolve, is_cusparse, defaultalg, cudss_loaded, DefaultLi error_no_cudss_lu, init_cacheval, OperatorAssumptions, CudaOffloadFactorization, CudaOffloadLUFactorization, CudaOffloadQRFactorization, CUDAOffload32MixedLUFactorization, - SparspakFactorization, KLUFactorization, UMFPACKFactorization + SparspakFactorization, KLUFactorization, UMFPACKFactorization, LinearVerbosity using LinearSolve.LinearAlgebra, LinearSolve.SciMLBase, LinearSolve.ArrayInterface using SciMLBase: AbstractSciMLOperator @@ -51,7 +51,7 @@ function SciMLBase.solve!(cache::LinearSolve.LinearCache, alg::CudaOffloadLUFact end function LinearSolve.init_cacheval(alg::CudaOffloadLUFactorization, A::AbstractArray, b, u, Pl, Pr, - maxiters::Int, abstol, reltol, verbose::Bool, + maxiters::Int, abstol, reltol, verbose::LinearVerbosity, assumptions::OperatorAssumptions) # Check if CUDA is functional before creating CUDA arrays if !CUDA.functional() @@ -79,7 +79,7 @@ function SciMLBase.solve!(cache::LinearSolve.LinearCache, alg::CudaOffloadQRFact end function LinearSolve.init_cacheval(alg::CudaOffloadQRFactorization, A, b, u, Pl, Pr, - maxiters::Int, abstol, reltol, verbose::Bool, + maxiters::Int, abstol, reltol, verbose::LinearVerbosity, assumptions::OperatorAssumptions) # Check if CUDA is functional before creating CUDA arrays if !CUDA.functional() @@ -103,26 +103,26 @@ function SciMLBase.solve!(cache::LinearSolve.LinearCache, alg::CudaOffloadFactor end function LinearSolve.init_cacheval(alg::CudaOffloadFactorization, A::AbstractArray, b, u, Pl, Pr, - maxiters::Int, abstol, reltol, verbose::Bool, + maxiters::Int, abstol, reltol, verbose::LinearVerbosity, 
assumptions::OperatorAssumptions) qr(CUDA.CuArray(A)) end function LinearSolve.init_cacheval( ::SparspakFactorization, A::CUDA.CUSPARSE.CuSparseMatrixCSR, b, u, - Pl, Pr, maxiters::Int, abstol, reltol, verbose::Bool, assumptions::OperatorAssumptions) + Pl, Pr, maxiters::Int, abstol, reltol, verbose::LinearVerbosity, assumptions::OperatorAssumptions) nothing end function LinearSolve.init_cacheval( ::KLUFactorization, A::CUDA.CUSPARSE.CuSparseMatrixCSR, b, u, - Pl, Pr, maxiters::Int, abstol, reltol, verbose::Bool, assumptions::OperatorAssumptions) + Pl, Pr, maxiters::Int, abstol, reltol, verbose::LinearVerbosity, assumptions::OperatorAssumptions) nothing end function LinearSolve.init_cacheval( ::UMFPACKFactorization, A::CUDA.CUSPARSE.CuSparseMatrixCSR, b, u, - Pl, Pr, maxiters::Int, abstol, reltol, verbose::Bool, assumptions::OperatorAssumptions) + Pl, Pr, maxiters::Int, abstol, reltol, verbose::LinearVerbosity, assumptions::OperatorAssumptions) nothing end @@ -156,7 +156,7 @@ function SciMLBase.solve!(cache::LinearSolve.LinearCache, alg::CUDAOffload32Mixe end function LinearSolve.init_cacheval(alg::CUDAOffload32MixedLUFactorization, A, b, u, Pl, Pr, - maxiters::Int, abstol, reltol, verbose::Bool, + maxiters::Int, abstol, reltol, verbose::LinearVerbosity, assumptions::OperatorAssumptions) # Pre-allocate with Float32 arrays m, n = size(A) diff --git a/ext/LinearSolveCUSOLVERRFExt.jl b/ext/LinearSolveCUSOLVERRFExt.jl index 68b72c604..0522d5e1a 100644 --- a/ext/LinearSolveCUSOLVERRFExt.jl +++ b/ext/LinearSolveCUSOLVERRFExt.jl @@ -10,7 +10,7 @@ using SciMLBase: SciMLBase, LinearProblem, ReturnCode function LinearSolve.init_cacheval(alg::LinearSolve.CUSOLVERRFFactorization, A, b, u, Pl, Pr, maxiters::Int, abstol, reltol, - verbose::Bool, assumptions::OperatorAssumptions) + verbose::LinearVerbosity, assumptions::OperatorAssumptions) nothing end @@ -18,7 +18,7 @@ function LinearSolve.init_cacheval(alg::LinearSolve.CUSOLVERRFFactorization, A::Union{CuSparseMatrixCSR{Float64, 
Int32}, SparseMatrixCSC{Float64, <:Integer}}, b, u, Pl, Pr, maxiters::Int, abstol, reltol, - verbose::Bool, assumptions::OperatorAssumptions) + verbose::LinearVerbosity, assumptions::OperatorAssumptions) # Create initial factorization with appropriate options nrhs = b isa AbstractMatrix ? size(b, 2) : 1 symbolic = alg.symbolic diff --git a/ext/LinearSolveCliqueTreesExt.jl b/ext/LinearSolveCliqueTreesExt.jl index 4c4530baf..d06a4e3fa 100644 --- a/ext/LinearSolveCliqueTreesExt.jl +++ b/ext/LinearSolveCliqueTreesExt.jl @@ -22,7 +22,7 @@ end function LinearSolve.init_cacheval( alg::CliqueTreesFactorization, A::AbstractMatrix, b, u, Pl, Pr, maxiters::Int, abstol, - reltol, verbose::Bool, assumptions::OperatorAssumptions) + reltol, verbose::LinearVerbosity, assumptions::OperatorAssumptions) symbfact = _symbolic(A, alg) cholfact, cholwork = cholinit(A, symbfact) linwork = lininit(1, cholfact) diff --git a/ext/LinearSolveFastAlmostBandedMatricesExt.jl b/ext/LinearSolveFastAlmostBandedMatricesExt.jl index 1ceff10c5..572693b2b 100644 --- a/ext/LinearSolveFastAlmostBandedMatricesExt.jl +++ b/ext/LinearSolveFastAlmostBandedMatricesExt.jl @@ -3,7 +3,7 @@ module LinearSolveFastAlmostBandedMatricesExt using FastAlmostBandedMatrices, LinearAlgebra, LinearSolve import LinearSolve: defaultalg, do_factorization, init_cacheval, DefaultLinearSolver, - DefaultAlgorithmChoice + DefaultAlgorithmChoice, LinearVerbosity function defaultalg(A::AlmostBandedMatrix, b, oa::OperatorAssumptions{Bool}) if oa.issq @@ -21,7 +21,7 @@ for alg in (:SVDFactorization, :MKLLUFactorization, :DiagonalFactorization, :AppleAccelerateLUFactorization, :CholeskyFactorization, :LUFactorization) @eval begin function init_cacheval(::$(alg), ::AlmostBandedMatrix, b, u, Pl, Pr, maxiters::Int, - abstol, reltol, verbose::Bool, assumptions::OperatorAssumptions) + abstol, reltol, verbose::LinearVerbosity, assumptions::OperatorAssumptions) return nothing end end diff --git a/ext/LinearSolveFastLapackInterfaceExt.jl 
b/ext/LinearSolveFastLapackInterfaceExt.jl index 45b690037..f924cc8cd 100644 --- a/ext/LinearSolveFastLapackInterfaceExt.jl +++ b/ext/LinearSolveFastLapackInterfaceExt.jl @@ -1,6 +1,7 @@ module LinearSolveFastLapackInterfaceExt using LinearSolve, LinearAlgebra +using LinearSolve: LinearVerbosity using FastLapackInterface struct WorkspaceAndFactors{W, F} @@ -9,7 +10,7 @@ struct WorkspaceAndFactors{W, F} end function LinearSolve.init_cacheval(::FastLUFactorization, A, b, u, Pl, Pr, - maxiters::Int, abstol, reltol, verbose::Bool, + maxiters::Int, abstol, reltol, verbose::LinearVerbosity, assumptions::OperatorAssumptions) ws = LUWs(A) return WorkspaceAndFactors( @@ -36,7 +37,7 @@ end function LinearSolve.init_cacheval( alg::FastQRFactorization{NoPivot}, A::AbstractMatrix, b, u, Pl, Pr, - maxiters::Int, abstol, reltol, verbose::Bool, + maxiters::Int, abstol, reltol, verbose::LinearVerbosity, assumptions::OperatorAssumptions) ws = QRWYWs(A; blocksize = alg.blocksize) return WorkspaceAndFactors(ws, @@ -44,7 +45,7 @@ function LinearSolve.init_cacheval( end function LinearSolve.init_cacheval( ::FastQRFactorization{ColumnNorm}, A::AbstractMatrix, b, u, Pl, Pr, - maxiters::Int, abstol, reltol, verbose::Bool, + maxiters::Int, abstol, reltol, verbose::LinearVerbosity, assumptions::OperatorAssumptions) ws = QRpWs(A) return WorkspaceAndFactors(ws, @@ -52,10 +53,10 @@ function LinearSolve.init_cacheval( end function LinearSolve.init_cacheval(alg::FastQRFactorization, A, b, u, Pl, Pr, - maxiters::Int, abstol, reltol, verbose::Bool, + maxiters::Int, abstol, reltol, verbose::LinearVerbosity, assumptions::OperatorAssumptions) return init_cacheval(alg, convert(AbstractMatrix, A), b, u, Pl, Pr, - maxiters::Int, abstol, reltol, verbose::Bool, + maxiters::Int, abstol, reltol, verbose::LinearVerbosity, assumptions::OperatorAssumptions) end diff --git a/ext/LinearSolveForwardDiffExt.jl b/ext/LinearSolveForwardDiffExt.jl index 77f8a8659..75ae8be32 100644 --- 
a/ext/LinearSolveForwardDiffExt.jl +++ b/ext/LinearSolveForwardDiffExt.jl @@ -1,12 +1,13 @@ module LinearSolveForwardDiffExt using LinearSolve -using LinearSolve: SciMLLinearSolveAlgorithm, __init +using LinearSolve: SciMLLinearSolveAlgorithm, __init, LinearVerbosity using LinearAlgebra using ForwardDiff using ForwardDiff: Dual, Partials using SciMLBase using RecursiveArrayTools +using SciMLLogging: Verbosity const DualLinearProblem = LinearProblem{ <:Union{Number, <:AbstractArray, Nothing}, iip, @@ -136,7 +137,7 @@ function __dual_init( abstol = LinearSolve.default_tol(real(eltype(prob.b))), reltol = LinearSolve.default_tol(real(eltype(prob.b))), maxiters::Int = length(prob.b), - verbose::Bool = false, + verbose = LinearVerbosity(SciMLLogging.None()), Pl = nothing, Pr = nothing, assumptions = OperatorAssumptions(issquare(prob.A)), diff --git a/ext/LinearSolveHYPREExt.jl b/ext/LinearSolveHYPREExt.jl index ad7d98333..05cbd8e06 100644 --- a/ext/LinearSolveHYPREExt.jl +++ b/ext/LinearSolveHYPREExt.jl @@ -5,7 +5,8 @@ using HYPRE.LibHYPRE: HYPRE_Complex using HYPRE: HYPRE, HYPREMatrix, HYPRESolver, HYPREVector using LinearSolve: HYPREAlgorithm, LinearCache, LinearProblem, LinearSolve, OperatorAssumptions, default_tol, init_cacheval, __issquare, - __conditioning, LinearSolveAdjoint + __conditioning, LinearSolveAdjoint, LinearVerbosity +using SciMLLogging: Verbosity, verbosity_to_int using SciMLBase: LinearProblem, LinearAliasSpecifier, SciMLBase using UnPack: @unpack using Setfield: @set! @@ -22,7 +23,7 @@ end function LinearSolve.init_cacheval(alg::HYPREAlgorithm, A, b, u, Pl, Pr, maxiters::Int, abstol, reltol, - verbose::Bool, assumptions::OperatorAssumptions) + verbose::LinearVerbosity, assumptions::OperatorAssumptions) return HYPRECache(nothing, nothing, nothing, nothing, true, true, true) end @@ -64,7 +65,7 @@ function SciMLBase.init(prob::LinearProblem, alg::HYPREAlgorithm, eltype(prob.A)), # TODO: Implement length() for HYPREVector in HYPRE.jl? 
maxiters::Int = prob.b isa HYPREVector ? 1000 : length(prob.b), - verbose::Bool = false, + verbose = LinearVerbosity(SciMLLogging.None()), Pl = LinearAlgebra.I, Pr = LinearAlgebra.I, assumptions = OperatorAssumptions(), @@ -111,6 +112,18 @@ function SciMLBase.init(prob::LinearProblem, alg::HYPREAlgorithm, alias_b = aliases.alias_b end + if verbose isa Bool + #@warn "Using `true` or `false` for `verbose` is being deprecated. Please use a `LinearVerbosity` type to specify verbosity settings. + # For details see the verbosity section of the common solver options documentation page." + if verbose + verbose = LinearVerbosity() + else + verbose = LinearVerbosity(SciMLLogging.None()) + end + elseif verbose isa SciMLLogging.Type + verbose = LinearVerbosity(verbose) + end + A = A isa HYPREMatrix ? A : HYPREMatrix(A) b = b isa HYPREVector ? b : HYPREVector(b) u0 = u0 isa HYPREVector ? u0 : (u0 === nothing ? nothing : HYPREVector(u0)) @@ -159,10 +172,11 @@ function create_solver(alg::HYPREAlgorithm, cache::LinearCache) solver = create_solver(alg.solver, comm) # Construct solver options + verbose = isnothing(cache.verbose.numerical) ? 
0 : verbosity_to_int(cache.verbose.numerical.HYPRE_verbosity) solver_options = (; AbsoluteTol = cache.abstol, MaxIter = cache.maxiters, - PrintLevel = Int(cache.verbose), + PrintLevel = verbose, Tol = cache.reltol) # Preconditioner (uses Pl even though it might not be a *left* preconditioner just *a* diff --git a/ext/LinearSolveIterativeSolversExt.jl b/ext/LinearSolveIterativeSolversExt.jl index 901b6bf74..d811ae47d 100644 --- a/ext/LinearSolveIterativeSolversExt.jl +++ b/ext/LinearSolveIterativeSolversExt.jl @@ -1,8 +1,9 @@ module LinearSolveIterativeSolversExt using LinearSolve, LinearAlgebra -using LinearSolve: LinearCache, DEFAULT_PRECS +using LinearSolve: LinearCache, DEFAULT_PRECS, LinearVerbosity import LinearSolve: IterativeSolversJL +using SciMLLogging: @SciMLMessage, Verbosity using IterativeSolvers @@ -47,7 +48,7 @@ LinearSolve.default_alias_b(::IterativeSolversJL, ::Any, ::Any) = true function LinearSolve.init_cacheval(alg::IterativeSolversJL, A, b, u, Pl, Pr, maxiters::Int, abstol, reltol, - verbose::Bool, assumptions::OperatorAssumptions) + verbose::LinearVerbosity, assumptions::OperatorAssumptions) restart = (alg.gmres_restart == 0) ? min(20, size(A, 1)) : alg.gmres_restart s = :idrs_s in keys(alg.kwargs) ? alg.kwargs.idrs_s : 4 # shadow space @@ -56,7 +57,8 @@ function LinearSolve.init_cacheval(alg::IterativeSolversJL, A, b, u, Pl, Pr, max iterable = if alg.generate_iterator === IterativeSolvers.cg_iterator! !LinearSolve._isidentity_struct(Pr) && - @warn "$(alg.generate_iterator) doesn't support right preconditioning" + @SciMLMessage("$(alg.generate_iterator) doesn't support right preconditioning", + verbose, :no_right_preconditioning) alg.generate_iterator(u, A, b, Pl; kwargs...) elseif alg.generate_iterator === IterativeSolvers.gmres_iterable! @@ -64,7 +66,8 @@ function LinearSolve.init_cacheval(alg::IterativeSolversJL, A, b, u, Pl, Pr, max kwargs...) elseif alg.generate_iterator === IterativeSolvers.idrs_iterable! 
!!LinearSolve._isidentity_struct(Pr) && - @warn "$(alg.generate_iterator) doesn't support right preconditioning" + @SciMLMessage("$(alg.generate_iterator) doesn't support right preconditioning", + verbose, :no_right_preconditioning) history = IterativeSolvers.ConvergenceHistory(partial = true) history[:abstol] = abstol history[:reltol] = reltol @@ -72,7 +75,8 @@ function LinearSolve.init_cacheval(alg::IterativeSolversJL, A, b, u, Pl, Pr, max alg.kwargs...) elseif alg.generate_iterator === IterativeSolvers.bicgstabl_iterator! !!LinearSolve._isidentity_struct(Pr) && - @warn "$(alg.generate_iterator) doesn't support right preconditioning" + @SciMLMessage("$(alg.generate_iterator) doesn't support right preconditioning", + verbose, :no_right_preconditioning) alg.generate_iterator(u, A, b, alg.args...; Pl = Pl, abstol = abstol, reltol = reltol, max_mv_products = maxiters * 2, @@ -103,14 +107,15 @@ function SciMLBase.solve!(cache::LinearCache, alg::IterativeSolversJL; kwargs... end purge_history!(cache.cacheval, cache.u, cache.b) - cache.verbose && println("Using IterativeSolvers.$(alg.generate_iterator)") + @SciMLMessage("Using IterativeSolvers.$(alg.generate_iterator)", + cache.verbose, :using_IterativeSolvers) i = 0 for iter in enumerate(cache.cacheval) i += 1 - cache.verbose && println("Iter: $(iter[1]), residual: $(iter[2])") + @SciMLMessage("Iter: $(iter[1]), residual: $(iter[2])", + cache.verbose, :IterativeSolvers_iterations) # TODO inject callbacks KSP into solve! cb!(cache.cacheval) end - cache.verbose && println() resid = cache.cacheval isa IterativeSolvers.IDRSIterable ? 
cache.cacheval.R : cache.cacheval.residual diff --git a/ext/LinearSolveKrylovKitExt.jl b/ext/LinearSolveKrylovKitExt.jl index 1aa1e5d52..0271b6796 100644 --- a/ext/LinearSolveKrylovKitExt.jl +++ b/ext/LinearSolveKrylovKitExt.jl @@ -2,6 +2,7 @@ module LinearSolveKrylovKitExt using LinearSolve, KrylovKit, LinearAlgebra using LinearSolve: LinearCache, DEFAULT_PRECS +using SciMLLogging: Verbosity, verbosity_to_int function LinearSolve.KrylovKitJL(args...; KrylovAlg = KrylovKit.GMRES, gmres_restart = 0, @@ -25,7 +26,7 @@ function SciMLBase.solve!(cache::LinearCache, alg::KrylovKitJL; kwargs...) atol = float(cache.abstol) rtol = float(cache.reltol) maxiter = cache.maxiters - verbosity = cache.verbose ? 1 : 0 + verbosity = isnothing(cache.verbose.numerical) ? 0 : verbosity_to_int(cache.verbose.numerical.KrylovKit_verbosity) krylovdim = (alg.gmres_restart == 0) ? min(20, size(cache.A, 1)) : alg.gmres_restart kwargs = (atol = atol, rtol = rtol, maxiter = maxiter, verbosity = verbosity, diff --git a/ext/LinearSolveMetalExt.jl b/ext/LinearSolveMetalExt.jl index 7f34bf087..2cf4bbc79 100644 --- a/ext/LinearSolveMetalExt.jl +++ b/ext/LinearSolveMetalExt.jl @@ -4,13 +4,13 @@ using Metal, LinearSolve using LinearAlgebra, SciMLBase using SciMLBase: AbstractSciMLOperator using LinearSolve: ArrayInterface, MKLLUFactorization, MetalOffload32MixedLUFactorization, - @get_cacheval, LinearCache, SciMLBase, OperatorAssumptions + @get_cacheval, LinearCache, SciMLBase, OperatorAssumptions, LinearVerbosity default_alias_A(::MetalLUFactorization, ::Any, ::Any) = false default_alias_b(::MetalLUFactorization, ::Any, ::Any) = false function LinearSolve.init_cacheval(alg::MetalLUFactorization, A::AbstractArray, b, u, Pl, Pr, - maxiters::Int, abstol, reltol, verbose::Bool, + maxiters::Int, abstol, reltol, verbose::LinearVerbosity, assumptions::OperatorAssumptions) ArrayInterface.lu_instance(convert(AbstractMatrix, A)) end @@ -34,7 +34,7 @@ default_alias_A(::MetalOffload32MixedLUFactorization, 
::Any, ::Any) = false default_alias_b(::MetalOffload32MixedLUFactorization, ::Any, ::Any) = false function LinearSolve.init_cacheval(alg::MetalOffload32MixedLUFactorization, A, b, u, Pl, Pr, - maxiters::Int, abstol, reltol, verbose::Bool, + maxiters::Int, abstol, reltol, verbose::LinearVerbosity, assumptions::OperatorAssumptions) # Pre-allocate with Float32 arrays m, n = size(A) diff --git a/ext/LinearSolvePardisoExt.jl b/ext/LinearSolvePardisoExt.jl index 5b459d8cc..82fe64055 100644 --- a/ext/LinearSolvePardisoExt.jl +++ b/ext/LinearSolvePardisoExt.jl @@ -3,8 +3,8 @@ module LinearSolvePardisoExt using Pardiso, LinearSolve using SparseArrays using SparseArrays: nonzeros, rowvals, getcolptr -using LinearSolve: PardisoJL, @unpack - +using LinearSolve: PardisoJL, @unpack, LinearVerbosity +using SciMLLogging: @SciMLMessage, verbosity_to_bool using LinearSolve.SciMLBase LinearSolve.needs_concrete_A(alg::PardisoJL) = true @@ -20,7 +20,7 @@ function LinearSolve.init_cacheval(alg::PardisoJL, maxiters::Int, abstol, reltol, - verbose::Bool, + verbose::LinearVerbosity, assumptions::LinearSolve.OperatorAssumptions) @unpack nprocs, solver_type, matrix_type, cache_analysis, iparm, dparm, vendor = alg A = convert(AbstractMatrix, A) @@ -73,8 +73,12 @@ function LinearSolve.init_cacheval(alg::PardisoJL, error("Number type not supported by Pardiso") end end - verbose && Pardiso.set_msglvl!(solver, Pardiso.MESSAGE_LEVEL_ON) - + + if !isnothing(verbose.numerical) + if verbosity_to_bool(verbose.numerical.pardiso_verbosity) + Pardiso.set_msglvl!(solver, Pardiso.MESSAGE_LEVEL_ON) + end + end #= Note: It is recommended to use IPARM(11)=1 (scaling) and IPARM(13)=1 (matchings) for highly indefinite symmetric matrices e.g. from interior point optimizations or saddle point problems. 
diff --git a/ext/LinearSolveRecursiveFactorizationExt.jl b/ext/LinearSolveRecursiveFactorizationExt.jl index 947dd8020..765c895c5 100644 --- a/ext/LinearSolveRecursiveFactorizationExt.jl +++ b/ext/LinearSolveRecursiveFactorizationExt.jl @@ -2,7 +2,7 @@ module LinearSolveRecursiveFactorizationExt using LinearSolve: LinearSolve, userecursivefactorization, LinearCache, @get_cacheval, RFLUFactorization, RF32MixedLUFactorization, default_alias_A, - default_alias_b + default_alias_b, LinearVerbosity using LinearSolve.LinearAlgebra, LinearSolve.ArrayInterface, RecursiveFactorization using SciMLBase: SciMLBase, ReturnCode @@ -42,7 +42,7 @@ const PREALLOCATED_RF32_LU = begin end function LinearSolve.init_cacheval(alg::RF32MixedLUFactorization{P, T}, A, b, u, Pl, Pr, - maxiters::Int, abstol, reltol, verbose::Bool, + maxiters::Int, abstol, reltol, verbose::LinearVerbosity, assumptions::LinearSolve.OperatorAssumptions) where {P, T} # Pre-allocate appropriate 32-bit arrays based on input type m, n = size(A) diff --git a/ext/LinearSolveSparseArraysExt.jl b/ext/LinearSolveSparseArraysExt.jl index 57e135164..273b135ae 100644 --- a/ext/LinearSolveSparseArraysExt.jl +++ b/ext/LinearSolveSparseArraysExt.jl @@ -4,7 +4,7 @@ using LinearSolve: LinearSolve, BLASELTYPES, pattern_changed, ArrayInterface, @get_cacheval, CHOLMODFactorization, GenericFactorization, GenericLUFactorization, KLUFactorization, LUFactorization, NormalCholeskyFactorization, - OperatorAssumptions, + OperatorAssumptions, LinearVerbosity, QRFactorization, RFLUFactorization, UMFPACKFactorization, solve using ArrayInterface: ArrayInterface using LinearAlgebra: LinearAlgebra, I, Hermitian, Symmetric, cholesky, ldiv!, lu, lu!, QR @@ -34,7 +34,7 @@ end function LinearSolve.init_cacheval(alg::RFLUFactorization, A::Union{AbstractSparseArray, LinearSolve.SciMLOperators.AbstractSciMLOperator}, b, u, Pl, Pr, maxiters::Int, - abstol, reltol, verbose::Bool, assumptions::OperatorAssumptions) + abstol, reltol, 
verbose::LinearVerbosity, assumptions::OperatorAssumptions) nothing, nothing end @@ -65,7 +65,7 @@ end function LinearSolve.init_cacheval(alg::GenericFactorization, A::Union{Hermitian{T, <:SparseMatrixCSC}, Symmetric{T, <:SparseMatrixCSC}}, b, u, Pl, Pr, - maxiters::Int, abstol, reltol, verbose::Bool, + maxiters::Int, abstol, reltol, verbose::LinearVerbosity, assumptions::OperatorAssumptions) where {T} newA = copy(convert(AbstractMatrix, A)) LinearSolve.do_factorization(alg, newA, b, u) @@ -78,7 +78,7 @@ function LinearSolve.init_cacheval( alg::LUFactorization, A::AbstractSparseArray{<:Number, <:Integer}, b, u, Pl, Pr, maxiters::Int, abstol, reltol, - verbose::Bool, assumptions::OperatorAssumptions) + verbose::LinearVerbosity, assumptions::OperatorAssumptions) nothing end @@ -86,7 +86,7 @@ function LinearSolve.init_cacheval( alg::GenericLUFactorization, A::AbstractSparseArray{<:Number, <:Integer}, b, u, Pl, Pr, maxiters::Int, abstol, reltol, - verbose::Bool, assumptions::OperatorAssumptions) + verbose::LinearVerbosity, assumptions::OperatorAssumptions) nothing end @@ -94,7 +94,7 @@ function LinearSolve.init_cacheval( alg::UMFPACKFactorization, A::AbstractArray, b, u, Pl, Pr, maxiters::Int, abstol, reltol, - verbose::Bool, assumptions::OperatorAssumptions) + verbose::LinearVerbosity, assumptions::OperatorAssumptions) nothing end @@ -102,7 +102,7 @@ function LinearSolve.init_cacheval( alg::LUFactorization, A::AbstractSparseArray{Float64, Int64}, b, u, Pl, Pr, maxiters::Int, abstol, reltol, - verbose::Bool, assumptions::OperatorAssumptions) + verbose::LinearVerbosity, assumptions::OperatorAssumptions) PREALLOCATED_UMFPACK end @@ -110,7 +110,7 @@ function LinearSolve.init_cacheval( alg::LUFactorization, A::AbstractSparseArray{T, Int64}, b, u, Pl, Pr, maxiters::Int, abstol, reltol, - verbose::Bool, assumptions::OperatorAssumptions) where {T <: BLASELTYPES} + verbose::LinearVerbosity, assumptions::OperatorAssumptions) where {T <: BLASELTYPES} if 
LinearSolve.is_cusparse(A) ArrayInterface.lu_instance(A) else @@ -123,7 +123,7 @@ function LinearSolve.init_cacheval( alg::LUFactorization, A::AbstractSparseArray{T, Int32}, b, u, Pl, Pr, maxiters::Int, abstol, reltol, - verbose::Bool, assumptions::OperatorAssumptions) where {T <: BLASELTYPES} + verbose::LinearVerbosity, assumptions::OperatorAssumptions) where {T <: BLASELTYPES} if LinearSolve.is_cusparse(A) ArrayInterface.lu_instance(A) else @@ -136,7 +136,7 @@ function LinearSolve.init_cacheval( alg::LUFactorization, A::LinearSolve.GPUArraysCore.AnyGPUArray, b, u, Pl, Pr, maxiters::Int, abstol, reltol, - verbose::Bool, assumptions::OperatorAssumptions) + verbose::LinearVerbosity, assumptions::OperatorAssumptions) ArrayInterface.lu_instance(A) end @@ -144,7 +144,7 @@ function LinearSolve.init_cacheval( alg::UMFPACKFactorization, A::AbstractSparseArray{Float64, Int}, b, u, Pl, Pr, maxiters::Int, abstol, reltol, - verbose::Bool, assumptions::OperatorAssumptions) + verbose::LinearVerbosity, assumptions::OperatorAssumptions) PREALLOCATED_UMFPACK end @@ -152,7 +152,7 @@ function LinearSolve.init_cacheval( alg::UMFPACKFactorization, A::LinearSolve.GPUArraysCore.AnyGPUArray, b, u, Pl, Pr, maxiters::Int, abstol, reltol, - verbose::Bool, assumptions::OperatorAssumptions) + verbose::LinearVerbosity, assumptions::OperatorAssumptions) nothing end @@ -160,7 +160,7 @@ function LinearSolve.init_cacheval( alg::UMFPACKFactorization, A::AbstractSparseArray{T, Int64}, b, u, Pl, Pr, maxiters::Int, abstol, reltol, - verbose::Bool, assumptions::OperatorAssumptions) where {T <: BLASELTYPES} + verbose::LinearVerbosity, assumptions::OperatorAssumptions) where {T <: BLASELTYPES} SparseArrays.UMFPACK.UmfpackLU(SparseMatrixCSC{T, Int64}( zero(Int64), zero(Int64), [Int64(1)], Int64[], T[])) end @@ -169,7 +169,7 @@ function LinearSolve.init_cacheval( alg::UMFPACKFactorization, A::AbstractSparseArray{T, Int32}, b, u, Pl, Pr, maxiters::Int, abstol, reltol, - verbose::Bool, 
assumptions::OperatorAssumptions) where {T <: BLASELTYPES} + verbose::LinearVerbosity, assumptions::OperatorAssumptions) where {T <: BLASELTYPES} SparseArrays.UMFPACK.UmfpackLU(SparseMatrixCSC{T, Int32}( zero(Int32), zero(Int32), [Int32(1)], Int32[], T[])) end @@ -218,7 +218,7 @@ function LinearSolve.init_cacheval( alg::KLUFactorization, A::AbstractArray, b, u, Pl, Pr, maxiters::Int, abstol, reltol, - verbose::Bool, assumptions::OperatorAssumptions) + verbose::LinearVerbosity, assumptions::OperatorAssumptions) nothing end @@ -226,7 +226,7 @@ function LinearSolve.init_cacheval( alg::KLUFactorization, A::AbstractSparseArray{Float64, Int64}, b, u, Pl, Pr, maxiters::Int, abstol, reltol, - verbose::Bool, assumptions::OperatorAssumptions) + verbose::LinearVerbosity, assumptions::OperatorAssumptions) PREALLOCATED_KLU end @@ -234,7 +234,7 @@ function LinearSolve.init_cacheval( alg::KLUFactorization, A::LinearSolve.GPUArraysCore.AnyGPUArray, b, u, Pl, Pr, maxiters::Int, abstol, reltol, - verbose::Bool, assumptions::OperatorAssumptions) + verbose::LinearVerbosity, assumptions::OperatorAssumptions) nothing end @@ -242,7 +242,7 @@ function LinearSolve.init_cacheval( alg::KLUFactorization, A::AbstractSparseArray{Float64, Int32}, b, u, Pl, Pr, maxiters::Int, abstol, reltol, - verbose::Bool, assumptions::OperatorAssumptions) + verbose::LinearVerbosity, assumptions::OperatorAssumptions) KLU.KLUFactorization(SparseMatrixCSC{Float64, Int32}( 0, 0, [Int32(1)], Int32[], Float64[])) end @@ -288,7 +288,7 @@ function LinearSolve.init_cacheval(alg::CHOLMODFactorization, A::Union{SparseMatrixCSC{T, Int}, Symmetric{T, SparseMatrixCSC{T, Int}}}, b, u, Pl, Pr, maxiters::Int, abstol, reltol, - verbose::Bool, assumptions::OperatorAssumptions) where {T <: + verbose::LinearVerbosity, assumptions::OperatorAssumptions) where {T <: Float64} PREALLOCATED_CHOLMOD end @@ -297,7 +297,7 @@ function LinearSolve.init_cacheval(alg::CHOLMODFactorization, A::Union{SparseMatrixCSC{T, Int}, Symmetric{T, 
SparseMatrixCSC{T, Int}}}, b, u, Pl, Pr, maxiters::Int, abstol, reltol, - verbose::Bool, assumptions::OperatorAssumptions) where {T <: + verbose::LinearVerbosity, assumptions::OperatorAssumptions) where {T <: BLASELTYPES} cholesky(sparse(reshape([one(T)], 1, 1))) end @@ -306,14 +306,14 @@ function LinearSolve.init_cacheval(alg::CHOLMODFactorization, A::AbstractArray, b, u, Pl, Pr, maxiters::Int, abstol, reltol, - verbose::Bool, assumptions::OperatorAssumptions) + verbose::LinearVerbosity, assumptions::OperatorAssumptions) nothing end function LinearSolve.init_cacheval(alg::NormalCholeskyFactorization, A::Union{AbstractSparseArray{T}, LinearSolve.GPUArraysCore.AnyGPUArray, Symmetric{T, <:AbstractSparseArray{T}}}, b, u, Pl, Pr, - maxiters::Int, abstol, reltol, verbose::Bool, + maxiters::Int, abstol, reltol, verbose::LinearVerbosity, assumptions::OperatorAssumptions) where {T <: BLASELTYPES} ArrayInterface.cholesky_instance(convert(AbstractMatrix, A)) end @@ -373,20 +373,20 @@ function LinearSolve.init_cacheval( alg::QRFactorization, A::AbstractSparseArray{<:Number, <:Integer}, b, u, Pl, Pr, maxiters::Int, abstol, reltol, - verbose::Bool, assumptions::OperatorAssumptions) + verbose::LinearVerbosity, assumptions::OperatorAssumptions) nothing end function LinearSolve.init_cacheval( alg::QRFactorization, A::SparseMatrixCSC{Float64, <:Integer}, b, u, Pl, Pr, - maxiters::Int, abstol, reltol, verbose::Bool, + maxiters::Int, abstol, reltol, verbose::LinearVerbosity, assumptions::OperatorAssumptions) ArrayInterface.qr_instance(convert(AbstractMatrix, A), alg.pivot) end function LinearSolve.init_cacheval( alg::QRFactorization, A::Symmetric{<:Number, <:SparseMatrixCSC}, b, u, Pl, Pr, - maxiters::Int, abstol, reltol, verbose::Bool, + maxiters::Int, abstol, reltol, verbose::LinearVerbosity, assumptions::OperatorAssumptions) return nothing end diff --git a/ext/LinearSolveSparspakExt.jl b/ext/LinearSolveSparspakExt.jl index 4cf36ce35..380e682ca 100644 --- 
a/ext/LinearSolveSparspakExt.jl +++ b/ext/LinearSolveSparspakExt.jl @@ -1,6 +1,7 @@ module LinearSolveSparspakExt using LinearSolve, LinearAlgebra +using LinearSolve: LinearVerbosity using Sparspak using Sparspak.SparseCSCInterface.SparseArrays using SparseArrays: AbstractSparseMatrixCSC, nonzeros, rowvals, getcolptr @@ -12,14 +13,14 @@ function LinearSolve.init_cacheval( ::SparspakFactorization, A::SparseMatrixCSC{Float64, Int}, b, u, Pl, Pr, maxiters::Int, abstol, reltol, - verbose::Bool, assumptions::OperatorAssumptions) + verbose::LinearVerbosity, assumptions::OperatorAssumptions) PREALLOCATED_SPARSEPAK end function LinearSolve.init_cacheval( ::SparspakFactorization, A::AbstractSparseMatrixCSC{Tv, Ti}, b, u, Pl, Pr, maxiters::Int, abstol, reltol, - verbose::Bool, assumptions::OperatorAssumptions) where {Tv, Ti} + verbose::LinearVerbosity, assumptions::OperatorAssumptions) where {Tv, Ti} if size(A, 1) == size(A, 2) A = convert(AbstractMatrix, A) if A isa SparseArrays.AbstractSparseArray diff --git a/src/LinearSolve.jl b/src/LinearSolve.jl index 73498621b..c5dc40724 100644 --- a/src/LinearSolve.jl +++ b/src/LinearSolve.jl @@ -21,6 +21,7 @@ using SciMLBase: SciMLBase, LinearAliasSpecifier, AbstractSciMLOperator, using SciMLOperators: SciMLOperators, AbstractSciMLOperator, IdentityOperator, MatrixOperator, has_ldiv!, issquare +using SciMLLogging: SciMLLogging, @SciMLMessage, verbosity_to_int, AbstractVerbositySpecifier, LogLevel, VerbosityPreset using Setfield: @set, @set! 
using UnPack: @unpack using DocStringExtensions: DocStringExtensions @@ -361,6 +362,8 @@ const BLASELTYPES = Union{Float32, Float64, ComplexF32, ComplexF64} function defaultalg_symbol end +include("verbosity.jl") +include("blas_logging.jl") include("generic_lufact.jl") include("common.jl") include("extension_algs.jl") @@ -501,4 +504,6 @@ export OperatorAssumptions, OperatorCondition export LinearSolveAdjoint +export LinearVerbosity + end diff --git a/src/appleaccelerate.jl b/src/appleaccelerate.jl index 0b031ddbc..513ce5c28 100644 --- a/src/appleaccelerate.jl +++ b/src/appleaccelerate.jl @@ -235,14 +235,14 @@ const PREALLOCATED_APPLE_LU = begin end function LinearSolve.init_cacheval(alg::AppleAccelerateLUFactorization, A, b, u, Pl, Pr, - maxiters::Int, abstol, reltol, verbose::Bool, + maxiters::Int, abstol, reltol, verbose::LinearVerbosity, assumptions::OperatorAssumptions) PREALLOCATED_APPLE_LU end function LinearSolve.init_cacheval(alg::AppleAccelerateLUFactorization, A::AbstractMatrix{<:Union{Float32, ComplexF32, ComplexF64}}, b, u, Pl, Pr, - maxiters::Int, abstol, reltol, verbose::Bool, + maxiters::Int, abstol, reltol, verbose::LinearVerbosity, assumptions::OperatorAssumptions) A = rand(eltype(A), 0, 0) luinst = ArrayInterface.lu_instance(A) @@ -295,7 +295,7 @@ const PREALLOCATED_APPLE32_LU = begin end function LinearSolve.init_cacheval(alg::AppleAccelerate32MixedLUFactorization, A, b, u, Pl, Pr, - maxiters::Int, abstol, reltol, verbose::Bool, + maxiters::Int, abstol, reltol, verbose::LinearVerbosity, assumptions::OperatorAssumptions) # Pre-allocate appropriate 32-bit arrays based on input type m, n = size(A) diff --git a/src/blas_logging.jl b/src/blas_logging.jl new file mode 100644 index 000000000..0180c4fae --- /dev/null +++ b/src/blas_logging.jl @@ -0,0 +1,173 @@ + +""" + interpret_blas_code(func::Symbol, info::Integer) + +Interpret BLAS/LAPACK return codes (info parameter) to provide human-readable error messages. 
+Returns a tuple of (category::Symbol, message::String, details::String) +""" +function interpret_blas_code(func::Symbol, info::Integer) + if info == 0 + return (:success, "Operation completed successfully", "") + elseif info < 0 + return (:invalid_argument, + "Invalid argument error", + "Argument $(-info) had an illegal value") + else + # info > 0 means different things for different functions + return interpret_positive_info(func, info) + end +end + +function interpret_positive_info(func::Symbol, info::Integer) + func_str = string(func) + + # LU factorization routines + if occursin("getrf", func_str) + return (:singular_matrix, + "Matrix is singular", + "U($info,$info) is exactly zero. The factorization has been completed, but U is singular and division by U will produce infinity.") + + # Cholesky factorization routines + elseif occursin("potrf", func_str) + return (:not_positive_definite, + "Matrix is not positive definite", + "The leading minor of order $info is not positive definite, and the factorization could not be completed.") + + # QR factorization routines + elseif occursin("geqrf", func_str) || occursin("geqrt", func_str) + return (:numerical_issue, + "Numerical issue in QR factorization", + "Householder reflector $info could not be formed properly.") + + # SVD routines + elseif occursin("gesdd", func_str) || occursin("gesvd", func_str) + return (:convergence_failure, + "SVD did not converge", + "The algorithm failed to compute singular values. 
$info off-diagonal elements of an intermediate bidiagonal form did not converge to zero.") + + # Symmetric/Hermitian eigenvalue routines + elseif occursin("syev", func_str) || occursin("heev", func_str) + return (:convergence_failure, + "Eigenvalue computation did not converge", + "$info off-diagonal elements of an intermediate tridiagonal form did not converge to zero.") + + # Bunch-Kaufman factorization + elseif occursin("sytrf", func_str) || occursin("hetrf", func_str) + return (:singular_matrix, + "Matrix is singular", + "D($info,$info) is exactly zero. The factorization has been completed, but the block diagonal matrix D is singular.") + + # Solve routines (should not have positive info) + elseif occursin("getrs", func_str) || occursin("potrs", func_str) || + occursin("sytrs", func_str) || occursin("hetrs", func_str) + return (:unexpected_error, + "Unexpected positive return code from solve routine", + "Solve routine $func returned info=$info which should not happen.") + + # General eigenvalue problem + elseif occursin("ggev", func_str) || occursin("gges", func_str) + # NOTE(review): the matrix dimension is not available to this helper, + # so a QZ iteration failure (info <= n) cannot be distinguished from + # other errors (info > n); report the common QZ-failure interpretation + # for any positive info value. + return (:convergence_failure, + "QZ iteration failed", + "The QZ iteration failed to compute all eigenvalues. Elements 1:$(info-1) converged.") + + + + # LDLT factorization + elseif occursin("ldlt", func_str) + return (:singular_matrix, + "Matrix is singular", + "The $(info)-th pivot is zero. The factorization has been completed but division will produce infinity.") + + # Default case + else + return (:unknown_error, + "Unknown positive return code", + "Function $func returned info=$info. 
Consult LAPACK documentation for details.") + end +end + + + +""" + blas_info_msg(func::Symbol, info::Integer; + extra_context::Dict{Symbol,Any} = Dict()) + +Interpret a BLAS/LAPACK return code and build a log message; returns a (verbosity_field, message) tuple. +""" +function blas_info_msg(func::Symbol, info::Integer; + extra_context::Dict{Symbol, Any} = Dict()) + category, message, details = interpret_blas_code(func, info) + + verbosity_field = if category in [:singular_matrix, :not_positive_definite, :convergence_failure] + :blas_errors + elseif category == :invalid_argument + :blas_invalid_args + else + :blas_info + end + + # Build structured message components + msg_main = "BLAS/LAPACK $func: $message" + msg_details = !isempty(details) ? details : nothing + msg_info = info + + # Build complete message with all details + full_msg = if !isempty(extra_context) || msg_details !== nothing + parts = String[msg_main] + if msg_details !== nothing + push!(parts, "Details: $msg_details") + end + push!(parts, "Return code (info): $msg_info") + if !isempty(extra_context) + for (key, value) in extra_context + push!(parts, "$key: $value") + end + end + join(parts, "\n ") + else + "$msg_main (info=$msg_info)" + end + + verbosity_field, full_msg +end + + +function get_blas_operation_info(func::Symbol, A, b; condition = false) + info = Dict{Symbol, Any}() + + # Matrix properties + info[:matrix_size] = size(A) + info[:matrix_type] = typeof(A) + info[:element_type] = eltype(A) + + # Condition number (based on verbosity setting) + if condition && size(A, 1) == size(A, 2) + try + cond_num = cond(A) + info[:condition_number] = cond_num + + # Log the condition number if enabled + cond_msg = "Matrix condition number: $(round(cond_num, sigdigits=4)) for $(size(A, 1))×$(size(A, 2)) matrix in $func" + + catch + # Skip if condition number computation fails + info[:condition_number] = nothing + end + end + + # RHS properties if provided + if b !== nothing + info[:rhs_size] = size(b) + 
info[:rhs_type] = typeof(b) + end + + # Memory usage estimate + mem_bytes = prod(size(A)) * sizeof(eltype(A)) + info[:memory_usage_MB] = round(mem_bytes / 1024^2, digits = 2) + + return info +end \ No newline at end of file diff --git a/src/common.jl b/src/common.jl index 7a29b521e..6f6f50d53 100644 --- a/src/common.jl +++ b/src/common.jl @@ -89,7 +89,7 @@ solving and caching of factorizations and intermediate results. - `abstol::Ttol`: Absolute tolerance for iterative solvers. - `reltol::Ttol`: Relative tolerance for iterative solvers. - `maxiters::Int`: Maximum number of iterations for iterative solvers. -- `verbose::Bool`: Whether to print verbose output during solving. +- `verbose::LinearVerbosity`: Whether to print verbose output during solving. - `assumptions::OperatorAssumptions{issq}`: Assumptions about the operator properties. - `sensealg::S`: Sensitivity analysis algorithm for automatic differentiation. @@ -119,7 +119,7 @@ mutable struct LinearCache{TA, Tb, Tu, Tp, Talg, Tc, Tl, Tr, Ttol, issq, S} abstol::Ttol reltol::Ttol maxiters::Int - verbose::Bool + verbose::LinearVerbosity assumptions::OperatorAssumptions{issq} sensealg::S end @@ -267,7 +267,7 @@ function __init(prob::LinearProblem, alg::SciMLLinearSolveAlgorithm, abstol = default_tol(real(eltype(prob.b))), reltol = default_tol(real(eltype(prob.b))), maxiters::Int = length(prob.b), - verbose::Bool = false, + verbose = LinearVerbosity(), Pl = nothing, Pr = nothing, assumptions = OperatorAssumptions(issquare(prob.A)), @@ -324,6 +324,20 @@ function __init(prob::LinearProblem, alg::SciMLLinearSolveAlgorithm, copy(A) end + if verbose isa Bool + #@warn "Using `true` or `false` for `verbose` is being deprecated. Please use a `LinearVerbosity` type to specify verbosity settings. + # For details see the verbosity section of the common solver options documentation page." 
+ if verbose + verbose_spec = LinearVerbosity{true}() + else + verbose_spec = LinearVerbosity{false}() + end + elseif verbose isa SciMLLogging.VerbosityPreset + verbose_spec = LinearVerbosity(verbose) + else + verbose_spec = verbose + end + b = if issparsematrix(b) && !(A isa Diagonal) Array(b) # the solution to a linear solve will always be dense! elseif alias_b || b isa SVector @@ -361,7 +375,7 @@ function __init(prob::LinearProblem, alg::SciMLLinearSolveAlgorithm, # TODO: deprecate once all docs are updated to the new form #@warn "passing Preconditioners at `init`/`solve` time is deprecated. Instead add a `precs` function to your algorithm." end - cacheval = init_cacheval(alg, A, b, u0_, Pl, Pr, maxiters, abstol, reltol, verbose, + cacheval = init_cacheval(alg, A, b, u0_, Pl, Pr, maxiters, abstol, reltol, verbose_spec, assumptions) isfresh = true precsisfresh = false @@ -371,7 +385,7 @@ function __init(prob::LinearProblem, alg::SciMLLinearSolveAlgorithm, typeof(Pl), typeof(Pr), typeof(reltol), typeof(assumptions.issq), typeof(sensealg)}( A, b, u0_, p, alg, cacheval, isfresh, precsisfresh, Pl, Pr, abstol, reltol, - maxiters, verbose, assumptions, sensealg) + maxiters, verbose_spec, assumptions, sensealg) return cache end diff --git a/src/default.jl b/src/default.jl index b6a6733fd..664571fe3 100644 --- a/src/default.jl +++ b/src/default.jl @@ -450,7 +450,7 @@ function SciMLBase.solve!(cache::LinearCache, alg::Nothing, end function init_cacheval(alg::Nothing, A, b, u, Pl, Pr, maxiters::Int, abstol, reltol, - verbose::Bool, assump::OperatorAssumptions) + verbose::LinearVerbosity, assump::OperatorAssumptions) init_cacheval(defaultalg(A, b, assump), A, b, u, Pl, Pr, maxiters, abstol, reltol, verbose, assump) @@ -461,7 +461,7 @@ cache.cacheval = NamedTuple(LUFactorization = cache of LUFactorization, ...) 
""" @generated function init_cacheval(alg::DefaultLinearSolver, A, b, u, Pl, Pr, maxiters::Int, abstol, reltol, - verbose::Bool, assump::OperatorAssumptions) + verbose::LinearVerbosity, assump::OperatorAssumptions) caches = map(first.(EnumX.symbol_map(DefaultAlgorithmChoice.T))) do alg if alg === :KrylovJL_GMRES || alg === :KrylovJL_CRAIGMR || alg === :KrylovJL_LSMR quote @@ -513,7 +513,8 @@ end newex = quote sol = SciMLBase.solve!(cache, $(algchoice_to_alg(alg)), args...; kwargs...) if sol.retcode === ReturnCode.Failure && alg.safetyfallback - ## TODO: Add verbosity logging here about using the fallback + @SciMLMessage("LU factorization failed, falling back to QR factorization. `A` is potentially rank-deficient.", + cache.verbose, :default_lu_fallback) sol = SciMLBase.solve!( cache, QRFactorization(ColumnNorm()), args...; kwargs...) SciMLBase.build_linear_solution(alg, sol.u, sol.resid, sol.cache; @@ -533,7 +534,8 @@ end sol = SciMLBase.solve!(cache, $(algchoice_to_alg(alg)), args...; kwargs...) if sol.retcode === ReturnCode.Failure && alg.safetyfallback - ## TODO: Add verbosity logging here about using the fallback + @SciMLMessage("LU factorization failed, falling back to QR factorization. `A` is potentially rank-deficient.", + cache.verbose, :default_lu_fallback) sol = SciMLBase.solve!( cache, QRFactorization(ColumnNorm()), args...; kwargs...) SciMLBase.build_linear_solution(alg, sol.u, sol.resid, sol.cache; @@ -553,7 +555,8 @@ end sol = SciMLBase.solve!(cache, $(algchoice_to_alg(alg)), args...; kwargs...) if sol.retcode === ReturnCode.Failure && alg.safetyfallback - ## TODO: Add verbosity logging here about using the fallback + @SciMLMessage("LU factorization failed, falling back to QR factorization. `A` is potentially rank-deficient.", + cache.verbose, :default_lu_fallback) sol = SciMLBase.solve!( cache, QRFactorization(ColumnNorm()), args...; kwargs...) 
SciMLBase.build_linear_solution(alg, sol.u, sol.resid, sol.cache; @@ -573,7 +576,8 @@ end sol = SciMLBase.solve!(cache, $(algchoice_to_alg(alg)), args...; kwargs...) if sol.retcode === ReturnCode.Failure && alg.safetyfallback - ## TODO: Add verbosity logging here about using the fallback + @SciMLMessage("LU factorization failed, falling back to QR factorization. `A` is potentially rank-deficient.", + cache.verbose, :default_lu_fallback) sol = SciMLBase.solve!( cache, QRFactorization(ColumnNorm()), args...; kwargs...) SciMLBase.build_linear_solution(alg, sol.u, sol.resid, sol.cache; @@ -593,7 +597,8 @@ end sol = SciMLBase.solve!(cache, $(algchoice_to_alg(alg)), args...; kwargs...) if sol.retcode === ReturnCode.Failure && alg.safetyfallback - ## TODO: Add verbosity logging here about using the fallback + @SciMLMessage("LU factorization failed, falling back to QR factorization. `A` is potentially rank-deficient.", + cache.verbose, :default_lu_fallback) sol = SciMLBase.solve!( cache, QRFactorization(ColumnNorm()), args...; kwargs...) SciMLBase.build_linear_solution(alg, sol.u, sol.resid, sol.cache; diff --git a/src/factorization.jl b/src/factorization.jl index 9f9065c4a..ca335d0ed 100644 --- a/src/factorization.jl +++ b/src/factorization.jl @@ -50,14 +50,14 @@ end # RF Bad fallback: will fail if `A` is just a stand-in # This should instead just create the factorization type. 
function init_cacheval(alg::AbstractFactorization, A, b, u, Pl, Pr, maxiters::Int, abstol, - reltol, verbose::Bool, assumptions::OperatorAssumptions) + reltol, verbose::LinearVerbosity, assumptions::OperatorAssumptions) do_factorization(alg, convert(AbstractMatrix, A), b, u) end ## RFLU Factorization function LinearSolve.init_cacheval(alg::RFLUFactorization, A, b, u, Pl, Pr, maxiters::Int, - abstol, reltol, verbose::Bool, assumptions::OperatorAssumptions) + abstol, reltol, verbose::LinearVerbosity, assumptions::OperatorAssumptions) ipiv = Vector{LinearAlgebra.BlasInt}(undef, min(size(A)...)) ArrayInterface.lu_instance(convert(AbstractMatrix, A)), ipiv end @@ -65,14 +65,14 @@ end function LinearSolve.init_cacheval( alg::RFLUFactorization, A::Matrix{Float64}, b, u, Pl, Pr, maxiters::Int, - abstol, reltol, verbose::Bool, assumptions::OperatorAssumptions) + abstol, reltol, verbose::LinearVerbosity, assumptions::OperatorAssumptions) PREALLOCATED_LU, PREALLOCATED_IPIV end function LinearSolve.init_cacheval(alg::RFLUFactorization, A::Union{Diagonal, SymTridiagonal, Tridiagonal}, b, u, Pl, Pr, maxiters::Int, - abstol, reltol, verbose::Bool, assumptions::OperatorAssumptions) + abstol, reltol, verbose::LinearVerbosity, assumptions::OperatorAssumptions) nothing, nothing end @@ -171,7 +171,7 @@ end function init_cacheval( alg::GenericLUFactorization, A, b, u, Pl, Pr, - maxiters::Int, abstol, reltol, verbose::Bool, + maxiters::Int, abstol, reltol, verbose::LinearVerbosity, assumptions::OperatorAssumptions) ipiv = Vector{LinearAlgebra.BlasInt}(undef, min(size(A)...)) ArrayInterface.lu_instance(convert(AbstractMatrix, A)), ipiv @@ -179,7 +179,7 @@ end function init_cacheval( alg::GenericLUFactorization, A::Matrix{Float64}, b, u, Pl, Pr, - maxiters::Int, abstol, reltol, verbose::Bool, + maxiters::Int, abstol, reltol, verbose::LinearVerbosity, assumptions::OperatorAssumptions) PREALLOCATED_LU, PREALLOCATED_IPIV end @@ -211,21 +211,21 @@ end function init_cacheval( 
alg::LUFactorization, A, b, u, Pl, Pr, - maxiters::Int, abstol, reltol, verbose::Bool, + maxiters::Int, abstol, reltol, verbose::LinearVerbosity, assumptions::OperatorAssumptions) ArrayInterface.lu_instance(convert(AbstractMatrix, A)) end function init_cacheval(alg::LUFactorization, A::Union{<:Adjoint, <:Transpose}, b, u, Pl, Pr, maxiters::Int, abstol, reltol, - verbose::Bool, assumptions::OperatorAssumptions) + verbose::LinearVerbosity, assumptions::OperatorAssumptions) error_no_cudss_lu(A) return lu(A; check = false) end function init_cacheval(alg::GenericLUFactorization, A::Union{<:Adjoint, <:Transpose}, b, u, Pl, Pr, maxiters::Int, abstol, reltol, - verbose::Bool, assumptions::OperatorAssumptions) + verbose::LinearVerbosity, assumptions::OperatorAssumptions) error_no_cudss_lu(A) A isa GPUArraysCore.AnyGPUArray && return nothing ipiv = Vector{LinearAlgebra.BlasInt}(undef, 0) @@ -236,21 +236,21 @@ const PREALLOCATED_LU = ArrayInterface.lu_instance(rand(1, 1)) function init_cacheval(alg::LUFactorization, A::Matrix{Float64}, b, u, Pl, Pr, - maxiters::Int, abstol, reltol, verbose::Bool, + maxiters::Int, abstol, reltol, verbose::LinearVerbosity, assumptions::OperatorAssumptions) PREALLOCATED_LU end function init_cacheval(alg::LUFactorization, A::AbstractSciMLOperator, b, u, Pl, Pr, - maxiters::Int, abstol, reltol, verbose::Bool, + maxiters::Int, abstol, reltol, verbose::LinearVerbosity, assumptions::OperatorAssumptions) nothing end function init_cacheval(alg::GenericLUFactorization, A::AbstractSciMLOperator, b, u, Pl, Pr, - maxiters::Int, abstol, reltol, verbose::Bool, + maxiters::Int, abstol, reltol, verbose::LinearVerbosity, assumptions::OperatorAssumptions) nothing end @@ -300,13 +300,13 @@ function do_factorization(alg::QRFactorization, A, b, u) end function init_cacheval(alg::QRFactorization, A, b, u, Pl, Pr, - maxiters::Int, abstol, reltol, verbose::Bool, + maxiters::Int, abstol, reltol, verbose::LinearVerbosity, assumptions::OperatorAssumptions) 
ArrayInterface.qr_instance(convert(AbstractMatrix, A), alg.pivot) end function init_cacheval(alg::QRFactorization, A::Symmetric{<:Number, <:Array}, b, u, Pl, Pr, - maxiters::Int, abstol, reltol, verbose::Bool, + maxiters::Int, abstol, reltol, verbose::LinearVerbosity, assumptions::OperatorAssumptions) return qr(convert(AbstractMatrix, A), alg.pivot) end @@ -314,13 +314,13 @@ end const PREALLOCATED_QR_ColumnNorm = ArrayInterface.qr_instance(rand(1, 1), ColumnNorm()) function init_cacheval(alg::QRFactorization{ColumnNorm}, A::Matrix{Float64}, b, u, Pl, Pr, - maxiters::Int, abstol, reltol, verbose::Bool, assumptions::OperatorAssumptions) + maxiters::Int, abstol, reltol, verbose::LinearVerbosity, assumptions::OperatorAssumptions) return PREALLOCATED_QR_ColumnNorm end function init_cacheval( alg::QRFactorization, A::Union{<:Adjoint, <:Transpose}, b, u, Pl, Pr, - maxiters::Int, abstol, reltol, verbose::Bool, assumptions::OperatorAssumptions) + maxiters::Int, abstol, reltol, verbose::LinearVerbosity, assumptions::OperatorAssumptions) A isa GPUArraysCore.AnyGPUArray && return qr(A) return qr(A, alg.pivot) end @@ -328,12 +328,12 @@ end const PREALLOCATED_QR_NoPivot = ArrayInterface.qr_instance(rand(1, 1)) function init_cacheval(alg::QRFactorization{NoPivot}, A::Matrix{Float64}, b, u, Pl, Pr, - maxiters::Int, abstol, reltol, verbose::Bool, assumptions::OperatorAssumptions) + maxiters::Int, abstol, reltol, verbose::LinearVerbosity, assumptions::OperatorAssumptions) return PREALLOCATED_QR_NoPivot end function init_cacheval(alg::QRFactorization, A::AbstractSciMLOperator, b, u, Pl, Pr, - maxiters::Int, abstol, reltol, verbose::Bool, + maxiters::Int, abstol, reltol, verbose::LinearVerbosity, assumptions::OperatorAssumptions) nothing end @@ -379,33 +379,33 @@ function do_factorization(alg::CholeskyFactorization, A, b, u) end function init_cacheval(alg::CholeskyFactorization, A::SMatrix{S1, S2}, b, u, Pl, Pr, - maxiters::Int, abstol, reltol, verbose::Bool, + maxiters::Int, abstol, 
reltol, verbose::LinearVerbosity, assumptions::OperatorAssumptions) where {S1, S2} cholesky(A) end function init_cacheval(alg::CholeskyFactorization, A::GPUArraysCore.AnyGPUArray, b, u, Pl, - Pr, maxiters::Int, abstol, reltol, verbose::Bool, assumptions::OperatorAssumptions) + Pr, maxiters::Int, abstol, reltol, verbose::LinearVerbosity, assumptions::OperatorAssumptions) cholesky(A; check = false) end function init_cacheval( alg::CholeskyFactorization, A::AbstractArray{<:BLASELTYPES}, b, u, Pl, Pr, - maxiters::Int, abstol, reltol, verbose::Bool, assumptions::OperatorAssumptions) + maxiters::Int, abstol, reltol, verbose::LinearVerbosity, assumptions::OperatorAssumptions) ArrayInterface.cholesky_instance(convert(AbstractMatrix, A), alg.pivot) end const PREALLOCATED_CHOLESKY = ArrayInterface.cholesky_instance(rand(1, 1), NoPivot()) function init_cacheval(alg::CholeskyFactorization, A::Matrix{Float64}, b, u, Pl, Pr, - maxiters::Int, abstol, reltol, verbose::Bool, + maxiters::Int, abstol, reltol, verbose::LinearVerbosity, assumptions::OperatorAssumptions) PREALLOCATED_CHOLESKY end function init_cacheval(alg::CholeskyFactorization, A::Union{Diagonal, AbstractSciMLOperator, AbstractArray}, b, u, Pl, Pr, - maxiters::Int, abstol, reltol, verbose::Bool, + maxiters::Int, abstol, reltol, verbose::LinearVerbosity, assumptions::OperatorAssumptions) nothing end @@ -433,12 +433,12 @@ end function init_cacheval(alg::LDLtFactorization, A, b, u, Pl, Pr, maxiters::Int, abstol, reltol, - verbose::Bool, assumptions::OperatorAssumptions) + verbose::LinearVerbosity, assumptions::OperatorAssumptions) nothing end function init_cacheval(alg::LDLtFactorization, A::SymTridiagonal, b, u, Pl, Pr, - maxiters::Int, abstol, reltol, verbose::Bool, + maxiters::Int, abstol, reltol, verbose::LinearVerbosity, assumptions::OperatorAssumptions) ArrayInterface.ldlt_instance(convert(AbstractMatrix, A)) end @@ -472,7 +472,7 @@ function do_factorization(alg::SVDFactorization, A, b, u) end function 
init_cacheval(alg::SVDFactorization, A::Union{Matrix, SMatrix}, b, u, Pl, Pr, - maxiters::Int, abstol, reltol, verbose::Bool, + maxiters::Int, abstol, reltol, verbose::LinearVerbosity, assumptions::OperatorAssumptions) ArrayInterface.svd_instance(convert(AbstractMatrix, A)) end @@ -480,13 +480,13 @@ end const PREALLOCATED_SVD = ArrayInterface.svd_instance(rand(1, 1)) function init_cacheval(alg::SVDFactorization, A::Matrix{Float64}, b, u, Pl, Pr, - maxiters::Int, abstol, reltol, verbose::Bool, + maxiters::Int, abstol, reltol, verbose::LinearVerbosity, assumptions::OperatorAssumptions) PREALLOCATED_SVD end function init_cacheval(alg::SVDFactorization, A, b, u, Pl, Pr, - maxiters::Int, abstol, reltol, verbose::Bool, + maxiters::Int, abstol, reltol, verbose::LinearVerbosity, assumptions::OperatorAssumptions) nothing end @@ -515,7 +515,7 @@ end function init_cacheval(alg::BunchKaufmanFactorization, A::Symmetric{<:Number, <:Matrix}, b, u, Pl, Pr, - maxiters::Int, abstol, reltol, verbose::Bool, + maxiters::Int, abstol, reltol, verbose::LinearVerbosity, assumptions::OperatorAssumptions) ArrayInterface.bunchkaufman_instance(convert(AbstractMatrix, A)) end @@ -525,13 +525,13 @@ const PREALLOCATED_BUNCHKAUFMAN = ArrayInterface.bunchkaufman_instance(Symmetric function init_cacheval(alg::BunchKaufmanFactorization, A::Symmetric{Float64, Matrix{Float64}}, b, u, Pl, Pr, - maxiters::Int, abstol, reltol, verbose::Bool, + maxiters::Int, abstol, reltol, verbose::LinearVerbosity, assumptions::OperatorAssumptions) PREALLOCATED_BUNCHKAUFMAN end function init_cacheval(alg::BunchKaufmanFactorization, A, b, u, Pl, Pr, - maxiters::Int, abstol, reltol, verbose::Bool, + maxiters::Int, abstol, reltol, verbose::LinearVerbosity, assumptions::OperatorAssumptions) nothing end @@ -572,58 +572,58 @@ end function init_cacheval( alg::GenericFactorization{typeof(lu)}, A::AbstractMatrix, b, u, Pl, Pr, maxiters::Int, - abstol, reltol, verbose::Bool, assumptions::OperatorAssumptions) + abstol, reltol, 
verbose::LinearVerbosity, assumptions::OperatorAssumptions) ArrayInterface.lu_instance(A) end function init_cacheval( alg::GenericFactorization{typeof(lu!)}, A::AbstractMatrix, b, u, Pl, Pr, maxiters::Int, - abstol, reltol, verbose::Bool, assumptions::OperatorAssumptions) + abstol, reltol, verbose::LinearVerbosity, assumptions::OperatorAssumptions) ArrayInterface.lu_instance(A) end function init_cacheval(alg::GenericFactorization{typeof(lu)}, A::StridedMatrix{<:LinearAlgebra.BlasFloat}, b, u, Pl, Pr, maxiters::Int, - abstol, reltol, verbose::Bool, assumptions::OperatorAssumptions) + abstol, reltol, verbose::LinearVerbosity, assumptions::OperatorAssumptions) ArrayInterface.lu_instance(A) end function init_cacheval(alg::GenericFactorization{typeof(lu!)}, A::StridedMatrix{<:LinearAlgebra.BlasFloat}, b, u, Pl, Pr, maxiters::Int, - abstol, reltol, verbose::Bool, assumptions::OperatorAssumptions) + abstol, reltol, verbose::LinearVerbosity, assumptions::OperatorAssumptions) ArrayInterface.lu_instance(A) end function init_cacheval(alg::GenericFactorization{typeof(lu)}, A::Diagonal, b, u, Pl, Pr, - maxiters::Int, abstol, reltol, verbose::Bool, + maxiters::Int, abstol, reltol, verbose::LinearVerbosity, assumptions::OperatorAssumptions) Diagonal(inv.(A.diag)) end function init_cacheval(alg::GenericFactorization{typeof(lu)}, A::Tridiagonal, b, u, Pl, Pr, - maxiters::Int, abstol, reltol, verbose::Bool, + maxiters::Int, abstol, reltol, verbose::LinearVerbosity, assumptions::OperatorAssumptions) ArrayInterface.lu_instance(A) end function init_cacheval(alg::GenericFactorization{typeof(lu!)}, A::Diagonal, b, u, Pl, Pr, - maxiters::Int, abstol, reltol, verbose::Bool, + maxiters::Int, abstol, reltol, verbose::LinearVerbosity, assumptions::OperatorAssumptions) Diagonal(inv.(A.diag)) end function init_cacheval( alg::GenericFactorization{typeof(lu!)}, A::Tridiagonal, b, u, Pl, Pr, - maxiters::Int, abstol, reltol, verbose::Bool, + maxiters::Int, abstol, reltol, verbose::LinearVerbosity, 
assumptions::OperatorAssumptions) ArrayInterface.lu_instance(A) end function init_cacheval( alg::GenericFactorization{typeof(lu!)}, A::SymTridiagonal{T, V}, b, u, Pl, Pr, - maxiters::Int, abstol, reltol, verbose::Bool, + maxiters::Int, abstol, reltol, verbose::LinearVerbosity, assumptions::OperatorAssumptions) where {T, V} LinearAlgebra.LDLt{T, SymTridiagonal{T, V}}(A) end function init_cacheval( alg::GenericFactorization{typeof(lu)}, A::SymTridiagonal{T, V}, b, u, Pl, Pr, - maxiters::Int, abstol, reltol, verbose::Bool, + maxiters::Int, abstol, reltol, verbose::LinearVerbosity, assumptions::OperatorAssumptions) where {T, V} LinearAlgebra.LDLt{T, SymTridiagonal{T, V}}(A) end @@ -631,24 +631,24 @@ end function init_cacheval( alg::GenericFactorization{typeof(qr)}, A::AbstractMatrix, b, u, Pl, Pr, maxiters::Int, - abstol, reltol, verbose::Bool, assumptions::OperatorAssumptions) + abstol, reltol, verbose::LinearVerbosity, assumptions::OperatorAssumptions) ArrayInterface.qr_instance(A) end function init_cacheval( alg::GenericFactorization{typeof(qr!)}, A::AbstractMatrix, b, u, Pl, Pr, maxiters::Int, - abstol, reltol, verbose::Bool, assumptions::OperatorAssumptions) + abstol, reltol, verbose::LinearVerbosity, assumptions::OperatorAssumptions) ArrayInterface.qr_instance(A) end function init_cacheval( alg::GenericFactorization{typeof(qr)}, A::SymTridiagonal{T, V}, b, u, Pl, Pr, - maxiters::Int, abstol, reltol, verbose::Bool, + maxiters::Int, abstol, reltol, verbose::LinearVerbosity, assumptions::OperatorAssumptions) where {T, V} LinearAlgebra.LDLt{T, SymTridiagonal{T, V}}(A) end function init_cacheval( alg::GenericFactorization{typeof(qr!)}, A::SymTridiagonal{T, V}, b, u, Pl, Pr, - maxiters::Int, abstol, reltol, verbose::Bool, + maxiters::Int, abstol, reltol, verbose::LinearVerbosity, assumptions::OperatorAssumptions) where {T, V} LinearAlgebra.LDLt{T, SymTridiagonal{T, V}}(A) end @@ -656,33 +656,33 @@ end function init_cacheval(alg::GenericFactorization{typeof(qr)}, 
A::StridedMatrix{<:LinearAlgebra.BlasFloat}, b, u, Pl, Pr, maxiters::Int, - abstol, reltol, verbose::Bool, assumptions::OperatorAssumptions) + abstol, reltol, verbose::LinearVerbosity, assumptions::OperatorAssumptions) ArrayInterface.qr_instance(A) end function init_cacheval(alg::GenericFactorization{typeof(qr!)}, A::StridedMatrix{<:LinearAlgebra.BlasFloat}, b, u, Pl, Pr, maxiters::Int, - abstol, reltol, verbose::Bool, assumptions::OperatorAssumptions) + abstol, reltol, verbose::LinearVerbosity, assumptions::OperatorAssumptions) ArrayInterface.qr_instance(A) end function init_cacheval(alg::GenericFactorization{typeof(qr)}, A::Diagonal, b, u, Pl, Pr, - maxiters::Int, abstol, reltol, verbose::Bool, + maxiters::Int, abstol, reltol, verbose::LinearVerbosity, assumptions::OperatorAssumptions) Diagonal(inv.(A.diag)) end function init_cacheval(alg::GenericFactorization{typeof(qr)}, A::Tridiagonal, b, u, Pl, Pr, - maxiters::Int, abstol, reltol, verbose::Bool, + maxiters::Int, abstol, reltol, verbose::LinearVerbosity, assumptions::OperatorAssumptions) ArrayInterface.qr_instance(A) end function init_cacheval(alg::GenericFactorization{typeof(qr!)}, A::Diagonal, b, u, Pl, Pr, - maxiters::Int, abstol, reltol, verbose::Bool, + maxiters::Int, abstol, reltol, verbose::LinearVerbosity, assumptions::OperatorAssumptions) Diagonal(inv.(A.diag)) end function init_cacheval( alg::GenericFactorization{typeof(qr!)}, A::Tridiagonal, b, u, Pl, Pr, - maxiters::Int, abstol, reltol, verbose::Bool, + maxiters::Int, abstol, reltol, verbose::LinearVerbosity, assumptions::OperatorAssumptions) ArrayInterface.qr_instance(A) end @@ -690,87 +690,87 @@ end function init_cacheval( alg::GenericFactorization{typeof(svd)}, A::AbstractMatrix, b, u, Pl, Pr, maxiters::Int, - abstol, reltol, verbose::Bool, assumptions::OperatorAssumptions) + abstol, reltol, verbose::LinearVerbosity, assumptions::OperatorAssumptions) ArrayInterface.svd_instance(A) end function init_cacheval( 
alg::GenericFactorization{typeof(svd!)}, A::AbstractMatrix, b, u, Pl, Pr, maxiters::Int, - abstol, reltol, verbose::Bool, assumptions::OperatorAssumptions) + abstol, reltol, verbose::LinearVerbosity, assumptions::OperatorAssumptions) ArrayInterface.svd_instance(A) end function init_cacheval(alg::GenericFactorization{typeof(svd)}, A::StridedMatrix{<:LinearAlgebra.BlasFloat}, b, u, Pl, Pr, maxiters::Int, - abstol, reltol, verbose::Bool, assumptions::OperatorAssumptions) + abstol, reltol, verbose::LinearVerbosity, assumptions::OperatorAssumptions) ArrayInterface.svd_instance(A) end function init_cacheval(alg::GenericFactorization{typeof(svd!)}, A::StridedMatrix{<:LinearAlgebra.BlasFloat}, b, u, Pl, Pr, maxiters::Int, - abstol, reltol, verbose::Bool, assumptions::OperatorAssumptions) + abstol, reltol, verbose::LinearVerbosity, assumptions::OperatorAssumptions) ArrayInterface.svd_instance(A) end function init_cacheval(alg::GenericFactorization{typeof(svd)}, A::Diagonal, b, u, Pl, Pr, - maxiters::Int, abstol, reltol, verbose::Bool, + maxiters::Int, abstol, reltol, verbose::LinearVerbosity, assumptions::OperatorAssumptions) Diagonal(inv.(A.diag)) end function init_cacheval( alg::GenericFactorization{typeof(svd)}, A::Tridiagonal, b, u, Pl, Pr, - maxiters::Int, abstol, reltol, verbose::Bool, + maxiters::Int, abstol, reltol, verbose::LinearVerbosity, assumptions::OperatorAssumptions) ArrayInterface.svd_instance(A) end function init_cacheval(alg::GenericFactorization{typeof(svd!)}, A::Diagonal, b, u, Pl, Pr, - maxiters::Int, abstol, reltol, verbose::Bool, + maxiters::Int, abstol, reltol, verbose::LinearVerbosity, assumptions::OperatorAssumptions) Diagonal(inv.(A.diag)) end function init_cacheval(alg::GenericFactorization{typeof(svd!)}, A::Tridiagonal, b, u, Pl, Pr, - maxiters::Int, abstol, reltol, verbose::Bool, + maxiters::Int, abstol, reltol, verbose::LinearVerbosity, assumptions::OperatorAssumptions) ArrayInterface.svd_instance(A) end function init_cacheval( 
alg::GenericFactorization{typeof(svd!)}, A::SymTridiagonal{T, V}, b, u, Pl, Pr, - maxiters::Int, abstol, reltol, verbose::Bool, + maxiters::Int, abstol, reltol, verbose::LinearVerbosity, assumptions::OperatorAssumptions) where {T, V} LinearAlgebra.LDLt{T, SymTridiagonal{T, V}}(A) end function init_cacheval( alg::GenericFactorization{typeof(svd)}, A::SymTridiagonal{T, V}, b, u, Pl, Pr, - maxiters::Int, abstol, reltol, verbose::Bool, + maxiters::Int, abstol, reltol, verbose::LinearVerbosity, assumptions::OperatorAssumptions) where {T, V} LinearAlgebra.LDLt{T, SymTridiagonal{T, V}}(A) end function init_cacheval(alg::GenericFactorization, A::Diagonal, b, u, Pl, Pr, maxiters::Int, - abstol, reltol, verbose::Bool, assumptions::OperatorAssumptions) + abstol, reltol, verbose::LinearVerbosity, assumptions::OperatorAssumptions) Diagonal(inv.(A.diag)) end function init_cacheval(alg::GenericFactorization, A::Tridiagonal, b, u, Pl, Pr, maxiters::Int, - abstol, reltol, verbose::Bool, assumptions::OperatorAssumptions) + abstol, reltol, verbose::LinearVerbosity, assumptions::OperatorAssumptions) ArrayInterface.lu_instance(A) end function init_cacheval(alg::GenericFactorization, A::SymTridiagonal{T, V}, b, u, Pl, Pr, - maxiters::Int, abstol, reltol, verbose::Bool, + maxiters::Int, abstol, reltol, verbose::LinearVerbosity, assumptions::OperatorAssumptions) where {T, V} LinearAlgebra.LDLt{T, SymTridiagonal{T, V}}(A) end function init_cacheval(alg::GenericFactorization, A, b, u, Pl, Pr, maxiters::Int, - abstol, reltol, verbose::Bool, assumptions::OperatorAssumptions) + abstol, reltol, verbose::LinearVerbosity, assumptions::OperatorAssumptions) init_cacheval(alg, convert(AbstractMatrix, A), b, u, Pl, Pr, - maxiters::Int, abstol, reltol, verbose::Bool, + maxiters::Int, abstol, reltol, verbose::LinearVerbosity, assumptions::OperatorAssumptions) end function init_cacheval(alg::GenericFactorization, A::AbstractMatrix, b, u, Pl, Pr, maxiters::Int, - abstol, reltol, verbose::Bool, 
assumptions::OperatorAssumptions) + abstol, reltol, verbose::LinearVerbosity, assumptions::OperatorAssumptions) do_factorization(alg, A, b, u) end @@ -778,7 +778,7 @@ function init_cacheval( alg::Union{GenericFactorization{typeof(bunchkaufman!)}, GenericFactorization{typeof(bunchkaufman)}}, A::Union{Hermitian, Symmetric}, b, u, Pl, Pr, maxiters::Int, abstol, - reltol, verbose::Bool, assumptions::OperatorAssumptions) + reltol, verbose::LinearVerbosity, assumptions::OperatorAssumptions) BunchKaufman(A.data, Array(1:size(A, 1)), A.uplo, true, false, 0) end @@ -787,7 +787,7 @@ function init_cacheval( GenericFactorization{typeof(bunchkaufman)}}, A::StridedMatrix{<:LinearAlgebra.BlasFloat}, b, u, Pl, Pr, maxiters::Int, - abstol, reltol, verbose::Bool, assumptions::OperatorAssumptions) + abstol, reltol, verbose::LinearVerbosity, assumptions::OperatorAssumptions) if eltype(A) <: Complex return bunchkaufman!(Hermitian(A)) else @@ -801,49 +801,49 @@ end # Cholesky needs the posdef matrix, for GenericFactorization assume structure is needed function init_cacheval( alg::GenericFactorization{typeof(cholesky)}, A::AbstractMatrix, b, u, Pl, Pr, - maxiters::Int, abstol, reltol, verbose::Bool, + maxiters::Int, abstol, reltol, verbose::LinearVerbosity, assumptions::OperatorAssumptions) newA = copy(convert(AbstractMatrix, A)) do_factorization(alg, newA, b, u) end function init_cacheval( alg::GenericFactorization{typeof(cholesky!)}, A::AbstractMatrix, b, u, Pl, Pr, - maxiters::Int, abstol, reltol, verbose::Bool, + maxiters::Int, abstol, reltol, verbose::LinearVerbosity, assumptions::OperatorAssumptions) newA = copy(convert(AbstractMatrix, A)) do_factorization(alg, newA, b, u) end function init_cacheval(alg::GenericFactorization{typeof(cholesky!)}, A::Diagonal, b, u, Pl, Pr, maxiters::Int, - abstol, reltol, verbose::Bool, assumptions::OperatorAssumptions) + abstol, reltol, verbose::LinearVerbosity, assumptions::OperatorAssumptions) Diagonal(inv.(A.diag)) end function init_cacheval( 
alg::GenericFactorization{typeof(cholesky!)}, A::Tridiagonal, b, u, Pl, Pr, maxiters::Int, - abstol, reltol, verbose::Bool, assumptions::OperatorAssumptions) + abstol, reltol, verbose::LinearVerbosity, assumptions::OperatorAssumptions) ArrayInterface.lu_instance(A) end function init_cacheval( alg::GenericFactorization{typeof(cholesky!)}, A::SymTridiagonal{T, V}, b, u, Pl, Pr, - maxiters::Int, abstol, reltol, verbose::Bool, + maxiters::Int, abstol, reltol, verbose::LinearVerbosity, assumptions::OperatorAssumptions) where {T, V} LinearAlgebra.LDLt{T, SymTridiagonal{T, V}}(A) end function init_cacheval(alg::GenericFactorization{typeof(cholesky)}, A::Diagonal, b, u, Pl, Pr, maxiters::Int, - abstol, reltol, verbose::Bool, assumptions::OperatorAssumptions) + abstol, reltol, verbose::LinearVerbosity, assumptions::OperatorAssumptions) Diagonal(inv.(A.diag)) end function init_cacheval( alg::GenericFactorization{typeof(cholesky)}, A::Tridiagonal, b, u, Pl, Pr, maxiters::Int, - abstol, reltol, verbose::Bool, assumptions::OperatorAssumptions) + abstol, reltol, verbose::LinearVerbosity, assumptions::OperatorAssumptions) ArrayInterface.lu_instance(A) end function init_cacheval( alg::GenericFactorization{typeof(cholesky)}, A::SymTridiagonal{T, V}, b, u, Pl, Pr, - maxiters::Int, abstol, reltol, verbose::Bool, + maxiters::Int, abstol, reltol, verbose::LinearVerbosity, assumptions::OperatorAssumptions) where {T, V} LinearAlgebra.LDLt{T, SymTridiagonal{T, V}}(A) end @@ -875,7 +875,7 @@ end function init_cacheval(alg::UMFPACKFactorization, A, b, u, Pl, Pr, maxiters::Int, abstol, reltol, - verbose::Bool, assumptions::OperatorAssumptions) + verbose::LinearVerbosity, assumptions::OperatorAssumptions) nothing end @@ -901,7 +901,7 @@ end function init_cacheval(alg::KLUFactorization, A, b, u, Pl, Pr, maxiters::Int, abstol, reltol, - verbose::Bool, assumptions::OperatorAssumptions) + verbose::LinearVerbosity, assumptions::OperatorAssumptions) nothing end @@ -929,7 +929,7 @@ end function 
init_cacheval(alg::CHOLMODFactorization, A, b, u, Pl, Pr, maxiters::Int, abstol, reltol, - verbose::Bool, assumptions::OperatorAssumptions) + verbose::LinearVerbosity, assumptions::OperatorAssumptions) nothing end @@ -986,13 +986,13 @@ default_alias_b(::NormalCholeskyFactorization, ::Any, ::Any) = true const PREALLOCATED_NORMALCHOLESKY = ArrayInterface.cholesky_instance(rand(1, 1), NoPivot()) function init_cacheval(alg::NormalCholeskyFactorization, A::SMatrix, b, u, Pl, Pr, - maxiters::Int, abstol, reltol, verbose::Bool, + maxiters::Int, abstol, reltol, verbose::LinearVerbosity, assumptions::OperatorAssumptions) return cholesky(Symmetric((A)' * A)) end function init_cacheval(alg::NormalCholeskyFactorization, A, b, u, Pl, Pr, - maxiters::Int, abstol, reltol, verbose::Bool, + maxiters::Int, abstol, reltol, verbose::LinearVerbosity, assumptions::OperatorAssumptions) A_ = convert(AbstractMatrix, A) return ArrayInterface.cholesky_instance( @@ -1003,13 +1003,13 @@ const PREALLOCATED_NORMALCHOLESKY_SYMMETRIC = ArrayInterface.cholesky_instance( Symmetric(rand(1, 1)), NoPivot()) function init_cacheval(alg::NormalCholeskyFactorization, A::Matrix{Float64}, b, u, Pl, Pr, - maxiters::Int, abstol, reltol, verbose::Bool, assumptions::OperatorAssumptions) + maxiters::Int, abstol, reltol, verbose::LinearVerbosity, assumptions::OperatorAssumptions) return PREALLOCATED_NORMALCHOLESKY_SYMMETRIC end function init_cacheval(alg::NormalCholeskyFactorization, A::Union{Diagonal, AbstractSciMLOperator}, b, u, Pl, Pr, - maxiters::Int, abstol, reltol, verbose::Bool, + maxiters::Int, abstol, reltol, verbose::LinearVerbosity, assumptions::OperatorAssumptions) nothing end @@ -1070,7 +1070,7 @@ default_alias_A(::NormalBunchKaufmanFactorization, ::Any, ::Any) = true default_alias_b(::NormalBunchKaufmanFactorization, ::Any, ::Any) = true function init_cacheval(alg::NormalBunchKaufmanFactorization, A, b, u, Pl, Pr, - maxiters::Int, abstol, reltol, verbose::Bool, + maxiters::Int, abstol, reltol, 
verbose::LinearVerbosity, assumptions::OperatorAssumptions) ArrayInterface.bunchkaufman_instance(convert(AbstractMatrix, A)) end @@ -1098,7 +1098,7 @@ A special implementation only for solving `Diagonal` matrices fast. struct DiagonalFactorization <: AbstractDenseFactorization end function init_cacheval(alg::DiagonalFactorization, A, b, u, Pl, Pr, maxiters::Int, - abstol, reltol, verbose::Bool, assumptions::OperatorAssumptions) + abstol, reltol, verbose::LinearVerbosity, assumptions::OperatorAssumptions) nothing end @@ -1149,12 +1149,12 @@ end function init_cacheval(alg::SparspakFactorization, A::Union{AbstractMatrix, Nothing, AbstractSciMLOperator}, b, u, Pl, Pr, maxiters::Int, abstol, reltol, - verbose::Bool, assumptions::OperatorAssumptions) + verbose::LinearVerbosity, assumptions::OperatorAssumptions) nothing end function init_cacheval(::SparspakFactorization, ::StaticArray, b, u, Pl, Pr, - maxiters::Int, abstol, reltol, verbose::Bool, assumptions::OperatorAssumptions) + maxiters::Int, abstol, reltol, verbose::LinearVerbosity, assumptions::OperatorAssumptions) nothing end @@ -1194,39 +1194,39 @@ struct CliqueTreesFactorization{A, S} <: AbstractSparseFactorization end function init_cacheval(::CliqueTreesFactorization, ::Union{AbstractMatrix, Nothing, AbstractSciMLOperator}, b, u, Pl, Pr, - maxiters::Int, abstol, reltol, verbose::Bool, assumptions::OperatorAssumptions) + maxiters::Int, abstol, reltol, verbose::LinearVerbosity, assumptions::OperatorAssumptions) nothing end function init_cacheval(::CliqueTreesFactorization, ::StaticArray, b, u, Pl, Pr, - maxiters::Int, abstol, reltol, verbose::Bool, assumptions::OperatorAssumptions) + maxiters::Int, abstol, reltol, verbose::LinearVerbosity, assumptions::OperatorAssumptions) nothing end # Fallback init_cacheval for extension-based algorithms when extensions aren't loaded # These return nothing since the actual implementations are in the extensions function init_cacheval(::BLISLUFactorization, A, b, u, Pl, Pr, - 
maxiters::Int, abstol, reltol, verbose::Bool, assumptions::OperatorAssumptions) + maxiters::Int, abstol, reltol, verbose::LinearVerbosity, assumptions::OperatorAssumptions) nothing end function init_cacheval(::CudaOffloadLUFactorization, A, b, u, Pl, Pr, - maxiters::Int, abstol, reltol, verbose::Bool, assumptions::OperatorAssumptions) + maxiters::Int, abstol, reltol, verbose::LinearVerbosity, assumptions::OperatorAssumptions) nothing end function init_cacheval(::MetalLUFactorization, A, b, u, Pl, Pr, - maxiters::Int, abstol, reltol, verbose::Bool, assumptions::OperatorAssumptions) + maxiters::Int, abstol, reltol, verbose::LinearVerbosity, assumptions::OperatorAssumptions) nothing end for alg in vcat(InteractiveUtils.subtypes(AbstractDenseFactorization), InteractiveUtils.subtypes(AbstractSparseFactorization)) @eval function init_cacheval(alg::$alg, A::MatrixOperator, b, u, Pl, Pr, - maxiters::Int, abstol, reltol, verbose::Bool, + maxiters::Int, abstol, reltol, verbose::LinearVerbosity, assumptions::OperatorAssumptions) init_cacheval(alg, A.A, b, u, Pl, Pr, - maxiters::Int, abstol, reltol, verbose::Bool, + maxiters::Int, abstol, reltol, verbose::LinearVerbosity, assumptions::OperatorAssumptions) end end diff --git a/src/iterative_wrappers.jl b/src/iterative_wrappers.jl index 16d28d908..35a904c3d 100644 --- a/src/iterative_wrappers.jl +++ b/src/iterative_wrappers.jl @@ -185,7 +185,7 @@ end # zeroinit allows for init_cacheval to start by initing with A (0,0) function init_cacheval(alg::KrylovJL, A, b, u, Pl, Pr, maxiters::Int, abstol, reltol, - verbose::Bool, assumptions::OperatorAssumptions; zeroinit = true) + verbose::LinearVerbosity, assumptions::OperatorAssumptions; zeroinit = true) KS = get_KrylovJL_solver(alg.KrylovAlg) if zeroinit @@ -240,7 +240,7 @@ end # Krylov.jl tries to init with `ArrayPartition(undef, ...)`. Avoid hitting that! 
function init_cacheval( alg::LinearSolve.KrylovJL, A, b::RecursiveArrayTools.ArrayPartition, u, Pl, Pr, - maxiters::Int, abstol, reltol, verbose::Bool, ::LinearSolve.OperatorAssumptions) + maxiters::Int, abstol, reltol, verbose::LinearVerbosity, ::LinearSolve.OperatorAssumptions) return nothing end @@ -268,7 +268,7 @@ function SciMLBase.solve!(cache::LinearCache, alg::KrylovJL; kwargs...) atol = float(cache.abstol) rtol = float(cache.reltol) itmax = cache.maxiters - verbose = cache.verbose ? 1 : 0 + verbose = cache.verbose cacheval = if cache.alg isa DefaultLinearSolver if alg.KrylovAlg === Krylov.gmres! @@ -284,13 +284,16 @@ function SciMLBase.solve!(cache::LinearCache, alg::KrylovJL; kwargs...) cache.cacheval end + krylovJL_verbose = verbosity_to_int(verbose.KrylovJL_verbosity) + args = (cacheval, cache.A, cache.b) - kwargs = (atol = atol, rtol, itmax, verbose, + kwargs = (atol = atol, rtol, itmax, verbose = krylovJL_verbose, ldiv = true, history = true, alg.kwargs...) if cache.cacheval isa Krylov.CgWorkspace N !== I && - @warn "$(alg.KrylovAlg) doesn't support right preconditioning." + @SciMLMessage("$(alg.KrylovAlg) doesn't support right preconditioning.", + verbose, :no_right_preconditioning) Krylov.krylov_solve!(args...; M, kwargs...) elseif cache.cacheval isa Krylov.GmresWorkspace Krylov.krylov_solve!(args...; M, N, restart = alg.gmres_restart > 0, kwargs...) @@ -298,7 +301,8 @@ function SciMLBase.solve!(cache::LinearCache, alg::KrylovJL; kwargs...) Krylov.krylov_solve!(args...; M, N, kwargs...) elseif cache.cacheval isa Krylov.MinresWorkspace N !== I && - @warn "$(alg.KrylovAlg) doesn't support right preconditioning." + @SciMLMessage("$(alg.KrylovAlg) doesn't support right preconditioning.", + verbose, :no_right_preconditioning) Krylov.krylov_solve!(args...; M, kwargs...) else Krylov.krylov_solve!(args...; kwargs...) 
diff --git a/src/mkl.jl b/src/mkl.jl index 0453f8f1a..934a29b45 100644 --- a/src/mkl.jl +++ b/src/mkl.jl @@ -220,14 +220,14 @@ const PREALLOCATED_MKL_LU = begin end function LinearSolve.init_cacheval(alg::MKLLUFactorization, A, b, u, Pl, Pr, - maxiters::Int, abstol, reltol, verbose::Bool, + maxiters::Int, abstol, reltol, verbose::LinearVerbosity, assumptions::OperatorAssumptions) PREALLOCATED_MKL_LU end function LinearSolve.init_cacheval(alg::MKLLUFactorization, A::AbstractMatrix{<:Union{Float32, ComplexF32, ComplexF64}}, b, u, Pl, Pr, - maxiters::Int, abstol, reltol, verbose::Bool, + maxiters::Int, abstol, reltol, verbose::LinearVerbosity, assumptions::OperatorAssumptions) A = rand(eltype(A), 0, 0) ArrayInterface.lu_instance(A), Ref{BlasInt}() @@ -278,7 +278,7 @@ const PREALLOCATED_MKL32_LU = begin end function LinearSolve.init_cacheval(alg::MKL32MixedLUFactorization, A, b, u, Pl, Pr, - maxiters::Int, abstol, reltol, verbose::Bool, + maxiters::Int, abstol, reltol, verbose::LinearVerbosity, assumptions::OperatorAssumptions) # Pre-allocate appropriate 32-bit arrays based on input type m, n = size(A) diff --git a/src/openblas.jl b/src/openblas.jl index 96abb6f14..2f0d4e938 100644 --- a/src/openblas.jl +++ b/src/openblas.jl @@ -245,14 +245,14 @@ const PREALLOCATED_OPENBLAS_LU = begin end function LinearSolve.init_cacheval(alg::OpenBLASLUFactorization, A, b, u, Pl, Pr, - maxiters::Int, abstol, reltol, verbose::Bool, + maxiters::Int, abstol, reltol, verbose::LinearVerbosity, assumptions::OperatorAssumptions) PREALLOCATED_OPENBLAS_LU end function LinearSolve.init_cacheval(alg::OpenBLASLUFactorization, A::AbstractMatrix{<:Union{Float32, ComplexF32, ComplexF64}}, b, u, Pl, Pr, - maxiters::Int, abstol, reltol, verbose::Bool, + maxiters::Int, abstol, reltol, verbose::LinearVerbosity, assumptions::OperatorAssumptions) A = rand(eltype(A), 0, 0) ArrayInterface.lu_instance(A), Ref{BlasInt}() @@ -303,7 +303,7 @@ const PREALLOCATED_OPENBLAS32_LU = begin end function 
LinearSolve.init_cacheval(alg::OpenBLAS32MixedLUFactorization, A, b, u, Pl, Pr, - maxiters::Int, abstol, reltol, verbose::Bool, + maxiters::Int, abstol, reltol, verbose::LinearVerbosity, assumptions::OperatorAssumptions) # Pre-allocate appropriate 32-bit arrays based on input type m, n = size(A) diff --git a/src/simplegmres.jl b/src/simplegmres.jl index a21826c9f..644a62b61 100644 --- a/src/simplegmres.jl +++ b/src/simplegmres.jl @@ -161,7 +161,7 @@ function init_cacheval(alg::SimpleGMRES{UDB}, args...; kwargs...) where {UDB} end function _init_cacheval(::Val{false}, alg::SimpleGMRES, A, b, u, Pl, Pr, maxiters::Int, - abstol, reltol, ::Bool, ::OperatorAssumptions; zeroinit = true, kwargs...) + abstol, reltol, ::LinearVerbosity, ::OperatorAssumptions; zeroinit = true, kwargs...) @unpack memory, restart, blocksize, warm_start = alg if zeroinit @@ -392,7 +392,7 @@ function SciMLBase.solve!(cache::SimpleGMRESCache{false}, lincache::LinearCache) end function _init_cacheval(::Val{true}, alg::SimpleGMRES, A, b, u, Pl, Pr, maxiters::Int, - abstol, reltol, ::Bool, ::OperatorAssumptions; zeroinit = true, + abstol, reltol, ::LinearVerbosity, ::OperatorAssumptions; zeroinit = true, blocksize = alg.blocksize) @unpack memory, restart, warm_start = alg diff --git a/src/simplelu.jl b/src/simplelu.jl index 9917f5869..78d6775ab 100644 --- a/src/simplelu.jl +++ b/src/simplelu.jl @@ -218,6 +218,6 @@ function SciMLBase.solve!(cache::LinearCache, alg::SimpleLUFactorization; kwargs end function init_cacheval(alg::SimpleLUFactorization, A, b, u, Pl, Pr, maxiters::Int, abstol, - reltol, verbose::Bool, assumptions::OperatorAssumptions) + reltol, verbose::LinearVerbosity, assumptions::OperatorAssumptions) LUSolver(convert(AbstractMatrix, A)) end diff --git a/src/verbosity.jl b/src/verbosity.jl new file mode 100644 index 000000000..595041273 --- /dev/null +++ b/src/verbosity.jl @@ -0,0 +1,310 @@ +mutable struct LinearVerbosity{Enabled} <: AbstractVerbositySpecifier{Enabled} + # Error control 
+ default_lu_fallback::SciMLLogging.LogLevel + # Performance + no_right_preconditioning::SciMLLogging.LogLevel + # Numerical + using_iterative_solvers::SciMLLogging.LogLevel + using_IterativeSolvers::SciMLLogging.LogLevel + IterativeSolvers_iterations::SciMLLogging.LogLevel + KrylovKit_verbosity::SciMLLogging.LogLevel + KrylovJL_verbosity::SciMLLogging.LogLevel + HYPRE_verbosity::SciMLLogging.LogLevel + pardiso_verbosity::SciMLLogging.LogLevel + blas_errors::SciMLLogging.LogLevel + blas_invalid_args::SciMLLogging.LogLevel + blas_info::SciMLLogging.LogLevel + blas_success::SciMLLogging.LogLevel + condition_number::SciMLLogging.LogLevel + + function LinearVerbosity{true}(; + # Error control defaults + default_lu_fallback = SciMLLogging.Warn(), + # Performance defaults + no_right_preconditioning = SciMLLogging.Warn(), + # Numerical defaults + using_iterative_solvers = SciMLLogging.Warn(), + using_IterativeSolvers = SciMLLogging.Warn(), + IterativeSolvers_iterations = SciMLLogging.Warn(), + KrylovKit_verbosity = SciMLLogging.Warn(), + KrylovJL_verbosity = SciMLLogging.Silent(), + HYPRE_verbosity = SciMLLogging.Info(), + pardiso_verbosity = SciMLLogging.Silent(), + blas_errors = SciMLLogging.Warn(), + blas_invalid_args = SciMLLogging.Warn(), + blas_info = SciMLLogging.Silent(), + blas_success = SciMLLogging.Silent(), + condition_number = SciMLLogging.Silent()) + + new{true}(default_lu_fallback, no_right_preconditioning, + using_iterative_solvers, using_IterativeSolvers, + IterativeSolvers_iterations, KrylovKit_verbosity, + KrylovJL_verbosity, HYPRE_verbosity, pardiso_verbosity, + blas_errors, blas_invalid_args, blas_info, blas_success, condition_number) + end + + function LinearVerbosity{false}() + new{false}(SciMLLogging.Silent(), SciMLLogging.Silent(), + SciMLLogging.Silent(), SciMLLogging.Silent(), + SciMLLogging.Silent(), SciMLLogging.Silent(), + SciMLLogging.Silent(), SciMLLogging.Silent(), SciMLLogging.Silent(), + SciMLLogging.Silent(), SciMLLogging.Silent(), 
SciMLLogging.Silent(), SciMLLogging.Silent(), SciMLLogging.Silent()) + end +end + +LinearVerbosity(enabled::Bool) = enabled ? LinearVerbosity{true}() : LinearVerbosity{false}() + +function LinearVerbosity(verbose::SciMLLogging.VerbosityPreset) + if verbose isa SciMLLogging.None + LinearVerbosity{false}() + elseif verbose isa SciMLLogging.All + LinearVerbosity{true}( + default_lu_fallback = SciMLLogging.Info(), + no_right_preconditioning = SciMLLogging.Info(), + using_iterative_solvers = SciMLLogging.Info(), + using_IterativeSolvers = SciMLLogging.Info(), + IterativeSolvers_iterations = SciMLLogging.Info(), + KrylovKit_verbosity = SciMLLogging.Info(), + KrylovJL_verbosity = SciMLLogging.Info(), + HYPRE_verbosity = SciMLLogging.Info(), + pardiso_verbosity = SciMLLogging.Info(), + blas_errors = SciMLLogging.Info(), + blas_invalid_args = SciMLLogging.Info(), + blas_info = SciMLLogging.Info(), + blas_success = SciMLLogging.Info(), + condition_number = SciMLLogging.Info() + ) + elseif verbose isa SciMLLogging.Minimal + LinearVerbosity{true}( + default_lu_fallback = SciMLLogging.Error(), + no_right_preconditioning = SciMLLogging.Silent(), + using_iterative_solvers = SciMLLogging.Silent(), + using_IterativeSolvers = SciMLLogging.Silent(), + IterativeSolvers_iterations = SciMLLogging.Silent(), + KrylovKit_verbosity = SciMLLogging.Silent(), + KrylovJL_verbosity = SciMLLogging.Silent(), + HYPRE_verbosity = SciMLLogging.Silent(), + pardiso_verbosity = SciMLLogging.Silent(), + blas_errors = SciMLLogging.Error(), + blas_invalid_args = SciMLLogging.Error(), + blas_info = SciMLLogging.Silent(), + blas_success = SciMLLogging.Silent(), + condition_number = SciMLLogging.Silent() + ) + elseif verbose isa SciMLLogging.Standard + LinearVerbosity{true}() # Use default settings + elseif verbose isa SciMLLogging.Detailed + LinearVerbosity{true}( + default_lu_fallback = SciMLLogging.Info(), + no_right_preconditioning = SciMLLogging.Info(), + using_iterative_solvers = SciMLLogging.Info(), + 
using_IterativeSolvers = SciMLLogging.Info(), + IterativeSolvers_iterations = SciMLLogging.Info(), + KrylovKit_verbosity = SciMLLogging.Warn(), + KrylovJL_verbosity = SciMLLogging.Warn(), + HYPRE_verbosity = SciMLLogging.Info(), + pardiso_verbosity = SciMLLogging.Warn(), + blas_errors = SciMLLogging.Warn(), + blas_invalid_args = SciMLLogging.Warn(), + blas_info = SciMLLogging.Info(), + blas_success = SciMLLogging.Info(), + condition_number = SciMLLogging.Info() + ) + else + LinearVerbosity{true}() # Default fallback + end +end + +@inline function LinearVerbosity(verbose::SciMLLogging.None) + LinearVerbosity{false}() +end + +function LinearVerbosity(; error_control=nothing, performance=nothing, numerical=nothing, kwargs...) + # Validate group arguments + if error_control !== nothing && !(error_control isa SciMLLogging.LogLevel) + throw(ArgumentError("error_control must be a SciMLLogging.LogLevel, got $(typeof(error_control))")) + end + if performance !== nothing && !(performance isa SciMLLogging.LogLevel) + throw(ArgumentError("performance must be a SciMLLogging.LogLevel, got $(typeof(performance))")) + end + if numerical !== nothing && !(numerical isa SciMLLogging.LogLevel) + throw(ArgumentError("numerical must be a SciMLLogging.LogLevel, got $(typeof(numerical))")) + end + + # Validate individual kwargs + for (key, value) in kwargs + if !(key in error_control_options || key in performance_options || key in numerical_options) + throw(ArgumentError("Unknown verbosity option: $key. 
Valid options are: $(tuple(error_control_options..., performance_options..., numerical_options...))")) + end + if !(value isa SciMLLogging.LogLevel) + throw(ArgumentError("$key must be a SciMLLogging.LogLevel, got $(typeof(value))")) + end + end + + # Build arguments using NamedTuple for type stability + default_args = ( + default_lu_fallback = SciMLLogging.Warn(), + no_right_preconditioning = SciMLLogging.Warn(), + using_iterative_solvers = SciMLLogging.Warn(), + using_IterativeSolvers = SciMLLogging.Warn(), + IterativeSolvers_iterations = SciMLLogging.Warn(), + KrylovKit_verbosity = SciMLLogging.Warn(), + KrylovJL_verbosity = SciMLLogging.Silent(), + HYPRE_verbosity = SciMLLogging.Info(), + pardiso_verbosity = SciMLLogging.Silent(), + blas_errors = SciMLLogging.Warn(), + blas_invalid_args = SciMLLogging.Warn(), + blas_info = SciMLLogging.Silent(), + blas_success = SciMLLogging.Silent(), + condition_number = SciMLLogging.Silent() + ) + + # Apply group-level settings + final_args = if error_control !== nothing || performance !== nothing || numerical !== nothing + NamedTuple{keys(default_args)}( + _resolve_arg_value(key, default_args[key], error_control, performance, numerical) + for key in keys(default_args) + ) + else + default_args + end + + # Apply individual overrides + if !isempty(kwargs) + final_args = merge(final_args, NamedTuple(kwargs)) + end + + LinearVerbosity{true}(; final_args...) 
+end + +# Helper function to resolve argument values based on group membership +@inline function _resolve_arg_value(key::Symbol, default_val, error_control, performance, numerical) + if key in error_control_options && error_control !== nothing + return error_control + elseif key in performance_options && performance !== nothing + return performance + elseif key in numerical_options && numerical !== nothing + return numerical + else + return default_val + end +end + +# Group classifications +const error_control_options = (:default_lu_fallback, :blas_errors, :blas_invalid_args) +const performance_options = (:no_right_preconditioning,) +const numerical_options = (:using_iterative_solvers, :using_IterativeSolvers, :IterativeSolvers_iterations, + :KrylovKit_verbosity, :KrylovJL_verbosity, :HYPRE_verbosity, :pardiso_verbosity, + :blas_info, :blas_success, :condition_number) + +function option_group(option::Symbol) + if option in error_control_options + return :error_control + elseif option in performance_options + return :performance + elseif option in numerical_options + return :numerical + else + error("Unknown verbosity option: $option") + end +end + +# Get all options in a group +function group_options(verbosity::LinearVerbosity, group::Symbol) + if group === :error_control + return NamedTuple{error_control_options}(getproperty(verbosity, opt) for opt in error_control_options) + elseif group === :performance + return NamedTuple{performance_options}(getproperty(verbosity, opt) for opt in performance_options) + elseif group === :numerical + return NamedTuple{numerical_options}(getproperty(verbosity, opt) for opt in numerical_options) + else + error("Unknown group: $group") + end +end + +function Base.setproperty!(verbosity::LinearVerbosity, name::Symbol, value) + # Check if this is a group name + if name === :error_control + if value isa SciMLLogging.LogLevel + for opt in error_control_options + setfield!(verbosity, opt, value) + end + else + error("error_control must 
be set to a SciMLLogging.LogLevel") + end + elseif name === :performance + if value isa SciMLLogging.LogLevel + for opt in performance_options + setfield!(verbosity, opt, value) + end + else + error("performance must be set to a SciMLLogging.LogLevel") + end + elseif name === :numerical + if value isa SciMLLogging.LogLevel + for opt in numerical_options + setfield!(verbosity, opt, value) + end + else + error("numerical must be set to a SciMLLogging.LogLevel") + end + else + # Check if this is an individual option + if name in error_control_options || name in performance_options || name in numerical_options + if value isa SciMLLogging.LogLevel + setfield!(verbosity, name, value) + else + error("$name must be set to a SciMLLogging.LogLevel") + end + else + # Fall back to default behavior for unknown properties + setfield!(verbosity, name, value) + end + end +end + +function Base.getproperty(verbosity::LinearVerbosity, name::Symbol) + # Check if this is a group name + if name === :error_control + return group_options(verbosity, :error_control) + elseif name === :performance + return group_options(verbosity, :performance) + elseif name === :numerical + return group_options(verbosity, :numerical) + else + # Fall back to default field access + return getfield(verbosity, name) + end +end + +function Base.show(io::IO, verbosity::LinearVerbosity{Enabled}) where Enabled + if Enabled + println(io, "LinearVerbosity{true}:") + + # Show error control group + println(io, " Error Control:") + for opt in error_control_options + level = getfield(verbosity, opt) + level_name = typeof(level).name.name + println(io, " $opt: $level_name") + end + + # Show performance group + println(io, " Performance:") + for opt in performance_options + level = getfield(verbosity, opt) + level_name = typeof(level).name.name + println(io, " $opt: $level_name") + end + + # Show numerical group + println(io, " Numerical:") + for opt in numerical_options + level = getfield(verbosity, opt) + level_name = 
typeof(level).name.name + println(io, " $opt: $level_name") + end + else + print(io, "LinearVerbosity{false} (all logging disabled)") + end +end diff --git a/test/runtests.jl b/test/runtests.jl index 73fd5413f..e07f9df49 100644 --- a/test/runtests.jl +++ b/test/runtests.jl @@ -17,6 +17,7 @@ if GROUP == "All" || GROUP == "Core" @time @safetestset "Adjoint Sensitivity" include("adjoint.jl") @time @safetestset "ForwardDiff Overloads" include("forwarddiff_overloads.jl") @time @safetestset "Traits" include("traits.jl") + @time @safetestset "Verbosity" include("verbosity.jl") @time @safetestset "BandedMatrices" include("banded.jl") @time @safetestset "Mixed Precision" include("test_mixed_precision.jl") end diff --git a/test/verbosity.jl b/test/verbosity.jl new file mode 100644 index 000000000..dc6e49aff --- /dev/null +++ b/test/verbosity.jl @@ -0,0 +1,388 @@ +using LinearSolve +using LinearSolve: LinearVerbosity, option_group, group_options, BLISLUFactorization +using SciMLLogging +using Test +@testset "LinearVerbosity Tests" begin + @testset "Default constructor" begin + v1 = LinearVerbosity() + @test v1 isa LinearVerbosity{true} + @test v1.default_lu_fallback isa SciMLLogging.Warn + @test v1.KrylovKit_verbosity isa SciMLLogging.Warn + end + + @testset "Bool constructor" begin + v2_true = LinearVerbosity(true) + v2_false = LinearVerbosity(false) + @test v2_true isa LinearVerbosity{true} + @test v2_false isa LinearVerbosity{false} + end + + @testset "VerbosityPreset constructors" begin + v3_none = LinearVerbosity(SciMLLogging.None()) + v3_all = LinearVerbosity(SciMLLogging.All()) + v3_minimal = LinearVerbosity(SciMLLogging.Minimal()) + v3_standard = LinearVerbosity(SciMLLogging.Standard()) + v3_detailed = LinearVerbosity(SciMLLogging.Detailed()) + + @test v3_none isa LinearVerbosity{false} + @test v3_all isa LinearVerbosity{true} + @test v3_all.default_lu_fallback isa SciMLLogging.Info + @test v3_minimal.default_lu_fallback isa SciMLLogging.Error + @test 
v3_minimal.KrylovKit_verbosity isa SciMLLogging.Silent + @test v3_standard isa LinearVerbosity{true} + @test v3_detailed.KrylovKit_verbosity isa SciMLLogging.Warn + end + + @testset "Group-level keyword constructors" begin + v4_error = LinearVerbosity(error_control = SciMLLogging.Error()) + @test v4_error.default_lu_fallback isa SciMLLogging.Error + + v4_numerical = LinearVerbosity(numerical = SciMLLogging.Silent()) + @test v4_numerical.KrylovKit_verbosity isa SciMLLogging.Silent + @test v4_numerical.using_IterativeSolvers isa SciMLLogging.Silent + @test v4_numerical.pardiso_verbosity isa SciMLLogging.Silent + + v4_performance = LinearVerbosity(performance = SciMLLogging.Info()) + @test v4_performance.no_right_preconditioning isa SciMLLogging.Info + end + + @testset "Mixed group and individual settings" begin + v5_mixed = LinearVerbosity( + numerical = SciMLLogging.Silent(), + KrylovKit_verbosity = SciMLLogging.Warn(), + performance = SciMLLogging.Info() + ) + # Individual override should take precedence + @test v5_mixed.KrylovKit_verbosity isa SciMLLogging.Warn + # Other numerical options should use group setting + @test v5_mixed.using_IterativeSolvers isa SciMLLogging.Silent + # Performance group setting should apply + @test v5_mixed.no_right_preconditioning isa SciMLLogging.Info + end + + @testset "Individual keyword arguments" begin + v6_individual = LinearVerbosity( + default_lu_fallback = SciMLLogging.Error(), + KrylovKit_verbosity = SciMLLogging.Info(), + pardiso_verbosity = SciMLLogging.Silent() + ) + @test v6_individual.default_lu_fallback isa SciMLLogging.Error + @test v6_individual.KrylovKit_verbosity isa SciMLLogging.Info + @test v6_individual.pardiso_verbosity isa SciMLLogging.Silent + # Unspecified options should use defaults + @test v6_individual.no_right_preconditioning isa SciMLLogging.Warn + end + + @testset "Group classification functions" begin + @test option_group(:default_lu_fallback) == :error_control + @test 
option_group(:KrylovKit_verbosity) == :numerical + @test option_group(:no_right_preconditioning) == :performance + + # Test error for unknown option + @test_throws ErrorException option_group(:unknown_option) + end + + @testset "Group options function" begin + v8 = LinearVerbosity(numerical = SciMLLogging.Warn()) + numerical_opts = group_options(v8, :numerical) + @test numerical_opts isa NamedTuple + @test :KrylovKit_verbosity in keys(numerical_opts) + @test :using_IterativeSolvers in keys(numerical_opts) + @test numerical_opts.KrylovKit_verbosity isa SciMLLogging.Warn + + error_opts = group_options(v8, :error_control) + @test :default_lu_fallback in keys(error_opts) + + performance_opts = group_options(v8, :performance) + @test :no_right_preconditioning in keys(performance_opts) + + # Test error for unknown group + @test_throws ErrorException group_options(v8, :unknown_group) + end + + @testset "Type parameter consistency" begin + v_enabled = LinearVerbosity{true}() + v_disabled = LinearVerbosity{false}() + + @test v_enabled isa LinearVerbosity{true} + @test v_disabled isa LinearVerbosity{false} + + # Test that the constructors create the right types + @test LinearVerbosity() isa LinearVerbosity{true} + @test LinearVerbosity(true) isa LinearVerbosity{true} + @test LinearVerbosity(false) isa LinearVerbosity{false} + end + + @testset "Group getproperty access" begin + v = LinearVerbosity() + + # Test getting groups returns NamedTuples + error_group = v.error_control + performance_group = v.performance + numerical_group = v.numerical + + @test error_group isa NamedTuple + @test performance_group isa NamedTuple + @test numerical_group isa NamedTuple + + # Test correct keys are present + @test :default_lu_fallback in keys(error_group) + @test :no_right_preconditioning in keys(performance_group) + @test :KrylovKit_verbosity in keys(numerical_group) + @test :using_IterativeSolvers in keys(numerical_group) + @test :pardiso_verbosity in keys(numerical_group) + + # Test 
values are LogLevel types + @test error_group.default_lu_fallback isa SciMLLogging.LogLevel + @test performance_group.no_right_preconditioning isa SciMLLogging.LogLevel + @test numerical_group.KrylovKit_verbosity isa SciMLLogging.LogLevel + + # Individual field access should still work + @test v.default_lu_fallback isa SciMLLogging.Warn + @test v.KrylovKit_verbosity isa SciMLLogging.Warn + end + + @testset "Group setproperty! setting" begin + v = LinearVerbosity() + + # Test setting entire error_control group + v.error_control = SciMLLogging.Error() + @test v.default_lu_fallback isa SciMLLogging.Error + + # Test setting entire performance group + v.performance = SciMLLogging.Info() + @test v.no_right_preconditioning isa SciMLLogging.Info + + # Test setting entire numerical group + v.numerical = SciMLLogging.Silent() + @test v.KrylovKit_verbosity isa SciMLLogging.Silent + @test v.using_IterativeSolvers isa SciMLLogging.Silent + @test v.pardiso_verbosity isa SciMLLogging.Silent + @test v.HYPRE_verbosity isa SciMLLogging.Silent + + # Test that other groups aren't affected + @test v.default_lu_fallback isa SciMLLogging.Error # error_control unchanged + @test v.no_right_preconditioning isa SciMLLogging.Info # performance unchanged + + # Test individual setting still works after group setting + v.KrylovKit_verbosity = SciMLLogging.Warn() + @test v.KrylovKit_verbosity isa SciMLLogging.Warn + # Other numerical options should still be Silent + @test v.using_IterativeSolvers isa SciMLLogging.Silent + end + + @testset "Group setproperty! 
error handling" begin + v = LinearVerbosity() + + # Test error for invalid group value type + @test_throws ErrorException v.error_control = "invalid" + @test_throws ErrorException v.performance = 123 + @test_throws ErrorException v.numerical = :invalid + + # Test error for invalid individual option type + @test_throws ErrorException v.KrylovKit_verbosity = "invalid" + @test_throws ErrorException v.default_lu_fallback = 123 + end + + @testset "getproperty and setproperty! consistency" begin + v = LinearVerbosity() + + # Set a group and verify getproperty reflects the change + v.numerical = SciMLLogging.Error() + numerical_group = v.numerical + + @test all(x -> x isa SciMLLogging.Error, values(numerical_group)) + + # Set individual option and verify both individual and group access work + v.KrylovKit_verbosity = SciMLLogging.Info() + @test v.KrylovKit_verbosity isa SciMLLogging.Info + + updated_numerical = v.numerical + @test updated_numerical.KrylovKit_verbosity isa SciMLLogging.Info + # Other numerical options should still be Error + @test updated_numerical.using_IterativeSolvers isa SciMLLogging.Error + end +end + + +A = [1.0 0 0 0 + 0 1 0 0 + 0 0 1 0 + 0 0 0 0] +b = rand(4) +prob = LinearProblem(A, b) + +@test_logs (:warn, + "LU factorization failed, falling back to QR factorization. `A` is potentially rank-deficient.") solve( + prob, + verbose = LinearVerbosity(default_lu_fallback = SciMLLogging.Warn())) + +@test_logs (:warn, + "LU factorization failed, falling back to QR factorization. `A` is potentially rank-deficient.") solve( + prob, verbose = true) + +@test_logs min_level=SciMLLogging.Logging.Warn solve(prob, verbose = false) + +@test_logs (:info, + "LU factorization failed, falling back to QR factorization. 
`A` is potentially rank-deficient.") solve( + prob, + verbose = LinearVerbosity(default_lu_fallback = SciMLLogging.Info())) + +verb = LinearVerbosity(default_lu_fallback = SciMLLogging.Warn()) + +@test_logs (:warn, + "LU factorization failed, falling back to QR factorization. `A` is potentially rank-deficient.") solve( + prob, + verbose = verb) + +verb.default_lu_fallback = SciMLLogging.Info() + +@test_logs (:info, + "LU factorization failed, falling back to QR factorization. `A` is potentially rank-deficient.") solve( + prob, + verbose = verb) + +@testset "BLAS Return Code Interpretation" begin + # Test interpretation of various BLAS return codes + @testset "Return Code Interpretation" begin + # Test successful operation + category, message, details = LinearSolve.interpret_blas_code(:dgetrf, 0) + @test category == :success + @test message == "Operation completed successfully" + + # Test invalid argument + category, message, details = LinearSolve.interpret_blas_code(:dgetrf, -3) + @test category == :invalid_argument + @test occursin("Argument 3", details) + + # Test singular matrix in LU + category, message, details = LinearSolve.interpret_blas_code(:dgetrf, 2) + @test category == :singular_matrix + @test occursin("U(2,2)", details) + + # Test not positive definite in Cholesky + category, message, details = LinearSolve.interpret_blas_code(:dpotrf, 3) + @test category == :not_positive_definite + @test occursin("minor of order 3", details) + + # Test SVD convergence failure + category, message, details = LinearSolve.interpret_blas_code(:dgesvd, 5) + @test category == :convergence_failure + @test occursin("5 off-diagonal", details) + end + + @testset "BLAS Operation Info" begin + # Test getting operation info without condition number + A = rand(10, 10) + b = rand(10) + + # Test with condition_number disabled (default) + info = LinearSolve.get_blas_operation_info(:dgetrf, A, b) + + @test info[:matrix_size] == (10, 10) + @test info[:element_type] == Float64 + @test 
!haskey(info, :condition_number) # Should not compute by default + @test info[:memory_usage_MB] >= 0 # Memory can be 0 for very small matrices + + # Test with condition number computation enabled via verbosity + verbose_with_cond = LinearVerbosity(condition_number = SciMLLogging.Info()) + info_with_cond = LinearSolve.get_blas_operation_info( + :dgetrf, A, b, condition = !isa(verbose_with_cond.condition_number, SciMLLogging.Silent)) + @test haskey(info_with_cond, :condition_number) + end + + @testset "Error Categories" begin + # Test different error categories are properly identified + test_cases = [ + (:dgetrf, 1, :singular_matrix), + (:dpotrf, 2, :not_positive_definite), + (:dgeqrf, 3, :numerical_issue), + (:dgesdd, 4, :convergence_failure), + (:dsyev, 5, :convergence_failure), + (:dsytrf, 6, :singular_matrix), + (:dgetrs, 1, :unexpected_error), + (:unknown_func, 1, :unknown_error) + ] + + for (func, code, expected_category) in test_cases + category, _, _ = LinearSolve.interpret_blas_code(func, code) + @test category == expected_category + end + end +end + +# Try to load BLIS extension +try + using blis_jll, LAPACK_jll +catch LoadError + # BLIS dependencies not available, tests will be skipped +end + +@testset "BLIS Verbosity Integration Tests" begin + @testset "BLIS solver with verbosity logging" begin + # Test basic BLIS solver functionality with verbosity + if Base.get_extension(LinearSolve, :LinearSolveBLISExt) == nothing + # Only test if BLIS is available + @info "Skipping BLIS tests - BLIS not available" + else + # Test successful solve with success logging enabled + A_good = [2.0 1.0; 1.0 2.0] + b_good = [3.0, 4.0] + prob_good = LinearProblem(A_good, b_good) + + verbose_success = LinearVerbosity( + blas_success = SciMLLogging.Info(), + blas_errors = SciMLLogging.Silent(), + blas_info = SciMLLogging.Silent() + ) + + @test_logs (:info, r"BLAS LU factorization.*completed successfully") solve( + prob_good, BLISLUFactorization(); verbose = verbose_success) + + # 
Test singular matrix with error logging + A_singular = [1.0 2.0; 2.0 4.0] + b_singular = [1.0, 2.0] + prob_singular = LinearProblem(A_singular, b_singular) + + verbose_errors = LinearVerbosity( + blas_errors = SciMLLogging.Warn(), + blas_success = SciMLLogging.Silent(), + blas_info = SciMLLogging.Silent() + ) + + @test_logs (:warn, r"BLAS/LAPACK.*Matrix is singular") solve( + prob_singular, BLISLUFactorization(); verbose = verbose_errors) + + # Test with info logging enabled + verbose_info = LinearVerbosity( + blas_info = SciMLLogging.Info(), + blas_errors = SciMLLogging.Info(), + blas_success = SciMLLogging.Silent() + ) + + @test_logs (:info, r"BLAS/LAPACK.*Matrix is singular") solve( + prob_singular, BLISLUFactorization(); verbose = verbose_info) + + # Test with all BLAS logging disabled - should produce no logs + verbose_silent = LinearVerbosity( + blas_errors = SciMLLogging.Silent(), + blas_invalid_args = SciMLLogging.Silent(), + blas_info = SciMLLogging.Silent(), + blas_success = SciMLLogging.Silent() + ) + + @test_logs min_level=SciMLLogging.Logging.Warn solve( + prob_singular, BLISLUFactorization(); verbose = verbose_silent) + + # Test condition number logging if enabled + verbose_with_cond = LinearVerbosity( + condition_number = SciMLLogging.Info(), + blas_success = SciMLLogging.Info(), + blas_errors = SciMLLogging.Silent() + ) + + @test_logs (:info, r"Matrix condition number:.*for.*matrix") match_mode=:any solve( + prob_good, BLISLUFactorization(); verbose = verbose_with_cond) + end + end +end \ No newline at end of file