diff --git a/Project.toml b/Project.toml index 3ad86c655..800e1e45b 100644 --- a/Project.toml +++ b/Project.toml @@ -26,6 +26,7 @@ NamedTupleTools = "d9ec5142-1e00-5aa0-9d6a-321866360f50" OrderedCollections = "bac558e1-5e72-5ebc-8fee-abe8a469f55d" Parameters = "d96e819e-fc66-5662-9728-84c9c7592b0a" ProgressLogging = "33c8b6b6-d38a-422a-b730-caa89a2f386c" +PyFormattedStrings = "5f89f4a4-a228-4886-b223-c468a82ed5b9" Random = "9a3f8284-a2c9-5f02-9a11-845980a1fd5c" Reexport = "189a3867-3050-52da-a836-e630ba90ab69" Setfield = "efcf1570-3423-57d1-acb7-fd33fddbac46" @@ -34,8 +35,6 @@ SpecialFunctions = "276daf66-3868-5448-9aa4-cd146d93841b" StaticArrays = "90137ffa-7385-5640-81b9-e52037218182" Statistics = "10745b16-79ce-11e8-11f9-7d13ad32a3b2" StatsBase = "2913bbd2-ae8a-5f71-8c99-4fb6c76f3a91" -StrFormat = "b5087856-efa9-5a6d-8e6f-97303a7af894" -StrLiterals = "68059f60-971f-57ff-a2d0-18e7de9ccc84" TOML = "fa267f1f-6049-4f14-aa54-33bafae1ed76" Tables = "bd369af6-aec1-5ad0-b16a-f7cc5008161c" TerminalLoggers = "5d786b92-1e48-4d6f-9151-6b4477ca9bed" @@ -76,14 +75,13 @@ NamedTupleTools = "v0.14" OrderedCollections = "1" Parameters = "0.12" ProgressLogging = "0.1.3" +PyFormattedStrings = "0.1.12" Reexport = "1" Setfield = "0.7, 0.8, 1" SpecialFunctions = "1, 2" StaticArrays = "1" Statistics = "1" StatsBase = "0.33, 0.34" -StrFormat = "1" -StrLiterals = "1" TOML = "1" Tables = "1.9" TerminalLoggers = "0.1.4" diff --git a/docs/src/exactdiagonalization.md b/docs/src/exactdiagonalization.md index 13ebe6691..a4b9bb610 100644 --- a/docs/src/exactdiagonalization.md +++ b/docs/src/exactdiagonalization.md @@ -12,6 +12,7 @@ ExactDiagonalization ExactDiagonalizationProblem solve(::ExactDiagonalizationProblem) init(::ExactDiagonalizationProblem) +estimate_memory_requirement ``` ## Solver algorithms diff --git a/src/ExactDiagonalization/ExactDiagonalization.jl b/src/ExactDiagonalization/ExactDiagonalization.jl index 7b19d38be..60859a0a4 100644 --- a/src/ExactDiagonalization/ExactDiagonalization.jl +++ b/src/ExactDiagonalization/ExactDiagonalization.jl @@ -27,6 +27,7 @@ using VectorInterface: VectorInterface, add using OrderedCollections: freeze using NamedTupleTools: delete using StaticArrays: setindex +using PyFormattedStrings: PyFormattedStrings, @f_str import Folds using Rimu: Rimu, DictVectors, Hamiltonians, Interfaces, BitStringAddresses, replace_keys, @@ -42,6 +43,7 @@ using ..Hamiltonians: allows_address_type, check_address_type, dimension, export ExactDiagonalizationProblem, KrylovKitSolver, LinearAlgebraSolver export ArpackSolver, LOBPCGSolver export BasisSetRepresentation, build_basis +export estimate_memory_requirement export LinearMap diff --git a/src/ExactDiagonalization/algorithms.jl b/src/ExactDiagonalization/algorithms.jl index 2ff291207..917cf1bd4 100644 --- a/src/ExactDiagonalization/algorithms.jl +++ b/src/ExactDiagonalization/algorithms.jl @@ -39,7 +39,7 @@ struct KrylovKitSolver{MatrixFree} <: AbstractAlgorithm{MatrixFree} end end KrylovKitSolver(matrix_free::Bool; kwargs...) = KrylovKitSolver{matrix_free}(; kwargs...) -KrylovKitSolver(; matrix_free=true, kwargs...) = KrylovKitSolver(matrix_free; kwargs...) +KrylovKitSolver(; matrix_free=false, kwargs...) = KrylovKitSolver(matrix_free; kwargs...) function Base.show(io::IO, s::KrylovKitSolver) nt = (; matrix_free=ismatrixfree(s), s.kwargs...) @@ -49,13 +49,12 @@ function Base.show(io::IO, s::KrylovKitSolver) end """ - ArpackSolver(; kwargs...) + ArpackSolver(matrix_free::Bool; kwargs...) + ArpackSolver(; matrix_free = false, kwargs...) 
Algorithm for solving an [`ExactDiagonalizationProblem`](@ref) after instantiating a sparse matrix. It uses the Lanzcos method for hermitian problems, and the Arnoldi method for -non-hermitian problems, using the Arpack Fortran library. This is faster than -[`KrylovKitSolver(; matrix_free=true)`](@ref), but it requires more memory and will only be -useful if the matrix fits into memory. +non-hermitian problems, using the Arpack Fortran library. # Arguments - `matrix_free = false`: Whether to use a matrix-free algorithm. If `false`, a sparse matrix @@ -86,7 +85,7 @@ struct ArpackSolver{MatrixFree} <: AbstractAlgorithm{MatrixFree} end end ArpackSolver(matrix_free::Bool; kwargs...) = ArpackSolver{matrix_free}(; kwargs...) -ArpackSolver(; matrix_free=true, kwargs...) = ArpackSolver(matrix_free; kwargs...) +ArpackSolver(; matrix_free=false, kwargs...) = ArpackSolver(matrix_free; kwargs...) function Base.show(io::IO, s::ArpackSolver) nt = (; matrix_free=ismatrixfree(s), s.kwargs...) @@ -96,7 +95,8 @@ function Base.show(io::IO, s::ArpackSolver) end """ - LOBPCGSolver(; kwargs...) + LOBPCGSolver(matrix_free::Bool; kwargs...) + LOBPCGSolver(; matrix_free = false, kwargs...) The Locally Optimal Block Preconditioned Conjugate Gradient Method (LOBPCG). Algorithm for solving an [`ExactDiagonalizationProblem`](@ref) after instantiating a @@ -133,7 +133,7 @@ struct LOBPCGSolver{MatrixFree} <: AbstractAlgorithm{MatrixFree} end end LOBPCGSolver(matrix_free::Bool; kwargs...) = LOBPCGSolver{matrix_free}(; kwargs...) -LOBPCGSolver(; matrix_free=true, kwargs...) = LOBPCGSolver(matrix_free; kwargs...) +LOBPCGSolver(; matrix_free=false, kwargs...) = LOBPCGSolver(matrix_free; kwargs...) function Base.show(io::IO, s::LOBPCGSolver) nt = (; matrix_free=ismatrixfree(s), s.kwargs...) diff --git a/src/ExactDiagonalization/exact_diagonalization_problem.jl b/src/ExactDiagonalization/exact_diagonalization_problem.jl index d055422b2..8537b335f 100644 --- a/src/ExactDiagonalization/exact_diagonalization_problem.jl +++ b/src/ExactDiagonalization/exact_diagonalization_problem.jl @@ -12,6 +12,10 @@ collection of addresses can be passed as `v0`. - `algorithm=LinearAlgebraSolver()`: The algorithm to use for solving the problem. The algorithm can also be specified as the second positional argument in the `init` function. +- `linear_dimension` (optional): The estimated dimension of the problem. This is + usually automatically determined from the Hamiltonian. +- `info=false`: Whether to print additional information about memory requirements. +- `warn=true`: Whether to print warnings if available memory is insufficient. - Optional keyword arguments will be passed on to the `init` and `solve` functions. # Algorithms @@ -78,6 +82,8 @@ julia> p = ExactDiagonalizationProblem(HubbardReal1D(BoseFS(1,1,1))) ExactDiagonalizationProblem( HubbardReal1D(fs"|1 1 1⟩"; u=1.0, t=1.0), nothing; + algorithm=LinearAlgebraSolver(), + linear_dimension=10, NamedTuple()... ) @@ -111,17 +117,68 @@ See also [`solve(::ExactDiagonalizationProblem)`](@ref), Using the `LOBPCGSolver()` algorithm requires the IterativeSolvers.jl package. The package can be loaded with `using IterativeSolvers`. 
""" -struct ExactDiagonalizationProblem{H<:AbstractHamiltonian, V} +struct ExactDiagonalizationProblem{H<:AbstractHamiltonian,V,ALG<:AbstractAlgorithm,AV} hamiltonian::H initial_vector::V + algorithm::ALG + linear_dimension::Number + addr_or_vec::AV # starting address or iterable of addresses kwargs::NamedTuple +end - function ExactDiagonalizationProblem( - hamiltonian::H, initial_vector::V=nothing; kwargs... - ) where {H<:AbstractHamiltonian,V} - return new{H,V}(hamiltonian, initial_vector, NamedTuple(kwargs)) +function ExactDiagonalizationProblem( + hamiltonian::H, initial_vector::V=nothing; + linear_dimension=nothing, algorithm::ALG=LinearAlgebraSolver(), + info=false, warn=true, kwargs... +) where {H<:AbstractHamiltonian,V,ALG<:AbstractAlgorithm} + # Set up the starting address or vector of addresses + addr_or_vec = _set_up_starting_address(initial_vector, hamiltonian) + if linear_dimension === nothing + linear_dimension = dimension( + hamiltonian, + addr_or_vec isa AbstractFockAddress ? addr_or_vec : first(addr_or_vec) + ) + end + dense, matrix_free, sparse = estimate_memory_requirement( + hamiltonian, linear_dimension + ) + free_memory = Sys.free_memory() # available memory in bytes + total_memory = Sys.total_memory() # total memory in bytes + if info + message = "Setting up ExactDiagonalizationProblem with algorithm $(algorithm)\n"* + f"- Linear dimension is {float(linear_dimension):.1e}\n"* + f"- Estimated memory requirements: dense = {dense/1e6:.1e} MB, "* + f"matrix_free = {matrix_free/1e6:.1e} MB, "* + f"sparse = {sparse/1e6:.1e} MB\n"* + f"- Total available memory: {total_memory/1e6:.1e} MB, free: {free_memory/1e6:.1e} MB" + @info message + end + if warn + if algorithm isa LinearAlgebraSolver && total_memory < dense + message = "ExactDiagonalizationProblem with algorithm $(algorithm):\n"* + f"Available memory may be less than the required memory for a dense matrix.\n\n"* + f"Total available memory: {total_memory/1e6:.1e} MB, required: {dense/1e6:.1e} MB.\n"* + "Consider using a sparse algorithm like `algorithm=KrylovKitSolver()`!" + @warn message + elseif algorithm isa AbstractAlgorithm{false} && total_memory < sparse + message = "ExactDiagonalizationProblem with algorithm $(algorithm):\n"* + f"Available memory may be less than the required memory for a sparse matrix.\n\n"* + f"Total available memory: {total_memory / 1e6:.1e} MB, required: {sparse / 1e6:.1e} MB.\n"* + "Consider using a matrix-free algorithm like `algorithm=KrylovKitSolver(; matrix_free=true)`!" + @warn message + elseif algorithm isa AbstractAlgorithm{true} && total_memory < matrix_free + message = "ExactDiagonalizationProblem with algorithm $(algorithm)\n"* + f"Available memory may be less than the required memory for a matrix-free algorithm.\n"* + f"Total available memory: {total_memory / 1e6:.1e} MB, required: {matrix_free/1e6:.1e} MB" + @warn message + end end + return ExactDiagonalizationProblem{H,V,ALG,typeof(addr_or_vec)}( + hamiltonian, initial_vector, algorithm, linear_dimension, addr_or_vec, + NamedTuple(kwargs) + ) end + function ExactDiagonalizationProblem( hamiltonian::AbstractHamiltonian, v0::AbstractDVec; kwargs... 
) @@ -137,6 +194,8 @@ function Base.show(io::IO, p::ExactDiagonalizationProblem) print(io, ",\n ") show(io, p.initial_vector) print(io, ";\n ") + print(io, "algorithm=$(p.algorithm),\n ") + print(io, "linear_dimension=$(p.linear_dimension),\n ") show(io, p.kwargs) print(io, "...\n)") end @@ -146,4 +205,65 @@ function Base.:(==)(p1::ExactDiagonalizationProblem, p2::ExactDiagonalizationPro p1.kwargs == p2.kwargs end -Rimu.Hamiltonians.dimension(p::ExactDiagonalizationProblem) = dimension(p.hamiltonian) +Rimu.Hamiltonians.dimension(p::ExactDiagonalizationProblem) = p.linear_dimension + +function _set_up_starting_address(v0, ham) + if isnothing(v0) + addr_or_vec = starting_address(ham) + elseif allows_address_type(ham, v0) || + v0 isa Union{NTuple,Vector} && allows_address_type(ham, eltype(v0)) + addr_or_vec = v0 + elseif v0 isa FrozenDVec + addr_or_vec = keys(v0) + else + throw(ArgumentError("Invalid starting vector in `ExactDiagonalizationProblem`.")) + end + + return addr_or_vec # single address or iterable of addresses +end + +""" + estimate_memory_requirement(::ExactDiagonalizationProblem) + estimate_memory_requirement(h::AbstractHamiltonian, linear_dimension=dimension(h)) + -> (; dense, matrix_free, sparse) + +Estimate the memory requirement for an [`ExactDiagonalizationProblem`](@ref). +This function estimates the memory requirement based on the linear dimension and the +Hamiltonian. It returns an estimate of the memory size in bytes for three +different types of algorithms: +- `dense`: The memory requirement for [`LinearAlgebraSolver()`](@ref) using a dense matrix. +- `matrix_free`: The memory requirement for [`KrylovKitSolver(; matrix_free=true)`](@ref) + using a matrix-free algorithm. +- `sparse`: The memory requirement for [`KrylovKitSolver(; matrix_free=false)`](@ref) + using a sparse matrix. + +The memory requirements for other sparse solvers are expected to be similar to the +`KrylovKitSolver` estimates. +""" +function estimate_memory_requirement(prob::ExactDiagonalizationProblem) + return estimate_memory_requirement(prob.hamiltonian, prob.linear_dimension) +end + +function estimate_memory_requirement( + hamiltonian::AbstractHamiltonian, + linear_dimension=dimension(hamiltonian) +) + krylovdim = 30 + # standard Krylov dimension in KrylovKit, could be read from the algorithm + # LOBPCG should need much less memory + address_size = sizeof(starting_address(hamiltonian)) + column_size = Rimu.num_offdiagonals(hamiltonian, starting_address(hamiltonian)) + coefficient_size = sizeof(eltype(hamiltonian)) + + dense = 2 * linear_dimension^2 * coefficient_size + # for dense matrix and QR + linear_dimension * address_size # for basis + + matrix_free = krylovdim * linear_dimension * coefficient_size + # for solver algorithm + 2 * linear_dimension * (address_size + coefficient_size) # for LinearMap + + sparse = linear_dimension * column_size * coefficient_size + # for sparse matrix + linear_dimension * address_size + # for basis + krylovdim * linear_dimension * coefficient_size # for solver algorithm + + return (; dense, matrix_free, sparse) +end diff --git a/src/ExactDiagonalization/init_and_solvers.jl b/src/ExactDiagonalization/init_and_solvers.jl index 79c2f0254..57d3e3399 100644 --- a/src/ExactDiagonalization/init_and_solvers.jl +++ b/src/ExactDiagonalization/init_and_solvers.jl @@ -13,27 +13,17 @@ function CommonSolve.init( # no algorithm specified as positional argument kwargs... ) kwargs = (; prob.kwargs..., kwargs...) 
# remove duplicates - algorithm = get(kwargs, :algorithm, LinearAlgebraSolver()) - kwargs = delete(kwargs, :algorithm) - new_prob = ExactDiagonalizationProblem(prob.hamiltonian, prob.initial_vector; kwargs...) + algorithm = get(kwargs, :algorithm, prob.algorithm) + info = get(kwargs, :info, false) + warn = get(kwargs, :warn, false) + kwargs = delete(kwargs, :info, :warn, :algorithm) + new_prob = ExactDiagonalizationProblem( + prob.hamiltonian, prob.initial_vector; + info, warn, algorithm, kwargs... + ) return init(new_prob, algorithm) end -# TODO since this is the same for all solvers, maybe move it to ExactDiagonalizationProblem? -function _set_up_starting_address(v0, ham) - if isnothing(v0) - addr_or_vec = starting_address(ham) - elseif allows_address_type(ham, v0) || - v0 isa Union{NTuple,Vector} && allows_address_type(ham, eltype(v0)) - addr_or_vec = v0 - elseif v0 isa FrozenDVec - addr_or_vec = keys(v0) - else - throw(ArgumentError("Invalid starting vector in `ExactDiagonalizationProblem`.")) - end - - return addr_or_vec -end function _set_up_initial_vector(ham, v0, basis) T = float(eltype(ham)) if isnothing(v0) @@ -83,13 +73,11 @@ function CommonSolve.init( kwargs = (; prob.kwargs..., algorithm.kwargs..., kwargs...) linmap_kwargs, solver_kwargs = split_keys(kwargs, :basis, :full_basis) - # determine the starting address or vector and seed address to build the matrix from - addr_or_vec = _set_up_starting_address( - prob.initial_vector, prob.hamiltonian + # create the LinearMap + linmap = LinearMap(prob.hamiltonian; + starting_address=prob.addr_or_vec, linmap_kwargs... ) - # create the LinearMap - linmap = LinearMap(prob.hamiltonian; starting_address=addr_or_vec, linmap_kwargs...) basis = linmap.basis initial_vector = _set_up_initial_vector(prob.hamiltonian, prob.initial_vector, basis) @@ -111,13 +99,8 @@ function CommonSolve.init( :sizelim, :cutoff, :filter, :nnzs, :col_hint, :sort, :max_depth, :minimum_size ) - # determine the starting address or vector and seed address to build the matrix from - addr_or_vec = _set_up_starting_address( - prob.initial_vector, prob.hamiltonian - ) - # create the BasisSetRepresentation - bsr = BasisSetRepresentation(prob.hamiltonian, addr_or_vec; bsr_kwargs...) + bsr = BasisSetRepresentation(prob.hamiltonian, prob.addr_or_vec; bsr_kwargs...) matrix = bsr.sparse_matrix basis = bsr.basis @@ -125,6 +108,7 @@ function CommonSolve.init( return IterativeEDSolver(algorithm, prob, matrix, initial_vector, basis, solver_kwargs) end +Hamiltonians.dimension(s::IterativeEDSolver) = dimension(s.linear_map) struct DenseEDSolver{P,A,BSR} problem::P @@ -153,13 +137,10 @@ function CommonSolve.init( :sizelim, :cutoff, :filter, :nnzs, :col_hint, :sort, :max_depth, :minimum_size ) - # determine the seed address to build the matrix from - addr_or_vec = _set_up_starting_address( - prob.initial_vector, prob.hamiltonian - ) - # create the BasisSetRepresentation - bsr = BasisSetRepresentation(prob.hamiltonian, addr_or_vec; bsr_kwargs...) + bsr = BasisSetRepresentation(prob.hamiltonian, prob.addr_or_vec; bsr_kwargs...) 
return DenseEDSolver(prob, algorithm, bsr, solver_kwargs) end + +Rimu.Hamiltonians.dimension(s::DenseEDSolver) = dimension(s.basis_set_rep) diff --git a/src/ExactDiagonalization/operator_as_map.jl b/src/ExactDiagonalization/operator_as_map.jl index d04889169..878f20f13 100644 --- a/src/ExactDiagonalization/operator_as_map.jl +++ b/src/ExactDiagonalization/operator_as_map.jl @@ -22,7 +22,7 @@ end Wrapper for an [`AbstractOperator`](@ref) and a basis that allows multiplying regular Julia vectors with the operator without storing the matrix representation of the operator in -memory. +memory. Requires that the `adjoint` of the operator is implemented. If an `op::`[`AbstractOperator`](@ref) with no `basis` is passed, the basis is constructed automatically from the [`starting_address`](@ref) (note that function is only defined for @@ -82,7 +82,7 @@ function LinearMaps.LinearMap( full_basis::Bool=false, ) if !isnothing(basis) - full_basis && @warn "`basis` and `full_basis` given. Ignorning `full_basis`." + full_basis && @warn "`basis` and `full_basis` given. Ignoring `full_basis`." elseif full_basis basis = build_basis(starting_address) else @@ -98,6 +98,8 @@ function LinearMaps.LinearMap( return LinearMap(operator; starting_address, full_basis) end +Hamiltonians.dimension(op::LinearMap) = size(op, 1) + Base.size(op::OperatorAsMap) = (length(op.basis), length(op.basis)) Base.size(op::OperatorAsMap, i) = length(op.basis) LinearAlgebra.ishermitian(op::OperatorAsMap) = ishermitian(op.operator_adj) diff --git a/src/ExactDiagonalization/solve.jl b/src/ExactDiagonalization/solve.jl index 37095b289..4b48292d7 100644 --- a/src/ExactDiagonalization/solve.jl +++ b/src/ExactDiagonalization/solve.jl @@ -44,6 +44,8 @@ Base.iterate(S::EDResult, ::Val{:vectors}) = (S.vectors, Val(:success)) Base.iterate(S::EDResult, ::Val{:success}) = (S.info, Val(:done)) Base.iterate(::EDResult, ::Val{:done}) = nothing +Hamiltonians.dimension(r::EDResult) = length(r.basis) + function Base.show(io::IO, r::EDResult) io = IOContext(io, :compact => true) n = length(r.values) diff --git a/src/Hamiltonians/abstract.jl b/src/Hamiltonians/abstract.jl index f2b7f9197..3f790cc5d 100644 --- a/src/Hamiltonians/abstract.jl +++ b/src/Hamiltonians/abstract.jl @@ -69,6 +69,14 @@ dimension(h::AbstractHamiltonian) = dimension(h, starting_address(h)) dimension(::AbstractObservable, addr) = dimension(addr) dimension(addr::AbstractFockAddress) = dimension(typeof(addr)) dimension(::T) where {T<:Number} = typemax(T) # e.g. 
integer addresses +function dimension(m::AbstractMatrix) + r,c = size(m) + if r == c + return r + else + throw(ArgumentError("Matrix is not square")) + end +end function dimension(::Type{<:BoseFS{N,M}}) where {N,M} return number_conserving_bose_dimension(N,M) diff --git a/src/Rimu.jl b/src/Rimu.jl index ba9bed471..92abb5a72 100644 --- a/src/Rimu.jl +++ b/src/Rimu.jl @@ -73,7 +73,7 @@ export DontUpdate, DoubleLogUpdate, DoubleLogUpdateAfterTargetWalkers export ReportingStrategy, ReportDFAndInfo, ReportToFile export ReplicaStrategy, NoStats, AllOverlaps export PostStepStrategy, Projector, ProjectedEnergy, SignCoherence, WalkerLoneliness, Timer, - SingleParticleDensity, single_particle_density + SingleParticleDensity, single_particle_density, VMSize export TimeStepStrategy, ConstantTimeStep, OvershootControl export localpart, walkernumber export smart_logger, default_logger @@ -88,6 +88,7 @@ function __init__() MPI.Initialized() || MPI.Init(threadlevel=:funneled) end +include("strategies_and_params/get_vmsize.jl") include("strategies_and_params/fciqmcrunstrategy.jl") include("strategies_and_params/poststepstrategy.jl") include("strategies_and_params/replicastrategy.jl") diff --git a/src/StatsTools/StatsTools.jl b/src/StatsTools/StatsTools.jl index f46a086fa..5ac931d85 100644 --- a/src/StatsTools/StatsTools.jl +++ b/src/StatsTools/StatsTools.jl @@ -32,8 +32,7 @@ using MonteCarloMeasurements: MonteCarloMeasurements, AbstractParticles, pcov, using Random: Random using SpecialFunctions: SpecialFunctions, erf using Statistics: Statistics -using StrFormat: StrFormat, @f_str -using StrLiterals: StrLiterals +using PyFormattedStrings: PyFormattedStrings, @f_str import ..Interfaces: num_replicas, num_spectral_states, num_overlaps import ProgressLogging, Folds diff --git a/src/StatsTools/ratio_of_means.jl b/src/StatsTools/ratio_of_means.jl index b2a201242..4321309f6 100644 --- a/src/StatsTools/ratio_of_means.jl +++ b/src/StatsTools/ratio_of_means.jl @@ -63,10 +63,10 @@ function Base.show(io::IO, r::RatioBlockingResult{T,P}) where {T<:Real,P} q = pquantile(r.ratio, [0.16, 0.5, 0.84]) qr95 = pquantile(r.ratio, [0.025, 0.975]) println(io, "RatioBlockingResult{$T,$P}") - println(io, f" ratio = \%g(q[2]) ± (\%g(q[3]-q[2]), \%g(q[2]-q[1])) (MC)") - println(io, f" 95% confidence interval: [\%g(qr95[1]), \%g(qr95[2])] (MC)") - println(io, f" linear error propagation: \%g(r.f) ± \%g(r.σ_f)") - println(io, f" |δ_y| = |\%g(r.δ_y)| (≤ 0.1 for normal approx)") + println(io, f" ratio = {q[2]:5g} ± ({q[3]-q[2]:5g}, {q[2]-q[1]:5g}) (MC)") + println(io, f" 95% confidence interval: [{qr95[1]:5g}, {qr95[2]:5g}] (MC)") + println(io, f" linear error propagation: {r.f:5g} ± {r.σ_f:5g}") + println(io, f" |δ_y| = {r.δ_y:5g} (≤ 0.1 for normal approx)") if r.success println(io, " Blocking successful with $(r.blocks) blocks after $(r.k-1) transformations (k = $(r.k)).") else @@ -78,13 +78,15 @@ function Base.show(io::IO, r::RatioBlockingResult{T,P}) where {T<:Complex,P} qr = pquantile(real(r.ratio), [0.16, 0.5, 0.84]) qi = pquantile(imag(r.ratio), [0.16, 0.5, 0.84]) println(io, "RatioBlockingResult{$T,$P}") - println(io, f" ratio = \%g(qr[2]) ± (\%g(qr[3]-qr[2]), \%g(qr[2]-qr[1])) + [\%g(qi[2]) ± (\%g(qi[3]-qi[2]), \%g(qi[2]-qi[1]))]*im (MC)") + println(io, f" ratio = {qr[2]:5g} ± ({qr[3]-qr[2]:5g}, {qr[2]-qr[1]:5g}) + "* + f"[{qi[2]:5g} ± ({qi[3]-qi[2]:5g}, {qi[2]-qi[1]:5g})]*im (MC)" + ) qr95 = pquantile(real(r.ratio), [0.025, 0.975]) qi95 = pquantile(imag(r.ratio), [0.025, 0.975]) - println(io, f" 95% confidence interval real: 
[\%g(qr95[1]), \%g(qr95[2])] (MC)") - println(io, f" 95% confidence interval imag: [\%g(qi95[1]), \%g(qi95[2])] (MC)") + println(io, f" 95% confidence interval real: [{qr95[1]:5g}, {qr95[2]:5g}] (MC)") + println(io, f" 95% confidence interval imag: [{qi95[1]:5g}, {qi95[2]:5g}] (MC)") println(io, " linear error propagation: ($(r.f)) ± ($(r.σ_f))") - println(io, f" |δ_y| = \%g(abs(r.δ_y)) (≤ 0.1 for normal approx)") + println(io, f" |δ_y| = {abs(r.δ_y):5g} (≤ 0.1 for normal approx)") if r.success println(io, " Blocking successful with $(r.blocks) blocks after $(r.k-1) transformations (k = $(r.k)).") else diff --git a/src/strategies_and_params/get_vmsize.jl b/src/strategies_and_params/get_vmsize.jl new file mode 100644 index 000000000..296a12b8b --- /dev/null +++ b/src/strategies_and_params/get_vmsize.jl @@ -0,0 +1,44 @@ +# copied from https://github.com/JuliaLang/julia/blob/master/test/netload/memtest.jl#L3 +# This relies on internals of the Julia runtime, so it may break in future versions. + +struct RUsage + ru_utime_sec::Clong # user CPU time used + ru_utime_usec::Clong # user CPU time used + ru_stime_sec::Clong # system CPU time used + ru_stime_usec::Clong # system CPU time used + ru_maxrss::Clong # maximum resident set size + ru_ixrss::Clong # integral shared memory size + ru_idrss::Clong # integral unshared data size + ru_isrss::Clong # integral unshared stack size + ru_minflt::Clong # page reclaims (soft page faults) + ru_majflt::Clong # page faults (hard page faults) + ru_nswap::Clong # swaps + ru_inblock::Clong # block input operations + ru_oublock::Clong # block output operations + ru_msgsnd::Clong # IPC messages sent + ru_msgrcv::Clong # IPC messages received + ru_nsignals::Clong # signals received + ru_nvcsw::Clong # voluntary context switches + ru_nivcsw::Clong # involuntary context switches +end + + +""" + Rimu.get_vmsize() + +Get the maximum resident set size (VM size) of the current process in bytes. + +## Example +```julia +julia> Rimu.get_vmsize() +1214218240 + +julia> Rimu.get_vmsize()|> Base.format_bytes +"1.133 GiB" +``` +""" +function get_vmsize() + ru = Vector{RUsage}(undef, 1) + ccall(:getrusage, Cint, (Cint, Ptr{Cvoid}), 0, ru) + return ru[1].ru_maxrss +end diff --git a/src/strategies_and_params/poststepstrategy.jl b/src/strategies_and_params/poststepstrategy.jl index 447a538a3..faf37788c 100644 --- a/src/strategies_and_params/poststepstrategy.jl +++ b/src/strategies_and_params/poststepstrategy.jl @@ -187,7 +187,7 @@ function loneliness(::Type{<:Complex}, vector, threshold) end """ - Timer <: PostStepStrategy + Timer() <: PostStepStrategy Record current time after every step. See `Base.Libc.time` for information on what time is recorded. @@ -288,3 +288,24 @@ function single_particle_density(add::Union{CompositeFS}; component=0) return float.(Tuple(onr(add)[component])) end end + +""" + VMSize(; human_readable=true) <: PostStepStrategy + +After each step, compute the maximum resident set size (VM size) of the current process in +bytes. Reports to a column named `vm_size`. +The `human_readable` keyword argument can be used to format the output in a human-readable +way. The default is `true`, which formats the output as a string using `Base.format_bytes`. + +See also [`PostStepStrategy`](@ref). 
+""" +struct VMSize{HumanReadable} <: PostStepStrategy end + +VMSize(human_readable::Bool) = VMSize{human_readable}() +function VMSize(; human_readable=true) + return VMSize(human_readable) +end + +post_step_action(::VMSize{false}, _, _) = (:vm_size => get_vmsize(),) +# `base.format_bytes` is not a public function, so might break in the future. +post_step_action(::VMSize{true}, _, _) = (:vm_size => Base.format_bytes(get_vmsize()),) diff --git a/src/strategies_and_params/reportingstrategy.jl b/src/strategies_and_params/reportingstrategy.jl index c30eaae13..57293d218 100644 --- a/src/strategies_and_params/reportingstrategy.jl +++ b/src/strategies_and_params/reportingstrategy.jl @@ -141,7 +141,7 @@ function report!(report::Report, nt::NamedTuple, postfix::SymbolOrString="") report!(report, pairs(nt), postfix) return report end -function report!(report::Report, kvpairs, postfix::SymbolOrString="") +function report!(report::Report, kvpairs::Union{Tuple,Base.Pairs}, postfix::SymbolOrString="") for (k, v) in kvpairs report!(report, k, v, postfix) end diff --git a/test/ExactDiagonalization.jl b/test/ExactDiagonalization.jl index a5d73ebc1..2a9bcf2a5 100644 --- a/test/ExactDiagonalization.jl +++ b/test/ExactDiagonalization.jl @@ -328,6 +328,7 @@ Random.seed!(1234) # for reproducibility, as some solvers start with random vect solver = init(prob, alg) @test repr(solver) isa String # no error on print @test solver.problem == prob + @test dimension(solver) ≤ dimension(ham) end @testset "Sanity checks" begin prob = ExactDiagonalizationProblem(ham) @@ -336,6 +337,8 @@ Random.seed!(1234) # for reproducibility, as some solvers start with random vect @test result.success @test length(result.values) == length(result.vectors) @test length(result.coefficient_vectors) == length(result.vectors) + @test dimension(result) ≤ dimension(ham) + @test dimension(result) == length(result.basis) == length(result.vectors[1]) for (i, dv) in enumerate(result.vectors) @test DVec(zip(result.basis, result.coefficient_vectors[i])) ≈ dv @@ -368,11 +371,6 @@ Random.seed!(1234) # for reproducibility, as some solvers start with random vect prob = ExactDiagonalizationProblem(ham; algorithm=alg) @test init(prob).algorithm == alg @test init(prob, LinearAlgebraSolver()).algorithm == LinearAlgebraSolver() - - @test_logs( - (:warn, "The keyword(s) \"algorithm\" are unused and will be ignored."), - solve(prob, KrylovKitSolver()) - ) end @testset "Unused kwargs" begin prob = ExactDiagonalizationProblem(ham; one=1) @@ -419,7 +417,7 @@ Random.seed!(1234) # for reproducibility, as some solvers start with random vect @testset "Unsuccessful solve warning" begin ham = HubbardMom1D(BoseFS(10, 5 => 10); u=6.0) for alg in algs[2:end] - prob = ExactDiagonalizationProblem(ham; howmany=10) + prob = ExactDiagonalizationProblem(ham; warn=false, howmany=10) result = @test_logs((:warn,), solve(prob, alg; maxiters=1)) @test !result.success end @@ -428,14 +426,21 @@ Random.seed!(1234) # for reproducibility, as some solvers start with random vect @testset "General" begin @testset "Bad starting vector" begin ham = HubbardReal1D(BoseFS(1, 2, 3)) - prob = ExactDiagonalizationProblem(ham, [1, 2, 3]) - @test_throws ArgumentError init(prob, KrylovKitSolver(true)) - @test_throws ArgumentError init(prob, ArpackSolver()) + @test_throws ArgumentError ExactDiagonalizationProblem(ham, [1, 2, 3]) end @testset "LOBPCG which errors" begin prob = ExactDiagonalizationProblem(HubbardMom1D(BoseFS(0, 2, 0))) @test_throws ArgumentError solve(prob, LOBPCGSolver(); which=:LM) end + 
+ @testset "Memory info and warning" begin + @test_logs((:info,), + ExactDiagonalizationProblem(HubbardMom1D(BoseFS(0, 2, 0)); info=true) + ) + @test_logs((:warn,), + ExactDiagonalizationProblem(HubbardReal1D(BoseFS(100, 1 => 100))) + ) + end end end diff --git a/test/projector_monte_carlo_problem.jl b/test/projector_monte_carlo_problem.jl index 39404acfe..2fd60b8c8 100644 --- a/test/projector_monte_carlo_problem.jl +++ b/test/projector_monte_carlo_problem.jl @@ -246,7 +246,7 @@ using Rimu: num_replicas, num_spectral_states # continue simulation and change strategies sm3 = solve!(sm; last_step = 600, - post_step_strategy = Rimu.Timer(), + post_step_strategy = (Rimu.Timer(), VMSize()), metadata = Dict(:test => 1) ) @test sm3 === sm @@ -254,7 +254,7 @@ using Rimu: num_replicas, num_spectral_states @test sm.state.step[] == 600 @test size(sm.df)[1] == 100 # the report was emptied @test parse(Int, Rimu.get_metadata(sm.report, "test")) == 1 - @test Rimu.get_metadata(sm.report, "post_step_strategy") == "(Rimu.Timer(),)" + @test Rimu.get_metadata(sm.report, "post_step_strategy") == "(Rimu.Timer(), VMSize{true}())" # continue simulation and change replica strategy @test_throws ArgumentError solve!(sm; replica_strategy = NoStats(3))
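For reference, a minimal usage sketch of the API added in this patch (memory-requirement estimation, the `info`/`warn` keywords of `ExactDiagonalizationProblem`, and the `VMSize` post-step strategy). This assumes a build that includes these changes; the reported numbers are machine-dependent and the memory figures are rough estimates only.

```julia
using Rimu

h = HubbardReal1D(BoseFS(1, 1, 1))

# Rough memory estimates in bytes for the three solver approaches
mem = estimate_memory_requirement(h)
mem.dense, mem.matrix_free, mem.sparse

# Log the estimates when setting up the problem; a warning is emitted
# if the chosen algorithm is unlikely to fit into memory
p = ExactDiagonalizationProblem(h; info=true)
solve(p)

# Current maximum resident set size of the Julia process
Rimu.get_vmsize() |> Base.format_bytes

# Record the resident set size after every projector Monte Carlo step:
# pass `post_step_strategy = VMSize()` for a human-readable `vm_size` column,
# or `VMSize(; human_readable=false)` to report raw bytes.
```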