diff --git a/docs/make.jl b/docs/make.jl index 3da5fef9e..914ec9793 100644 --- a/docs/make.jl +++ b/docs/make.jl @@ -11,9 +11,8 @@ include("pages.jl") makedocs( sitename = "LinearSolve.jl", authors = "Chris Rackauckas", - modules = [LinearSolve, LinearSolve.SciMLBase], + modules = [LinearSolve], clean = true, doctest = false, linkcheck = true, - warnonly = [:docs_block, :missing_docs], linkcheck_ignore = [ "https://cli.github.com/manual/installation", ], diff --git a/docs/src/advanced/internal_api.md b/docs/src/advanced/internal_api.md index 4e327383e..67e15261b 100644 --- a/docs/src/advanced/internal_api.md +++ b/docs/src/advanced/internal_api.md @@ -107,6 +107,7 @@ These trait functions help determine algorithm capabilities and requirements: ```@docs LinearSolve.needs_concrete_A +LinearSolve.needs_square_A ``` ## Utility Functions @@ -122,11 +123,10 @@ LinearSolve.__init_u0_from_Ab ## Solve Functions -For custom solving strategies: +The default solver dispatch and adjoint evaluation internals: ```@docs -LinearSolve.LinearSolveFunction -LinearSolve.DirectLdiv! 
+LinearSolve.defaultalg_adjoint_eval ``` ## Preconditioner Infrastructure @@ -143,10 +143,26 @@ LinearSolve.InvPreconditioner These are internal algorithm implementations: ```@docs -LinearSolve.SimpleLUFactorization LinearSolve.LUSolver ``` +## BLAS Logging Internals + +The BLAS logging system provides diagnostic information about BLAS operations: + +```@docs +LinearSolve.BlasOperationInfo +LinearSolve.interpret_blas_code +LinearSolve.blas_info_msg +LinearSolve._format_blas_context +``` + +## SimpleGMRES Internals + +```@docs +LinearSolve._sym_givens +``` + ## Developer Notes ### Adding New Algorithms diff --git a/docs/src/assets/Project.toml b/docs/src/assets/Project.toml new file mode 100644 index 000000000..b2f6de188 --- /dev/null +++ b/docs/src/assets/Project.toml @@ -0,0 +1,11 @@ +[deps] +Documenter = "e30172f5-a6a5-5a46-863b-614d45cd2de4" +LinearSolve = "7ed4a6bd-45f5-4d41-b270-4a48e9bafcae" +LinearSolveAutotune = "67398393-80e8-4254-b7e4-1b9a36a3c5b6" +SciMLOperators = "c0aeaf25-5076-4817-a8d5-81caf7dfa961" + +[compat] +Documenter = "1" +LinearSolve = "3" +LinearSolveAutotune = "1.1" +SciMLOperators = "1" diff --git a/docs/src/solvers/solvers.md b/docs/src/solvers/solvers.md index 4f7306325..a1f77df53 100644 --- a/docs/src/solvers/solvers.md +++ b/docs/src/solvers/solvers.md @@ -130,6 +130,8 @@ LinearSolve.DefaultLinearSolver ```@docs RFLUFactorization +ButterflyFactorization +RF32MixedLUFactorization ``` ### Base.LinearAlgebra @@ -142,9 +144,11 @@ customized per-package, details given below describe a subset of important array ```@docs LUFactorization GenericLUFactorization +GenericFactorization QRFactorization SVDFactorization CholeskyFactorization +LDLtFactorization BunchKaufmanFactorization CHOLMODFactorization NormalCholeskyFactorization @@ -232,7 +236,9 @@ CliqueTreesFactorization ```@docs KrylovJL_CG KrylovJL_MINRES +KrylovJL_MINARES KrylovJL_GMRES +KrylovJL_FGMRES KrylovJL_BICGSTAB KrylovJL_LSMR KrylovJL_CRAIGMR @@ -250,6 +256,7 @@ 
MKL32MixedLUFactorization ```@docs OpenBLASLUFactorization +OpenBLAS32MixedLUFactorization ``` ### AppleAccelerate.jl @@ -283,6 +290,8 @@ MetalOffload32MixedLUFactorization ```@docs MKLPardisoFactorize MKLPardisoIterate +PanuaPardisoFactorize +PanuaPardisoIterate LinearSolve.PardisoJL ``` @@ -296,6 +305,7 @@ The following are non-standard GPU factorization routines. Using these solvers requires adding the package CUDA.jl, i.e. `using CUDA` ```@docs +CudaOffloadFactorization CudaOffloadLUFactorization CudaOffloadQRFactorization CUDAOffload32MixedLUFactorization @@ -362,6 +372,37 @@ KrylovKitJL HYPREAlgorithm ``` +### BLIS + +!!! note + + Using this solver requires adding the packages blis_jll and LAPACK_jll, i.e. `using blis_jll, LAPACK_jll` + +```@docs +LinearSolve.BLISLUFactorization +``` + +### AlgebraicMultigrid.jl + +!!! note + + Using this solver requires adding the package AlgebraicMultigrid.jl, i.e. `using AlgebraicMultigrid` + +```@docs +AlgebraicMultigridJL +``` + +### PETSc.jl + +!!! note + + Using PETSc solvers requires Julia version 1.10 or higher, and that the packages + PETSc.jl and SparseArrays.jl are loaded. + +```@docs +PETScAlgorithm +``` + ### Ginkgo.jl !!! note @@ -374,3 +415,9 @@ GinkgoJL GinkgoJL_CG GinkgoJL_GMRES ``` + +### Sensitivity / Adjoint + +```@docs +LinearSolveAdjoint +``` diff --git a/docs_build.log b/docs_build.log new file mode 100644 index 000000000..504365a08 Binary files /dev/null and b/docs_build.log differ diff --git a/src/LinearSolve.jl b/src/LinearSolve.jl index 2f8678b1e..366736f92 100644 --- a/src/LinearSolve.jl +++ b/src/LinearSolve.jl @@ -96,6 +96,37 @@ end @reexport using SciMLBase +# Attach LinearProblem docstring to LinearSolve module for Documenter.jl +# (LinearProblem is defined in SciMLBase but reexported here) +@doc """ + LinearProblem(A, b; u0 = nothing, p = nothing) + LinearProblem{iip}(A, b; u0 = nothing, p = nothing) + +Define a linear system problem ``Au = b``. 
+ +## Arguments + + - `A`: The coefficient matrix or linear operator. Can be a dense matrix, sparse matrix, + or any `AbstractSciMLOperator`. + - `b`: The right-hand side vector. + - `u0`: (optional) Initial guess for iterative solvers. Defaults to `nothing`. + - `p`: (optional) Parameters for the problem. Defaults to `nothing`. + +## Example + +```julia +using LinearSolve + +A = [1.0 2.0; 3.0 4.0] +b = [5.0, 6.0] +prob = LinearProblem(A, b) +sol = solve(prob) +sol.u # solution vector +``` + +For more details, see the [SciMLBase LinearProblem documentation](https://docs.sciml.ai/SciMLBase/stable/). +""" LinearProblem + """ SciMLLinearSolveAlgorithm <: SciMLBase.AbstractLinearAlgorithm diff --git a/src/default.jl b/src/default.jl index d7ccf5b47..19494025d 100644 --- a/src/default.jl +++ b/src/default.jl @@ -118,8 +118,6 @@ This function is primarily used internally by `solve(::LinearProblem)` when no explicit algorithm is provided. For manual algorithm selection, users can directly instantiate specific algorithm types. """ -# Legacy fallback -# For SciML algorithms already using `defaultalg`, all assume square matrix. defaultalg(A, b) = defaultalg(A, b, OperatorAssumptions(true)) function defaultalg( @@ -562,13 +560,8 @@ defaultalg_symbol(::Type{<:GenericFactorization{typeof(ldlt!)}}) = :LDLtFactoriz defaultalg_symbol(::Type{<:QRFactorization{ColumnNorm}}) = :QRFactorizationPivoted -""" -if alg.alg === DefaultAlgorithmChoice.LUFactorization -SciMLBase.solve!(cache, LUFactorization(), args...; kwargs...)) -else -... -end -""" +# Generated dispatch: routes to the specific solver based on alg.alg, +# with automatic fallback to column-pivoted QR when LU factorization fails. @generated function SciMLBase.solve!( cache::LinearCache, alg::DefaultLinearSolver, args...; @@ -814,13 +807,15 @@ end end """ -``` -elseif DefaultAlgorithmChoice.LUFactorization === cache.alg - (cache.cacheval.LUFactorization)' \\ dy -else - ... 
-end
-```
+    defaultalg_adjoint_eval(cache::LinearCache, dy)
+
+Generated function that dispatches the adjoint (transpose) linear solve for the
+default solver polyalgorithm. Given the cached factorization from the forward
+solve, computes `A' \\ dy` using the appropriate factorization stored in
+`cache.cacheval`.
+
+This is used internally by the adjoint rule for `LinearSolve` to compute
+sensitivities efficiently.
 """
 @generated function defaultalg_adjoint_eval(cache::LinearCache, dy)
     ex = :()
diff --git a/src/factorization.jl b/src/factorization.jl
index 904ef5b00..9fc636692 100644
--- a/src/factorization.jl
+++ b/src/factorization.jl
@@ -498,6 +498,27 @@ end
 
 ## LDLtFactorization
 
+"""
+    LDLtFactorization(shift = 0.0, perm = nothing)
+
+A wrapper around `LinearAlgebra.ldlt!` for `LDLt` factorization of symmetric
+(or Hermitian) positive semi-definite tridiagonal matrices (e.g. `SymTridiagonal`).
+
+## Positional Arguments
+
+  - `shift`: Diagonal shift applied before factoring. Defaults to `0.0`.
+  - `perm`: Optional permutation vector. Defaults to `nothing`.
+
+## Example
+
+```julia
+using LinearSolve, LinearAlgebra
+A = SymTridiagonal([4.0, 5.0, 6.0], [1.0, 2.0])
+b = [1.0, 2.0, 3.0]
+prob = LinearProblem(A, b)
+sol = solve(prob, LDLtFactorization())
+```
+"""
 struct LDLtFactorization{T} <: AbstractDenseFactorization
     shift::Float64
     perm::T