
Merge main

pull/35/head
termi-official, 9 months ago
commit b59e6a16c6
1. .git-blame-ignore-revs | 6
2. .github/dependabot.yml | 7
3. .github/workflows/Check.yml | 73
4. .github/workflows/Documentation.yml | 27
5. .github/workflows/Test.yml | 41
6. .github/workflows/ci.yml | 55
7. .pre-commit-config.yaml | 11
8. CHANGELOG.md | 112
9. Project.toml | 21
10. README.md | 61
11. docs/Makefile | 11
12. docs/Manifest.toml | 316
13. docs/Project.toml | 1
14. docs/liveserver.jl | 24
15. docs/make.jl | 10
16. docs/src/api.md | 13
17. docs/src/libhypre.md | 16
18. docs/src/matrix-vector.md | 61
19. examples/ex5.jl | 33
20. ext/HYPREPartitionedArrays.jl | 300
21. ext/HYPRESparseArrays.jl | 86
22. ext/HYPRESparseMatricesCSR.jl | 80
23. gen/Makefile | 16
24. gen/Manifest.toml | 140
25. gen/generator.jl | 7
26. gen/generator.toml | 7
27. gen/prologue.jl | 15
28. gen/solver_options.jl | 6
29. lib/LibHYPRE.jl | 1768
30. src/HYPRE.jl | 531
31. src/Internals.jl | 2
32. src/LibHYPRE.jl | 1
33. src/precs.jl | 2
34. src/solver_options.jl | 42
35. src/solvers.jl | 199
36. test/runtests.jl | 379
37. test/test_assembler.jl | 117
38. test/test_utils.jl | 23

.git-blame-ignore-revs | 6

@@ -0,0 +1,6 @@
# Runic formatting
# https://github.com/fredrikekre/HYPRE.jl/commit/640d77944e846a1f94e248bf2dea53310314f457
640d77944e846a1f94e248bf2dea53310314f457
# Switch from ccall() to @ccall in generated output
# https://github.com/fredrikekre/HYPRE.jl/commit/b4790048a7803298004bde24658ac90215a837a4
b4790048a7803298004bde24658ac90215a837a4

.github/dependabot.yml | 7

@@ -0,0 +1,7 @@
# https://docs.github.com/github/administering-a-repository/configuration-options-for-dependency-updates
version: 2
updates:
- package-ecosystem: "github-actions"
directory: "/" # Location of package manifests
schedule:
interval: "monthly"

.github/workflows/Check.yml | 73

@@ -0,0 +1,73 @@
name: Code checks
on:
pull_request:
push:
branches: ["master"]
jobs:
pre-commit:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v4
- uses: pre-commit/action@2c7b3805fd2a0fd8c1884dcaebf91fc102a13ecd # v3.0.1
explicit-imports:
runs-on: ubuntu-latest
name: "ExplicitImports.jl"
steps:
- uses: actions/checkout@v4
# - uses: julia-actions/setup-julia@v2
# with:
# version: '1'
- uses: julia-actions/cache@v2
# - uses: julia-actions/julia-buildpkg@v1
- name: Install dependencies
shell: julia --project=@explicit-imports {0}
run: |
# Add ExplicitImports.jl and packages that HYPRE has extensions for
using Pkg
Pkg.develop([
PackageSpec(name = "HYPRE", path = pwd()),
])
Pkg.add([
PackageSpec(name = "ExplicitImports", version = "1.9"),
PackageSpec(name = "PartitionedArrays"),
PackageSpec(name = "SparseArrays"),
PackageSpec(name = "SparseMatricesCSR"),
])
- name: ExplicitImports.jl code checks
shell: julia --project=@explicit-imports {0}
run: |
using HYPRE, ExplicitImports, PartitionedArrays, SparseArrays, SparseMatricesCSR
# Check HYPRE
check_no_implicit_imports(HYPRE)
check_no_stale_explicit_imports(HYPRE)
check_all_qualified_accesses_via_owners(HYPRE)
check_no_self_qualified_accesses(HYPRE)
# Check extension modules
for ext in (:HYPREPartitionedArrays, :HYPRESparseArrays, :HYPRESparseMatricesCSR)
extmod = Base.get_extension(HYPRE, ext)
if extmod !== nothing
check_no_implicit_imports(extmod)
check_no_stale_explicit_imports(extmod)
check_all_qualified_accesses_via_owners(extmod)
check_no_self_qualified_accesses(extmod)
else
@warn "$(ext) extension not available."
end
end
runic:
name: Runic
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v4
- uses: julia-actions/setup-julia@v2
with:
version: '1'
- uses: julia-actions/cache@v2
- uses: fredrikekre/runic-action@v1
with:
version: '1'

.github/workflows/Documentation.yml | 27

@@ -0,0 +1,27 @@
---
name: Documentation
on:
push:
branches:
- 'master'
- 'release-'
tags: ['*']
pull_request:
jobs:
docs:
name: Julia 1.11 - ubuntu-latest
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v4
- uses: julia-actions/setup-julia@v2
with:
version: '1.11'
- uses: julia-actions/cache@v2
- name: Install dependencies
run: julia --project=docs -e 'using Pkg; Pkg.instantiate()'
- name: Build and deploy
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
DOCUMENTER_KEY: ${{ secrets.DOCUMENTER_KEY }}
run: julia --project=docs --color=yes docs/make.jl

.github/workflows/Test.yml | 41

@@ -0,0 +1,41 @@
name: Test
on:
push:
branches:
- 'master'
- 'release-'
tags: ['*']
pull_request:
jobs:
test:
name: Julia ${{ matrix.version }} - ${{ matrix.os }}
runs-on: ${{ matrix.os }}
strategy:
matrix:
version:
- '1.10'
- '1'
- 'nightly'
os:
- ubuntu-latest
include:
- os: windows-latest
version: '1'
- os: macOS-latest
version: '1'
steps:
- uses: actions/checkout@v4
- uses: julia-actions/setup-julia@v2
with:
version: ${{ matrix.version }}
- uses: julia-actions/cache@v2
- uses: julia-actions/julia-buildpkg@v1
- uses: julia-actions/julia-runtest@v1
- uses: julia-actions/julia-processcoverage@v1
- uses: codecov/codecov-action@v5
with:
files: lcov.info
env:
CODECOV_TOKEN: ${{ secrets.CODECOV_TOKEN }}

.github/workflows/ci.yml | 55

@@ -1,55 +0,0 @@
name: CI
on:
push:
branches:
- 'master'
- 'release-'
tags: '*'
pull_request:
jobs:
test:
name: Julia ${{ matrix.version }} - ${{ matrix.os }}
runs-on: ${{ matrix.os }}
strategy:
matrix:
version:
- '1.6'
- '1'
- 'nightly'
os:
- ubuntu-latest
include:
- os: windows-latest
version: '1'
- os: macOS-latest
version: '1'
steps:
- uses: actions/checkout@v2
- uses: julia-actions/setup-julia@v1
with:
version: ${{ matrix.version }}
- uses: julia-actions/cache@v1
- uses: julia-actions/julia-buildpkg@v1
- uses: julia-actions/julia-runtest@v1
- uses: julia-actions/julia-processcoverage@v1
- uses: codecov/codecov-action@v2
with:
files: ./lcov.info
docs:
name: Documentation
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v2
- uses: julia-actions/setup-julia@v1
with:
version: '1'
- uses: julia-actions/cache@v1
- name: Install dependencies
run: julia --project=docs -e 'using Pkg; Pkg.instantiate()'
- name: Build and deploy
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
DOCUMENTER_KEY: ${{ secrets.DOCUMENTER_KEY }}
run: julia --project=docs --color=yes docs/make.jl

.pre-commit-config.yaml | 11

@@ -0,0 +1,11 @@
repos:
- repo: https://github.com/pre-commit/pre-commit-hooks
rev: v3.2.0
hooks:
- id: check-added-large-files
- id: check-case-conflict
- id: check-toml
- id: check-yaml
- id: end-of-file-fixer
- id: mixed-line-ending
- id: trailing-whitespace

CHANGELOG.md | 112

@@ -0,0 +1,112 @@
# HYPRE.jl changelog
All notable changes to this project will be documented in this file.
The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/),
and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html).
## [v1.7.0] - 2024-10-09
### Changed
- Support for Julia 1.6 has been dropped; this and future releases require Julia 1.10 or
  later. ([#27])
- Constant struct fields of `HYPREMatrix` and `HYPREVector` are now marked with `const`.
([#28])
## [v1.6.0] - 2024-09-29
### Changed
- PartitionedArrays.jl dependency upgraded from release series 0.3.x to release series
0.5.x. ([#17], [#18])
- CEnum.jl dependency upgraded to release series 0.5.x (release series 0.4.x still
allowed). ([#17], [#18])
- PartitionedArrays.jl support (`PSparseMatrix`, `PVector`) is now provided by a package
extension. ([#23])
- SparseMatricesCSR.jl support (`SparseMatrixCSR`) is now provided by a package extension.
([#24])
- SparseArrays.jl support (`SparseMatrixCSC`) is now provided by a package extension.
([#25])
## [v1.5.0] - 2023-05-26
### Changed
- PartitionedArrays.jl dependency upgraded from version 0.2.x to version 0.3.x.
([#16])
## [v1.4.0] - 2023-01-20
### Added
- New function `HYPRE.GetFinalRelativeResidualNorm(s::HYPRESolver)` for getting the final
residual norm from a solver. This function dispatches on the solver to the corresponding
C API wrapper `LibHYPRE.HYPRE_${Solver}GetFinalRelativeResidualNorm`. ([#14])
- New function `HYPRE.GetNumIterations(s::HYPRESolver)` for getting the number of
iterations from a solver. This function dispatches on the solver to the corresponding C
API wrapper `LibHYPRE.HYPRE_${Solver}GetNumIterations`. ([#14])
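For example, the statistics can be queried after a solve (a minimal sketch, not part of the
changelog; `A` and `b` are assumed to be an already assembled matrix and vector):
```julia
cg = HYPRE.PCG()
x = HYPRE.solve(cg, A, b)
iters = HYPRE.GetNumIterations(cg)
resnorm = HYPRE.GetFinalRelativeResidualNorm(cg)
```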
## [v1.3.1] - 2023-01-14
### Fixed
- Solvers now keep a reference to the added preconditioner to make sure the preconditioner
  is not finalized before the solver. This fixes crashes (segfaults) that could happen when
  no other reference to the preconditioner existed in the program. ([#12])
- The proper conversion methods for `ccall` are now defined for `HYPREMatrix`,
`HYPREVector`, and `HYPRESolver` such that they can be passed directly to `HYPRE_*`
functions and let `ccall` guarantee the GC preservation of these objects. Although not
observed in practice, this fixes a possible race condition where the matrix/vector/solver
could be finalized too early. ([#13])
## [v1.3.0] - 2022-12-30
### Added
- Rectangular matrices can now be assembled by the new method
`HYPRE.assemble!(::HYPREMatrixAssembler, i::Vector, j::Vector, a::Matrix)` where `i` are
the rows and `j` the columns. ([#7])
### Fixed
- All created HYPRE objects (`HYPREMatrix`, `HYPREVector`, and `HYPRESolver`s) are now
  tracked internally and explicitly `finalize`d (if they haven't been GC'd) before
finalizing HYPRE. This fixes a "race condition" where MPI and/or HYPRE would finalize
before these Julia objects are garbage collected and finalized. ([#8])
### Deprecated
- The method `HYPRE.assemble!(A::HYPREMatrixAssembler, ij::Vector, a::Matrix)` has been
  deprecated in favor of `HYPRE.assemble!(A::HYPREMatrixAssembler, i::Vector, j::Vector,
  a::Matrix)`, i.e. it is now required to explicitly pass row and column indices
  individually. The motivation behind this is to support assembly of rectangular
matrices. Note that `HYPRE.assemble!(A::HYPREAssembler, ij::Vector, a::Matrix,
  b::Vector)` is still supported, where `ij` is used as row and column indices for `a`, as
well as row indices for `b`. ([#6])
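A minimal sketch of the old and new call signatures (the `assembler` and the 2 x 2 element
matrix `a` with global indices `[1, 2]` are hypothetical, for illustration only):
```julia
# Deprecated: one index vector `ij` used for both rows and columns
HYPRE.assemble!(assembler, [1, 2], a)
# Replacement: pass row indices and column indices explicitly
HYPRE.assemble!(assembler, [1, 2], [1, 2], a)
```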
## [v1.2.0] - 2022-10-12
### Added
- Added assembler interface to assemble `HYPREMatrix` and/or `HYPREVector` directly without
an intermediate sparse structure in Julia. ([#5])
## [v1.1.0] - 2022-10-05
### Added
- Added support for MPI.jl version 0.20.x (in addition to the existing version 0.19.x
support). ([#2])
## [v1.0.0] - 2022-07-28
Initial release of HYPRE.jl.
<!-- Links generated by Changelog.jl -->
[v1.0.0]: https://github.com/fredrikekre/HYPRE.jl/releases/tag/v1.0.0
[v1.1.0]: https://github.com/fredrikekre/HYPRE.jl/releases/tag/v1.1.0
[v1.2.0]: https://github.com/fredrikekre/HYPRE.jl/releases/tag/v1.2.0
[v1.3.0]: https://github.com/fredrikekre/HYPRE.jl/releases/tag/v1.3.0
[v1.3.1]: https://github.com/fredrikekre/HYPRE.jl/releases/tag/v1.3.1
[v1.4.0]: https://github.com/fredrikekre/HYPRE.jl/releases/tag/v1.4.0
[v1.5.0]: https://github.com/fredrikekre/HYPRE.jl/releases/tag/v1.5.0
[v1.6.0]: https://github.com/fredrikekre/HYPRE.jl/releases/tag/v1.6.0
[v1.7.0]: https://github.com/fredrikekre/HYPRE.jl/releases/tag/v1.7.0
[#2]: https://github.com/fredrikekre/HYPRE.jl/issues/2
[#5]: https://github.com/fredrikekre/HYPRE.jl/issues/5
[#6]: https://github.com/fredrikekre/HYPRE.jl/issues/6
[#7]: https://github.com/fredrikekre/HYPRE.jl/issues/7
[#8]: https://github.com/fredrikekre/HYPRE.jl/issues/8
[#12]: https://github.com/fredrikekre/HYPRE.jl/issues/12
[#13]: https://github.com/fredrikekre/HYPRE.jl/issues/13
[#14]: https://github.com/fredrikekre/HYPRE.jl/issues/14
[#16]: https://github.com/fredrikekre/HYPRE.jl/issues/16
[#17]: https://github.com/fredrikekre/HYPRE.jl/issues/17
[#18]: https://github.com/fredrikekre/HYPRE.jl/issues/18
[#23]: https://github.com/fredrikekre/HYPRE.jl/issues/23
[#24]: https://github.com/fredrikekre/HYPRE.jl/issues/24
[#25]: https://github.com/fredrikekre/HYPRE.jl/issues/25
[#27]: https://github.com/fredrikekre/HYPRE.jl/issues/27
[#28]: https://github.com/fredrikekre/HYPRE.jl/issues/28

Project.toml | 21

@@ -1,6 +1,6 @@
name = "HYPRE"
uuid = "b5ffcf37-a2bd-41ab-a3da-4bd9bc8ad771"
version = "1.1.0"
version = "1.7.0"
[deps]
CEnum = "fa961155-64e5-5f13-b03f-caf6b980ea82"
@@ -8,22 +8,33 @@ HYPRE_jll = "0a602bbd-b08b-5d75-8d32-0de6eef44785"
Libdl = "8f399da3-3557-5675-b5ff-fb832c97cbdb"
LinearAlgebra = "37e2e46d-f89d-539d-b4ee-838fcccc9c8e"
MPI = "da04e1cc-30fd-572f-bb4f-1f8673147195"
[weakdeps]
PartitionedArrays = "5a9dfac6-5c52-46f7-8278-5e2210713be9"
SparseArrays = "2f01184e-e22b-5df5-ae63-d93ebab69eaf"
SparseMatricesCSR = "a0a7dd2c-ebf4-11e9-1f05-cf50bc540ca1"
[extensions]
HYPREPartitionedArrays = ["PartitionedArrays", "SparseArrays", "SparseMatricesCSR"]
HYPRESparseArrays = "SparseArrays"
HYPRESparseMatricesCSR = ["SparseArrays", "SparseMatricesCSR"]
[compat]
CEnum = "0.4"
CEnum = "0.4, 0.5"
LinearAlgebra = "1"
LinearSolve = "3"
MPI = "0.19, 0.20"
PartitionedArrays = "0.2"
PartitionedArrays = "0.5"
SparseMatricesCSR = "0.6"
julia = "1.6"
julia = "1.10"
[extras]
LinearAlgebra = "37e2e46d-f89d-539d-b4ee-838fcccc9c8e"
LinearSolve = "7ed4a6bd-45f5-4d41-b270-4a48e9bafcae"
PartitionedArrays = "5a9dfac6-5c52-46f7-8278-5e2210713be9"
SparseArrays = "2f01184e-e22b-5df5-ae63-d93ebab69eaf"
SparseMatricesCSR = "a0a7dd2c-ebf4-11e9-1f05-cf50bc540ca1"
Test = "8dfed614-e22c-5e08-85e1-65c5234f0b40"
[targets]
test = ["LinearSolve", "Test"]
test = ["LinearSolve", "PartitionedArrays", "SparseArrays", "SparseMatricesCSR", "Test"]

README.md | 61

@@ -1,12 +1,13 @@
# HYPRE.jl
| **Documentation** | **Build Status** |
|:------------------------- |:------------------------------------- |
| [![][docs-img]][docs-url] | [![][gh-actions-img]][gh-actions-url] |
[![Documentation](https://img.shields.io/badge/docs-latest%20release-blue.svg)](https://fredrikekre.github.io/HYPRE.jl/)
[![Test](https://github.com/fredrikekre/HYPRE.jl/actions/workflows/Test.yml/badge.svg?branch=master&event=push)](https://github.com/fredrikekre/HYPRE.jl/actions/workflows/Test.yml)
[![Codecov](https://codecov.io/github/fredrikekre/HYPRE.jl/graph/badge.svg)](https://codecov.io/github/fredrikekre/HYPRE.jl)
[![code style: runic](https://img.shields.io/badge/code_style-%E1%9A%B1%E1%9A%A2%E1%9A%BE%E1%9B%81%E1%9A%B2-black)](https://github.com/fredrikekre/Runic.jl)
[Julia][julia] interface to [HYPRE][hypre] ("high performance preconditioners and solvers
featuring multigrid methods for the solution of large, sparse linear systems of equations on
massively parallel computers").
[Julia](https://julialang.org) interface to [HYPRE](https://github.com/hypre-space/hypre)
("high performance preconditioners and solvers featuring multigrid methods for the solution
of large, sparse linear systems of equations on massively parallel computers").
While the main purpose of HYPRE is to solve problems on multiple cores, it can also be used
for single core problems. HYPRE.jl aims to make it easy to use both modes of operation, with
@@ -14,12 +15,31 @@ an interface that should be familiar to Julia programmers. This README includes
examples -- refer to the [documentation][docs-url] for more details, and for information
about the included solvers and preconditioners and how to configure them.
## Example: Single-core solve with standard sparse matrices
## Installation
HYPRE.jl can be installed from the Pkg REPL (press `]` in the Julia REPL to enter):
```
(@v1) pkg> add HYPRE
```
To configure MPI, see the [documentation for MPI.jl](https://juliaparallel.org/MPI.jl/).
## Changes
All notable changes are documented in [CHANGELOG.md](CHANGELOG.md).
## Usage
Some basic usage examples are shown below. See the [documentation][docs-url] for details.
### Example: Single-core solve with standard sparse matrices
It is possible to use Julia's standard sparse arrays (`SparseMatrixCSC` from the
[SparseArrays.jl][sparse-stdlib] standard library, and `SparseMatrixCSR` from the
[SparseMatricesCSR.jl][sparsecsr] package) directly in HYPRE.jl. For example, to solve
`Ax = b` with conjugate gradients:
[SparseArrays.jl](https://github.com/JuliaSparse/SparseArrays.jl) standard library, and
`SparseMatrixCSR` from the
[SparseMatricesCSR.jl](https://github.com/gridap/SparseMatricesCSR.jl) package) directly in
HYPRE.jl. For example, to solve `Ax = b` with conjugate gradients:
```julia
# Initialize linear system
@@ -33,12 +53,12 @@ cg = HYPRE.PCG()
x = HYPRE.solve(cg, A, b)
```
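For reference, here is a complete runnable sketch of the steps above (the test problem is an
assumed small symmetric positive definite tridiagonal matrix, not the one from the elided
README lines):

```julia
using HYPRE, SparseArrays

HYPRE.Init()

# Assumed test problem: 1D Laplacian, which is symmetric positive definite
n = 100
A = spdiagm(-1 => fill(-1.0, n - 1), 0 => fill(2.0, n), 1 => fill(-1.0, n - 1))
b = rand(n)

# Solve Ax = b with conjugate gradients
cg = HYPRE.PCG()
x = HYPRE.solve(cg, A, b)
```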
## Example: Multi-core solve using PartitionedArrays.jl
### Example: Multi-core solve using PartitionedArrays.jl
For multi-core problems it is possible to use [PartitionedArrays.jl][partarrays] directly
with HYPRE.jl. Once the linear system is setup the solver interface is identical. For
example, to solve `Ax = b` with bi-conjugate gradients and an algebraic multigrid
preconditioner:
For multi-core problems it is possible to use
[PartitionedArrays.jl](https://github.com/fverdugo/PartitionedArrays.jl) directly with
HYPRE.jl. Once the linear system is set up, the solver interface is identical. For example, to
solve `Ax = b` with bi-conjugate gradients and an algebraic multigrid preconditioner:
```julia
# Initialize linear system
@@ -54,14 +74,3 @@ bicg = HYPRE.BiCGSTAB(; Precond = precond)
# Compute the solution
x = HYPRE.solve(bicg, A, b)
```
[julia]: https://julialang.org/
[hypre]: https://github.com/hypre-space/hypre
[sparse-stdlib]: https://github.com/JuliaSparse/SparseArrays.jl
[sparsecsr]: https://github.com/gridap/SparseMatricesCSR.jl
[partarrays]: https://github.com/fverdugo/PartitionedArrays.jl
[docs-img]: https://img.shields.io/badge/docs-stable%20release-blue.svg
[docs-url]: https://fredrikekre.github.io/HYPRE.jl/
[gh-actions-img]: https://github.com/fredrikekre/HYPRE.jl/workflows/CI/badge.svg
[gh-actions-url]: https://github.com/fredrikekre/HYPRE.jl/actions?query=workflow%3ACI

docs/Makefile | 11

@@ -1,11 +1,8 @@
SRCDIR:=$(shell dirname $(abspath $(firstword $(MAKEFILE_LIST))))
default: livedocs
default: liveserver
instantiate:
julia --project=${SRCDIR} -e 'using Pkg; Pkg.instantiate()'
liveserver:
julia --project=${SRCDIR} ${SRCDIR}/liveserver.jl
livedocs: instantiate
julia --project=${SRCDIR} -e 'using LiveServer; LiveServer.servedocs(; foldername=pwd())' -- liveserver
.PHONY: default instantiate livedocs
.PHONY: default liveserver

docs/Manifest.toml | 316

@@ -1,8 +1,8 @@
# This file is machine-generated - editing it directly is not advised
julia_version = "1.8.2"
julia_version = "1.11.0"
manifest_format = "2.0"
project_hash = "7c98a97551e318432a6ba3bc3fd4758623a247ac"
project_hash = "59b08f4b60c862a102ba6a5a40dd11b11cb0ae51"
[[deps.ANSIColoredPrinters]]
git-tree-sha1 = "574baf8110975760d391c710b6341da1afa48d8c"
@@ -10,71 +10,107 @@ uuid = "a4c015fc-c6ff-483c-b24f-f7ea428134e9"
version = "0.0.1"
[[deps.AbstractTrees]]
git-tree-sha1 = "5c0b629df8a5566a06f5fef5100b53ea56e465a0"
git-tree-sha1 = "2d9c9a55f9c93e8887ad391fbae72f8ef55e1177"
uuid = "1520ce14-60c1-5f80-bbc7-55ef81b5835c"
version = "0.4.2"
version = "0.4.5"
[[deps.ArgTools]]
uuid = "0dad84c5-d112-42e6-8d28-ef12dabb789f"
version = "1.1.1"
version = "1.1.2"
[[deps.Artifacts]]
uuid = "56f22d72-fd6d-98f1-02f0-08ddc0907c33"
version = "1.11.0"
[[deps.Base64]]
uuid = "2a0f44e3-6c83-55bd-87e4-b1978d98bd5f"
version = "1.11.0"
[[deps.CEnum]]
git-tree-sha1 = "eb4cb44a499229b3b8426dcfb5dd85333951ff90"
git-tree-sha1 = "389ad5c84de1ae7cf0e28e381131c98ea87d54fc"
uuid = "fa961155-64e5-5f13-b03f-caf6b980ea82"
version = "0.4.2"
version = "0.5.0"
[[deps.Changelog]]
git-tree-sha1 = "e579c6157598169ad4ef17263bdf3452b4a3e316"
uuid = "5217a498-cd5d-4ec6-b8c2-9b85a09b6e3e"
version = "1.1.0"
[[deps.CodecZlib]]
deps = ["TranscodingStreams", "Zlib_jll"]
git-tree-sha1 = "bce6804e5e6044c6daab27bb533d1295e4a2e759"
uuid = "944b1d66-785c-5afd-91f1-9de20f533193"
version = "0.7.6"
[[deps.CompilerSupportLibraries_jll]]
deps = ["Artifacts", "Libdl"]
uuid = "e66e0078-7015-5450-92f7-15fbd957f2ae"
version = "0.5.2+0"
version = "1.1.1+0"
[[deps.Dates]]
deps = ["Printf"]
uuid = "ade2ca70-3891-5945-98fb-dc099432e06a"
[[deps.Distances]]
deps = ["LinearAlgebra", "SparseArrays", "Statistics", "StatsAPI"]
git-tree-sha1 = "3258d0659f812acde79e8a74b11f17ac06d0ca04"
uuid = "b4f34e82-e78d-54a5-968a-f98e89d6e8f7"
version = "0.10.7"
version = "1.11.0"
[[deps.Distributed]]
deps = ["Random", "Serialization", "Sockets"]
uuid = "8ba89e20-285c-5b6f-9357-94700520ee1b"
version = "1.11.0"
[[deps.DocStringExtensions]]
deps = ["LibGit2"]
git-tree-sha1 = "5158c2b41018c5f7eb1470d558127ac274eca0c9"
git-tree-sha1 = "2fb1e02f2b635d0845df5d7c167fec4dd739b00d"
uuid = "ffbed154-4ef7-542d-bbb7-c09d3a79fcae"
version = "0.9.1"
version = "0.9.3"
[[deps.Documenter]]
deps = ["ANSIColoredPrinters", "AbstractTrees", "Base64", "Dates", "DocStringExtensions", "IOCapture", "InteractiveUtils", "JSON", "LibGit2", "Logging", "Markdown", "MarkdownAST", "REPL", "Test", "Unicode"]
git-tree-sha1 = "540cb30edf31561e99df05a502c1922107d50ae1"
repo-rev = "master"
repo-url = "https://github.com/JuliaDocs/Documenter.jl.git"
deps = ["ANSIColoredPrinters", "AbstractTrees", "Base64", "CodecZlib", "Dates", "DocStringExtensions", "Downloads", "Git", "IOCapture", "InteractiveUtils", "JSON", "LibGit2", "Logging", "Markdown", "MarkdownAST", "Pkg", "PrecompileTools", "REPL", "RegistryInstances", "SHA", "TOML", "Test", "Unicode"]
git-tree-sha1 = "5a1ee886566f2fa9318df1273d8b778b9d42712d"
uuid = "e30172f5-a6a5-5a46-863b-614d45cd2de4"
version = "0.28.0-DEV"
version = "1.7.0"
[[deps.Downloads]]
deps = ["ArgTools", "FileWatching", "LibCURL", "NetworkOptions"]
uuid = "f43a241f-c20a-4ad4-852c-f6b1247861c6"
version = "1.6.0"
[[deps.Expat_jll]]
deps = ["Artifacts", "JLLWrappers", "Libdl"]
git-tree-sha1 = "1c6317308b9dc757616f0b5cb379db10494443a7"
uuid = "2e619515-83b5-522b-bb60-26c02a35a201"
version = "2.6.2+0"
[[deps.FileWatching]]
uuid = "7b1f6079-737a-58dc-b8bc-7a2ca5c1b5ee"
version = "1.11.0"
[[deps.Git]]
deps = ["Git_jll"]
git-tree-sha1 = "04eff47b1354d702c3a85e8ab23d539bb7d5957e"
uuid = "d7ba0133-e1db-5d97-8f8c-041e4b3a1eb2"
version = "1.3.1"
[[deps.Git_jll]]
deps = ["Artifacts", "Expat_jll", "JLLWrappers", "LibCURL_jll", "Libdl", "Libiconv_jll", "OpenSSL_jll", "PCRE2_jll", "Zlib_jll"]
git-tree-sha1 = "ea372033d09e4552a04fd38361cd019f9003f4f4"
uuid = "f8c6e375-362e-5223-8a59-34ff63f689eb"
version = "2.46.2+0"
[[deps.HYPRE]]
deps = ["CEnum", "HYPRE_jll", "Libdl", "MPI", "PartitionedArrays", "SparseArrays", "SparseMatricesCSR"]
deps = ["CEnum", "HYPRE_jll", "Libdl", "MPI"]
path = ".."
uuid = "b5ffcf37-a2bd-41ab-a3da-4bd9bc8ad771"
version = "1.1.0"
version = "1.7.0"
[deps.HYPRE.extensions]
HYPREPartitionedArrays = ["PartitionedArrays", "SparseArrays", "SparseMatricesCSR"]
HYPRESparseArrays = "SparseArrays"
HYPRESparseMatricesCSR = ["SparseArrays", "SparseMatricesCSR"]
[deps.HYPRE.weakdeps]
PartitionedArrays = "5a9dfac6-5c52-46f7-8278-5e2210713be9"
SparseArrays = "2f01184e-e22b-5df5-ae63-d93ebab69eaf"
SparseMatricesCSR = "a0a7dd2c-ebf4-11e9-1f05-cf50bc540ca1"
[[deps.HYPRE_jll]]
deps = ["Artifacts", "JLLWrappers", "LAPACK_jll", "LazyArtifacts", "Libdl", "MPICH_jll", "MPIPreferences", "MPItrampoline_jll", "MicrosoftMPI_jll", "OpenBLAS_jll", "OpenMPI_jll", "Pkg", "TOML"]
@@ -82,124 +118,151 @@ git-tree-sha1 = "b77d3eca75f8442e034ccf415c87405a49e77985"
uuid = "0a602bbd-b08b-5d75-8d32-0de6eef44785"
version = "2.23.1+1"
[[deps.Hwloc_jll]]
deps = ["Artifacts", "JLLWrappers", "Libdl"]
git-tree-sha1 = "dd3b49277ec2bb2c6b94eb1604d4d0616016f7a6"
uuid = "e33a78d0-f292-5ffc-b300-72abe9b543c8"
version = "2.11.2+0"
[[deps.IOCapture]]
deps = ["Logging", "Random"]
git-tree-sha1 = "f7be53659ab06ddc986428d3a9dcc95f6fa6705a"
git-tree-sha1 = "b6d6bfdd7ce25b0f9b2f6b3dd56b2673a66c8770"
uuid = "b5f81e59-6552-4d32-b1f0-c071b021bf89"
version = "0.2.2"
version = "0.2.5"
[[deps.InteractiveUtils]]
deps = ["Markdown"]
uuid = "b77e0a4c-d291-57a0-90e8-8db25a27a240"
[[deps.IterativeSolvers]]
deps = ["LinearAlgebra", "Printf", "Random", "RecipesBase", "SparseArrays"]
git-tree-sha1 = "1169632f425f79429f245113b775a0e3d121457c"
uuid = "42fd0dbc-a981-5370-80f2-aaf504508153"
version = "0.9.2"
version = "1.11.0"
[[deps.JLLWrappers]]
deps = ["Preferences"]
git-tree-sha1 = "abc9885a7ca2052a736a600f7fa66209f96506e1"
deps = ["Artifacts", "Preferences"]
git-tree-sha1 = "f389674c99bfcde17dc57454011aa44d5a260a40"
uuid = "692b3bcd-3c85-4b1f-b108-f13ce0eb3210"
version = "1.4.1"
version = "1.6.0"
[[deps.JSON]]
deps = ["Dates", "Mmap", "Parsers", "Unicode"]
git-tree-sha1 = "3c837543ddb02250ef42f4738347454f95079d4e"
git-tree-sha1 = "31e996f0a15c7b280ba9f76636b3ff9e2ae58c9a"
uuid = "682c06a0-de6a-54ab-a142-c8b1cf79cde6"
version = "0.21.3"
version = "0.21.4"
[[deps.LAPACK_jll]]
deps = ["Artifacts", "CompilerSupportLibraries_jll", "JLLWrappers", "Libdl", "Pkg", "libblastrampoline_jll"]
git-tree-sha1 = "a539affa8228208f5a3396037165b04bff9a2ba6"
deps = ["Artifacts", "CompilerSupportLibraries_jll", "JLLWrappers", "Libdl", "libblastrampoline_jll"]
git-tree-sha1 = "1b25c30fa49db281be615793e0f85282a8f22822"
uuid = "51474c39-65e3-53ba-86ba-03b1b862ec14"
version = "3.10.0+1"
version = "3.12.0+2"
[[deps.LazilyInitializedFields]]
git-tree-sha1 = "8f7f3cabab0fd1800699663533b6d5cb3fc0e612"
uuid = "0e77f7df-68c5-4e49-93ce-4cd80f5598bf"
version = "1.2.2"
[[deps.LazyArtifacts]]
deps = ["Artifacts", "Pkg"]
uuid = "4af54fe1-eca0-43a8-85a7-787d91b784e3"
version = "1.11.0"
[[deps.LibCURL]]
deps = ["LibCURL_jll", "MozillaCACerts_jll"]
uuid = "b27032c2-a3e7-50c8-80cd-2d36dbcbfd21"
version = "0.6.3"
version = "0.6.4"
[[deps.LibCURL_jll]]
deps = ["Artifacts", "LibSSH2_jll", "Libdl", "MbedTLS_jll", "Zlib_jll", "nghttp2_jll"]
uuid = "deac9b47-8bc7-5906-a0fe-35ac56dc84c0"
version = "7.84.0+0"
version = "8.6.0+0"
[[deps.LibGit2]]
deps = ["Base64", "NetworkOptions", "Printf", "SHA"]
deps = ["Base64", "LibGit2_jll", "NetworkOptions", "Printf", "SHA"]
uuid = "76f85450-5226-5b5a-8eaa-529ad045b433"
version = "1.11.0"
[[deps.LibGit2_jll]]
deps = ["Artifacts", "LibSSH2_jll", "Libdl", "MbedTLS_jll"]
uuid = "e37daf67-58a4-590a-8e99-b0245dd2ffc5"
version = "1.7.2+0"
[[deps.LibSSH2_jll]]
deps = ["Artifacts", "Libdl", "MbedTLS_jll"]
uuid = "29816b5a-b9ab-546f-933c-edad1886dfa8"
version = "1.10.2+0"
version = "1.11.0+1"
[[deps.Libdl]]
uuid = "8f399da3-3557-5675-b5ff-fb832c97cbdb"
version = "1.11.0"
[[deps.LinearAlgebra]]
deps = ["Libdl", "libblastrampoline_jll"]
uuid = "37e2e46d-f89d-539d-b4ee-838fcccc9c8e"
[[deps.Libiconv_jll]]
deps = ["Artifacts", "JLLWrappers", "Libdl"]
git-tree-sha1 = "f9557a255370125b405568f9767d6d195822a175"
uuid = "94ce4f54-9a6c-5748-9c1c-f9c7231a4531"
version = "1.17.0+0"
[[deps.Logging]]
uuid = "56ddb016-857b-54e1-b83d-db4d58db5568"
version = "1.11.0"
[[deps.MPI]]
deps = ["Distributed", "DocStringExtensions", "Libdl", "MPICH_jll", "MPIPreferences", "MPItrampoline_jll", "MicrosoftMPI_jll", "OpenMPI_jll", "Requires", "Serialization", "Sockets"]
git-tree-sha1 = "97d9313b6bb7ac04f5b8cfb33569cd30d0441efe"
deps = ["Distributed", "DocStringExtensions", "Libdl", "MPICH_jll", "MPIPreferences", "MPItrampoline_jll", "MicrosoftMPI_jll", "OpenMPI_jll", "PkgVersion", "PrecompileTools", "Requires", "Serialization", "Sockets"]
git-tree-sha1 = "892676019c58f34e38743bc989b0eca5bce5edc5"
uuid = "da04e1cc-30fd-572f-bb4f-1f8673147195"
version = "0.20.0"
version = "0.20.22"
[deps.MPI.extensions]
AMDGPUExt = "AMDGPU"
CUDAExt = "CUDA"
[deps.MPI.weakdeps]
AMDGPU = "21141c5a-9bdb-4563-92ae-f87d6854732e"
CUDA = "052768ef-5323-5732-b1bb-66c8b64840ba"
[[deps.MPICH_jll]]
deps = ["Artifacts", "CompilerSupportLibraries_jll", "JLLWrappers", "LazyArtifacts", "Libdl", "MPIPreferences", "Pkg", "TOML"]
git-tree-sha1 = "6d4fa43afab4611d090b11617ecea1a144b21d35"
deps = ["Artifacts", "CompilerSupportLibraries_jll", "Hwloc_jll", "JLLWrappers", "LazyArtifacts", "Libdl", "MPIPreferences", "TOML"]
git-tree-sha1 = "7715e65c47ba3941c502bffb7f266a41a7f54423"
uuid = "7cb0a576-ebde-5e09-9194-50597f1243b4"
version = "4.0.2+5"
version = "4.2.3+0"
[[deps.MPIPreferences]]
deps = ["Libdl", "Preferences"]
git-tree-sha1 = "9959c42b41220206eeda9004f695d913e2245658"
git-tree-sha1 = "c105fe467859e7f6e9a852cb15cb4301126fac07"
uuid = "3da0fdf6-3ccc-4f1b-acd9-58baa6c99267"
version = "0.1.5"
version = "0.1.11"
[[deps.MPItrampoline_jll]]
deps = ["Artifacts", "CompilerSupportLibraries_jll", "JLLWrappers", "LazyArtifacts", "Libdl", "MPIPreferences", "Pkg", "TOML"]
git-tree-sha1 = "b3f9e42685b4ad614eca0b44bd863cd41b1c86ea"
deps = ["Artifacts", "CompilerSupportLibraries_jll", "JLLWrappers", "LazyArtifacts", "Libdl", "MPIPreferences", "TOML"]
git-tree-sha1 = "70e830dab5d0775183c99fc75e4c24c614ed7142"
uuid = "f1f71cc9-e9ae-5b93-9b94-4fe0e1ad3748"
version = "5.0.2+1"
version = "5.5.1+0"
[[deps.Markdown]]
deps = ["Base64"]
uuid = "d6f4376e-aef5-505a-96c1-9c027394607a"
version = "1.11.0"
[[deps.MarkdownAST]]
deps = ["AbstractTrees", "Markdown"]
git-tree-sha1 = "1dfa364acc47225afdc57c8998c988bc107ff0d2"
git-tree-sha1 = "465a70f0fc7d443a00dcdc3267a497397b8a3899"
uuid = "d0879d2d-cac2-40c8-9cee-1863dc0c7391"
version = "0.1.0"
version = "0.1.2"
[[deps.MbedTLS_jll]]
deps = ["Artifacts", "Libdl"]
uuid = "c8ffd9c3-330d-5841-b78e-0817d7145fa1"
version = "2.28.0+0"
version = "2.28.6+0"
[[deps.MicrosoftMPI_jll]]
deps = ["Artifacts", "JLLWrappers", "Libdl", "Pkg"]
git-tree-sha1 = "a16aa086d335ed7e0170c5265247db29172af2f9"
git-tree-sha1 = "f12a29c4400ba812841c6ace3f4efbb6dbb3ba01"
uuid = "9237b28f-5490-5468-be7b-bb81f5f5e6cf"
version = "10.1.3+2"
version = "10.1.4+2"
[[deps.Mmap]]
uuid = "a63ad114-7e13-5084-954f-fe012c677804"
version = "1.11.0"
[[deps.MozillaCACerts_jll]]
uuid = "14a3606d-f60d-562e-9121-12d972cd8159"
version = "2022.2.1"
version = "2023.12.12"
[[deps.NetworkOptions]]
uuid = "ca575930-c2e3-43a9-ace4-1e988b2c1908"
@@ -208,54 +271,78 @@ version = "1.2.0"
[[deps.OpenBLAS_jll]]
deps = ["Artifacts", "CompilerSupportLibraries_jll", "Libdl"]
uuid = "4536629a-c528-5b80-bd46-f80d51c5b363"
version = "0.3.20+0"
version = "0.3.27+1"
[[deps.OpenMPI_jll]]
deps = ["Artifacts", "CompilerSupportLibraries_jll", "JLLWrappers", "LazyArtifacts", "Libdl", "MPIPreferences", "Pkg", "TOML"]
git-tree-sha1 = "346d6b357a480300ed7854dbc70e746ac52e10fd"
deps = ["Artifacts", "CompilerSupportLibraries_jll", "Hwloc_jll", "JLLWrappers", "LazyArtifacts", "Libdl", "MPIPreferences", "TOML", "Zlib_jll"]
git-tree-sha1 = "bfce6d523861a6c562721b262c0d1aaeead2647f"
uuid = "fe0851c0-eecd-5654-98d4-656369965a5c"
version = "4.1.3+3"
version = "5.0.5+0"
[[deps.OpenSSL_jll]]
deps = ["Artifacts", "JLLWrappers", "Libdl"]
git-tree-sha1 = "7493f61f55a6cce7325f197443aa80d32554ba10"
uuid = "458c3c95-2e84-50aa-8efc-19380b2a3a95"
version = "3.0.15+1"
[[deps.PCRE2_jll]]
deps = ["Artifacts", "Libdl"]
uuid = "efcefdf7-47ab-520b-bdef-62a2eaa19f15"
version = "10.42.0+1"
[[deps.Parsers]]
deps = ["Dates"]
git-tree-sha1 = "3d5bf43e3e8b412656404ed9466f1dcbf7c50269"
deps = ["Dates", "PrecompileTools", "UUIDs"]
git-tree-sha1 = "8489905bcdbcfac64d1daa51ca07c0d8f0283821"
uuid = "69de0a69-1ddd-5017-9359-2bf0b02dc9f0"
version = "2.4.0"
[[deps.PartitionedArrays]]
deps = ["Distances", "IterativeSolvers", "LinearAlgebra", "MPI", "Printf", "SparseArrays", "SparseMatricesCSR"]
git-tree-sha1 = "94291b7ddeac39816572660383055870b41bca64"
uuid = "5a9dfac6-5c52-46f7-8278-5e2210713be9"
version = "0.2.11"
version = "2.8.1"
[[deps.Pkg]]
deps = ["Artifacts", "Dates", "Downloads", "LibGit2", "Libdl", "Logging", "Markdown", "Printf", "REPL", "Random", "SHA", "Serialization", "TOML", "Tar", "UUIDs", "p7zip_jll"]
deps = ["Artifacts", "Dates", "Downloads", "FileWatching", "LibGit2", "Libdl", "Logging", "Markdown", "Printf", "Random", "SHA", "TOML", "Tar", "UUIDs", "p7zip_jll"]
uuid = "44cfe95a-1eb2-52ea-b672-e2afdf69b78f"
version = "1.8.0"
version = "1.11.0"
weakdeps = ["REPL"]
[deps.Pkg.extensions]
REPLExt = "REPL"
[[deps.PkgVersion]]
deps = ["Pkg"]
git-tree-sha1 = "f9501cc0430a26bc3d156ae1b5b0c1b47af4d6da"
uuid = "eebad327-c553-4316-9ea0-9fa01ccd7688"
version = "0.3.3"
[[deps.PrecompileTools]]
deps = ["Preferences"]
git-tree-sha1 = "5aa36f7049a63a1528fe8f7c3f2113413ffd4e1f"
uuid = "aea7be01-6a6a-4083-8856-8a6e6704d82a"
version = "1.2.1"
[[deps.Preferences]]
deps = ["TOML"]
git-tree-sha1 = "47e5f437cc0e7ef2ce8406ce1e7e24d44915f88d"
git-tree-sha1 = "9306f6085165d270f7e3db02af26a400d580f5c6"
uuid = "21216c6a-2e73-6563-6e65-726566657250"
version = "1.3.0"
version = "1.4.3"
[[deps.Printf]]
deps = ["Unicode"]
uuid = "de0858da-6303-5e67-8744-51eddeeeb8d7"
version = "1.11.0"
[[deps.REPL]]
deps = ["InteractiveUtils", "Markdown", "Sockets", "Unicode"]
deps = ["InteractiveUtils", "Markdown", "Sockets", "StyledStrings", "Unicode"]
uuid = "3fa0cd96-eef1-5676-8a61-b3b8758bbffb"
version = "1.11.0"
[[deps.Random]]
deps = ["SHA", "Serialization"]
deps = ["SHA"]
uuid = "9a3f8284-a2c9-5f02-9a11-845980a1fd5c"
version = "1.11.0"
[[deps.RecipesBase]]
deps = ["SnoopPrecompile"]
git-tree-sha1 = "612a4d76ad98e9722c8ba387614539155a59e30c"
uuid = "3cdcf5f2-1ef4-517c-9805-6587b60abb01"
version = "1.3.0"
[[deps.RegistryInstances]]
deps = ["LazilyInitializedFields", "Pkg", "TOML", "Tar"]
git-tree-sha1 = "ffd19052caf598b8653b99404058fce14828be51"
uuid = "2792f1a3-b283-48e8-9a74-f99dce5104f3"
version = "0.1.0"
[[deps.Requires]]
deps = ["UUIDs"]
@@ -269,76 +356,61 @@ version = "0.7.0"
[[deps.Serialization]]
uuid = "9e88b42a-f829-5b0c-bbe9-9e923198166b"
[[deps.SnoopPrecompile]]
git-tree-sha1 = "f604441450a3c0569830946e5b33b78c928e1a85"
uuid = "66db9d55-30c0-4569-8b51-7e840670fc0c"
version = "1.0.1"
version = "1.11.0"
[[deps.Sockets]]
uuid = "6462fe0b-24de-5631-8697-dd941f90decc"
version = "1.11.0"
[[deps.SparseArrays]]
deps = ["LinearAlgebra", "Random"]
uuid = "2f01184e-e22b-5df5-ae63-d93ebab69eaf"
[[deps.SparseMatricesCSR]]
deps = ["LinearAlgebra", "SparseArrays", "SuiteSparse"]
git-tree-sha1 = "4870b3e7db7063927b163fb981bd579410b68b2d"
uuid = "a0a7dd2c-ebf4-11e9-1f05-cf50bc540ca1"
version = "0.6.6"
[[deps.Statistics]]
deps = ["LinearAlgebra", "SparseArrays"]
uuid = "10745b16-79ce-11e8-11f9-7d13ad32a3b2"
[[deps.StatsAPI]]
deps = ["LinearAlgebra"]
git-tree-sha1 = "f9af7f195fb13589dd2e2d57fdb401717d2eb1f6"
uuid = "82ae8749-77ed-4fe6-ae5f-f523153014b0"
version = "1.5.0"
[[deps.SuiteSparse]]
deps = ["Libdl", "LinearAlgebra", "Serialization", "SparseArrays"]
uuid = "4607b0f0-06f3-5cda-b6b1-a6196a1729e9"
[[deps.StyledStrings]]
uuid = "f489334b-da3d-4c2e-b8f0-e476e12c162b"
version = "1.11.0"
[[deps.TOML]]
deps = ["Dates"]
uuid = "fa267f1f-6049-4f14-aa54-33bafae1ed76"
version = "1.0.0"
version = "1.0.3"
[[deps.Tar]]
deps = ["ArgTools", "SHA"]
uuid = "a4e569a6-e804-4fa4-b0f3-eef7a1d5b13e"
version = "1.10.1"
version = "1.10.0"
[[deps.Test]]
deps = ["InteractiveUtils", "Logging", "Random", "Serialization"]
uuid = "8dfed614-e22c-5e08-85e1-65c5234f0b40"
version = "1.11.0"
[[deps.TranscodingStreams]]
git-tree-sha1 = "0c45878dcfdcfa8480052b6ab162cdd138781742"
uuid = "3bb67fe8-82b1-5028-8e26-92a6c54297fa"
version = "0.11.3"
[[deps.UUIDs]]
deps = ["Random", "SHA"]
uuid = "cf7118a7-6976-5b1a-9a39-7adc72f591a4"
version = "1.11.0"
[[deps.Unicode]]
uuid = "4ec0a83e-493e-50e2-b9ac-8f72acf5a8f5"
version = "1.11.0"
[[deps.Zlib_jll]]
deps = ["Libdl"]
uuid = "83775a58-1f1d-513f-b197-d71354ab007a"
version = "1.2.12+3"
version = "1.2.13+1"
[[deps.libblastrampoline_jll]]
deps = ["Artifacts", "Libdl", "OpenBLAS_jll"]
deps = ["Artifacts", "Libdl"]
uuid = "8e850b90-86db-534c-a0d3-1478176c7d93"
version = "5.1.1+0"
version = "5.11.0+0"
[[deps.nghttp2_jll]]
deps = ["Artifacts", "Libdl"]
uuid = "8e850ede-7688-5339-a07c-302acd2aaf8d"
version = "1.48.0+0"
version = "1.59.0+0"
[[deps.p7zip_jll]]
deps = ["Artifacts", "Libdl"]
uuid = "3f19e933-33d8-53b3-aaab-bd5110c3b7a0"
version = "17.4.0+0"
version = "17.4.0+2"

docs/Project.toml | 1

@@ -1,3 +1,4 @@
[deps]
Changelog = "5217a498-cd5d-4ec6-b8c2-9b85a09b6e3e"
Documenter = "e30172f5-a6a5-5a46-863b-614d45cd2de4"
HYPRE = "b5ffcf37-a2bd-41ab-a3da-4bd9bc8ad771"

docs/liveserver.jl | 24

@@ -0,0 +1,24 @@
#!/usr/bin/env julia
# Root of the repository
const repo_root = dirname(@__DIR__)
# Make sure docs environment is active and instantiated
import Pkg
Pkg.activate(@__DIR__)
Pkg.instantiate()
# Communicate with docs/make.jl that we are running in live mode
push!(ARGS, "liveserver")
# Run LiveServer.servedocs(...)
import LiveServer
LiveServer.servedocs(;
# Documentation root where make.jl and src/ are located
foldername = joinpath(repo_root, "docs"),
# Extra source folder to watch for changes
include_dirs = [
# Watch the src folder so docstrings can be Revise'd
joinpath(repo_root, "src"),
],
)

docs/make.jl | 10

@@ -7,6 +7,15 @@ end
using Documenter
using HYPRE
using Changelog
# Changelog
Changelog.generate(
Changelog.Documenter(),
joinpath(@__DIR__, "..", "CHANGELOG.md"),
joinpath(@__DIR__, "src", "changelog.md");
repo = "fredrikekre/HYPRE.jl",
)
makedocs(
sitename = "HYPRE.jl",
@@ -16,6 +25,7 @@ makedocs(
modules = [HYPRE],
pages = Any[
"Home" => "index.md",
hide("Changelog" => "changelog.md"),
"matrix-vector.md",
"solvers-preconditioners.md",
"libhypre.md",

docs/src/api.md | 13

@@ -6,6 +6,14 @@
HYPRE.Init
```
## Matrix/vector creation
```@docs
HYPRE.start_assemble!
HYPRE.assemble!
HYPRE.finish_assemble!
```
## Solvers and preconditioners
```@docs
@@ -24,3 +32,8 @@ HYPRE.ILU
HYPRE.PCG
HYPRE.ParaSails
```
```@docs
HYPRE.GetNumIterations
HYPRE.GetFinalRelativeResidualNorm
```

docs/src/libhypre.md | 16

@@ -15,15 +15,13 @@ directly.
Functions from the `LibHYPRE` submodule can be used together with the high level interface.
This is useful when you need some functionality from the library which isn't exposed in the
high level interface. Many functions require passing a reference to a matrix/vector or a
solver. These can be obtained as follows:
| C type signature | Argument to pass |
|:---------------------|:-------------------------------------|
| `HYPRE_IJMatrix` | `A.ijmatrix` where `A::HYPREMatrix` |
| `HYPRE_ParCSRMatrix` | `A.parmatrix` where `A::HYPREMatrix` |
| `HYPRE_IJVector` | `b.ijvector` where `b::HYPREVector` |
| `HYPRE_ParVector` | `b.parvector` where `b::HYPREVector` |
| `HYPRE_Solver` | `s.solver` where `s::HYPRESolver` |
solver. HYPRE.jl defines the appropriate conversion methods used by `ccall` such that
- `A::HYPREMatrix` can be passed to `HYPRE_*` functions with `HYPRE_IJMatrix` or
`HYPRE_ParCSRMatrix` in the signature
- `b::HYPREVector` can be passed to `HYPRE_*` functions with `HYPRE_IJVector` or
`HYPRE_ParVector` in the signature
- `s::HYPRESolver` can be passed to `HYPRE_*` functions with `HYPRE_Solver` in the
signature
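
For example, a `HYPREMatrix` can be passed straight to a low-level call (a minimal sketch;
`A` is assumed to be an already assembled `HYPREMatrix`, and `HYPRE_IJMatrixPrint` is the C
API function for writing a matrix to file):

```julia
using HYPRE.LibHYPRE: HYPRE_IJMatrixPrint

# A::HYPREMatrix is converted to HYPRE_IJMatrix automatically by ccall
HYPRE_IJMatrixPrint(A, "A.out")
```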
[^1]: Bindings are generated using
[Clang.jl](https://github.com/JuliaInterop/Clang.jl), see

docs/src/matrix-vector.md | 61

@@ -5,15 +5,59 @@ datastructures. Specifically it uses the [IJ System
Interface](https://hypre.readthedocs.io/en/latest/api-int-ij.html) which can be used for
general sparse matrices.
HYPRE.jl defines conversion methods from standard Julia datastructures to `HYPREMatrix` and
`HYPREVector`, respectively. See the following sections for details:
`HYPREMatrix` and `HYPREVector` can be constructed either by assembling directly, or by
first assembling into a Julia datastructure and then converting it. These methods are
outlined in the following sections:
```@contents
Pages = ["hypre-matrix-vector.md"]
Pages = ["matrix-vector.md"]
Depth = 2:2
```
## PartitionedArrays.jl (multi-process)
## Direct assembly (multi-/single-process)
Creating `HYPREMatrix` and/or `HYPREVector` directly is possible by first creating an
assembler which is used to add all individual contributions to the matrix/vector. The
required steps are:
1. Create a new matrix and/or vector using the constructor.
2. Create an assembler and initialize the assembling procedure using
[`HYPRE.start_assemble!`](@ref).
3. Assemble all non-zero contributions (e.g. element matrix/vector in a finite element
simulation) using [`HYPRE.assemble!`](@ref).
4. Finalize the assembly using [`HYPRE.finish_assemble!`](@ref).
After these steps the matrix and vector are ready to pass to the solver. In case of multiple
consecutive solves with the same sparsity pattern (e.g. multiple Newton steps, multiple time
steps, ...) it is possible to reuse the same matrix by simply skipping the first step above.
**Example pseudocode**
```julia
# MPI communicator
comm = MPI.COMM_WORLD # MPI.COMM_SELF for single-process setups
# Create empty matrix and vector -- this process owns rows ilower to iupper
A = HYPREMatrix(comm, ilower, iupper)
b = HYPREVector(comm, ilower, iupper)
# Create assembler
assembler = HYPRE.start_assemble!(A, b)
# Assemble contributions from all elements owned by this process
for element in owned_elements
Ae, be = compute_element_contribution(...)
global_indices = get_global_indices(...)
HYPRE.assemble!(assembler, global_indices, Ae, be)
end
# Finalize the assembly
A, b = HYPRE.finish_assemble!(assembler)
```
## Create from PartitionedArrays.jl (multi-process)
HYPRE.jl integrates seamlessly with `PSparseMatrix` and `PVector` from the
[PartitionedArrays.jl](https://github.com/fverdugo/PartitionedArrays.jl) package. These can
@@ -71,7 +115,7 @@ copy!(x, x_h)
```
## `SparseMatrixCSC` / `SparseMatrixCSR` (single-process)
## Create from `SparseMatrixCSC` / `SparseMatrixCSR` (single-process)
HYPRE.jl also supports working directly with `SparseMatrixCSC` (from the
[SparseArrays.jl](https://github.com/JuliaSparse/SparseArrays.jl) standard library) and
@@ -100,10 +144,3 @@ x = solve(solver, A, b)
x = zeros(length(b))
solve!(solver, x, A, b)
```
## `SparseMatrixCSC` / `SparseMatrixCSR` (multi-process)
!!! warning
This interface isn't finalized yet and is therefore not documented since it
is subject to change.

examples/ex5.jl | 33

@@ -85,7 +85,9 @@ function main(argc, argv)
end
# Preliminaries: want at least one processor per row
if n * n < num_procs; n = trunc(Int, sqrt(n)) + 1; end
if n * n < num_procs
n = trunc(Int, sqrt(n)) + 1
end
N = n * n # global number of rows
h = 1.0 / (n + 1) # mesh size
h2 = h * h
@@ -257,8 +259,7 @@ function main(argc, argv)
num_iterations = Ref{Cint}()
final_res_norm = Ref{Cdouble}()
# AMG
if solver_id == 0
if solver_id == 0 # AMG
# Create solver
HYPRE_BoomerAMGCreate(solver_ref)
solver = solver_ref[]
@@ -270,7 +271,7 @@ function main(argc, argv)
HYPRE_BoomerAMGSetRelaxOrder(solver, 1) # uses C/F relaxation
HYPRE_BoomerAMGSetNumSweeps(solver, 1) # Sweeps on each level
HYPRE_BoomerAMGSetMaxLevels(solver, 20) # maximum number of levels
HYPRE_BoomerAMGSetTol(solver, 1e-7) # conv. tolerance
HYPRE_BoomerAMGSetTol(solver, 1.0e-7) # conv. tolerance
# Now setup and solve!
HYPRE_BoomerAMGSetup(solver, parcsr_A, par_b, par_x)
@@ -289,15 +290,14 @@ function main(argc, argv)
# Destroy solver
HYPRE_BoomerAMGDestroy(solver)
# PCG
elseif solver_id == 50
elseif solver_id == 50 # PCG
# Create solver
HYPRE_ParCSRPCGCreate(MPI_COMM_WORLD, solver_ref)
solver = solver_ref[]
# Set some parameters (See Reference Manual for more parameters)
HYPRE_PCGSetMaxIter(solver, 1000) # max iterations
HYPRE_PCGSetTol(solver, 1e-7) # conv. tolerance
HYPRE_PCGSetTol(solver, 1.0e-7) # conv. tolerance
HYPRE_PCGSetTwoNorm(solver, 1) # use the two norm as the stopping criteria
HYPRE_PCGSetPrintLevel(solver, 2) # prints out the iteration info
HYPRE_PCGSetLogging(solver, 1) # needed to get run info later
@@ -319,15 +319,14 @@ function main(argc, argv)
# Destroy solver
HYPRE_ParCSRPCGDestroy(solver)
# PCG with AMG preconditioner
elseif solver_id == 1
elseif solver_id == 1 # PCG with AMG preconditioner
# Create solver
HYPRE_ParCSRPCGCreate(MPI_COMM_WORLD, solver_ref)
solver = solver_ref[]
# Set some parameters (See Reference Manual for more parameters)
HYPRE_PCGSetMaxIter(solver, 1000) # max iterations
HYPRE_PCGSetTol(solver, 1e-7) # conv. tolerance
HYPRE_PCGSetTol(solver, 1.0e-7) # conv. tolerance
HYPRE_PCGSetTwoNorm(solver, 1) # use the two norm as the stopping criteria
HYPRE_PCGSetPrintLevel(solver, 2) # print solve info
HYPRE_PCGSetLogging(solver, 1) # needed to get run info later
@@ -364,15 +363,14 @@ function main(argc, argv)
HYPRE_ParCSRPCGDestroy(solver)
HYPRE_BoomerAMGDestroy(precond)
# PCG with Parasails Preconditioner
elseif solver_id == 8
elseif solver_id == 8 # PCG with Parasails Preconditioner
# Create solver
HYPRE_ParCSRPCGCreate(MPI_COMM_WORLD, solver_ref)
solver = solver_ref[]
# Set some parameters (See Reference Manual for more parameters)
HYPRE_PCGSetMaxIter(solver, 1000) # max iterations
HYPRE_PCGSetTol(solver, 1e-7) # conv. tolerance
HYPRE_PCGSetTol(solver, 1.0e-7) # conv. tolerance
HYPRE_PCGSetTwoNorm(solver, 1) # use the two norm as the stopping criteria
HYPRE_PCGSetPrintLevel(solver, 2) # print solve info
HYPRE_PCGSetLogging(solver, 1) # needed to get run info later
@@ -412,8 +410,7 @@ function main(argc, argv)
HYPRE_ParCSRPCGDestroy(solver)
HYPRE_ParaSailsDestroy(precond)
# Flexible GMRES with AMG Preconditioner
elseif solver_id == 61
elseif solver_id == 61 # Flexible GMRES with AMG Preconditioner
# Create solver
HYPRE_ParCSRFlexGMRESCreate(MPI_COMM_WORLD, solver_ref)
@@ -422,7 +419,7 @@ function main(argc, argv)
# Set some parameters (See Reference Manual for more parameters)
HYPRE_FlexGMRESSetKDim(solver, 30) # restart
HYPRE_FlexGMRESSetMaxIter(solver, 1000) # max iterations
HYPRE_FlexGMRESSetTol(solver, 1e-7) # conv. tolerance
HYPRE_FlexGMRESSetTol(solver, 1.0e-7) # conv. tolerance
HYPRE_FlexGMRESSetPrintLevel(solver, 2) # print solve info
HYPRE_FlexGMRESSetLogging(solver, 1) # needed to get run info later
@@ -459,7 +456,9 @@ function main(argc, argv)
HYPRE_BoomerAMGDestroy(precond)
else
if myid == 0; println("Invalid solver id specified."); end
if myid == 0
println("Invalid solver id specified.")
end
end
# Clean up

ext/HYPREPartitionedArrays.jl | 300

@@ -0,0 +1,300 @@
module HYPREPartitionedArrays
using HYPRE.LibHYPRE: @check, HYPRE_BigInt, HYPRE_Complex, HYPRE_IJMatrixSetValues,
HYPRE_IJVectorGetValues, HYPRE_IJVectorInitialize, HYPRE_IJVectorSetValues, HYPRE_Int
using HYPRE: HYPRE, HYPREMatrix, HYPRESolver, HYPREVector, Internals
using MPI: MPI
using PartitionedArrays: PartitionedArrays, AbstractLocalIndices, MPIArray, PSparseMatrix,
PVector, SplitMatrix, ghost_to_global, local_values, own_to_global, own_values,
partition
using SparseArrays: SparseArrays, SparseMatrixCSC, nonzeros, nzrange, rowvals
using SparseMatricesCSR: SparseMatrixCSR, colvals
##################################################
# PartitionedArrays.PSparseMatrix -> HYPREMatrix #
##################################################
function Internals.to_hypre_data(
A::SplitMatrix{<:SparseMatrixCSC}, r::AbstractLocalIndices, c::AbstractLocalIndices
)
# Own/ghost to global index mappings
own_to_global_row = own_to_global(r)
own_to_global_col = own_to_global(c)
ghost_to_global_col = ghost_to_global(c)
# HYPRE requires contiguous row indices
ilower = own_to_global_row[1]
iupper = own_to_global_row[end]
@assert iupper - ilower + 1 == length(own_to_global_row)
# Extract sparse matrices from the SplitMatrix. We are only interested in the owned
# rows, so only consider own-own and own-ghost blocks.
Aoo = A.blocks.own_own::SparseMatrixCSC
Aoo_rows = rowvals(Aoo)
Aoo_vals = nonzeros(Aoo)
Aog = A.blocks.own_ghost::SparseMatrixCSC
Aog_rows = rowvals(Aog)
Aog_vals = nonzeros(Aog)
@assert size(Aoo, 1) == size(Aog, 1) == length(own_to_global_row)
# Initialize the data buffers HYPRE wants
nrows = HYPRE_Int(length(own_to_global_row)) # Total number of rows
ncols = zeros(HYPRE_Int, nrows) # Number of columns for each row
rows = collect(HYPRE_BigInt, ilower:iupper) # The row indices
# cols = Vector{HYPRE_BigInt}(undef, nnz) # The column indices
# values = Vector{HYPRE_Complex}(undef, nnz) # The values
# First pass to count nnz per row (note that global column indices and column
# permutation don't matter for this pass)
@inbounds for own_col in 1:size(Aoo, 2)
for k in nzrange(Aoo, own_col)
own_row = Aoo_rows[k]
ncols[own_row] += 1
end
end
@inbounds for ghost_col in 1:size(Aog, 2)
for k in nzrange(Aog, ghost_col)
own_row = Aog_rows[k]
ncols[own_row] += 1
end
end
# Initialize remaining buffers now that nnz is known
nnz = sum(ncols)
cols = Vector{HYPRE_BigInt}(undef, nnz)
values = Vector{HYPRE_Complex}(undef, nnz)
# Keep track of the last index used for every row
lastinds = zeros(Int, nrows)
cumsum!((@view lastinds[2:end]), (@view ncols[1:(end - 1)]))
# Second pass to populate the output. Here we need to map column
# indices from own/ghost to global
@inbounds for own_col in 1:size(Aoo, 2)
for k in nzrange(Aoo, own_col)
own_row = Aoo_rows[k]
i = lastinds[own_row] += 1
values[i] = Aoo_vals[k]
cols[i] = own_to_global_col[own_col]
end
end
@inbounds for ghost_col in 1:size(Aog, 2)
for k in nzrange(Aog, ghost_col)
own_row = Aog_rows[k]
i = lastinds[own_row] += 1
values[i] = Aog_vals[k]
cols[i] = ghost_to_global_col[ghost_col]
end
end
# Sanity checks and return
@assert nrows == length(ncols) == length(rows)
return nrows, ncols, rows, cols, values
end
function Internals.to_hypre_data(
A::SplitMatrix{<:SparseMatrixCSR}, r::AbstractLocalIndices, c::AbstractLocalIndices
)
# Own/ghost to global index mappings
own_to_global_row = own_to_global(r)
own_to_global_col = own_to_global(c)
ghost_to_global_col = ghost_to_global(c)
# HYPRE requires contiguous row indices
ilower = own_to_global_row[1]
iupper = own_to_global_row[end]
@assert iupper - ilower + 1 == length(own_to_global_row)
# Extract sparse matrices from the SplitMatrix. We are only interested in the owned
# rows, so only consider own-own and own-ghost blocks.
Aoo = A.blocks.own_own::SparseMatrixCSR
Aoo_cols = colvals(Aoo)
Aoo_vals = nonzeros(Aoo)
Aog = A.blocks.own_ghost::SparseMatrixCSR
Aog_cols = colvals(Aog)
Aog_vals = nonzeros(Aog)
@assert size(Aoo, 1) == size(Aog, 1) == length(own_to_global_row)
# Initialize the data buffers HYPRE wants
nnz = SparseArrays.nnz(Aoo) + SparseArrays.nnz(Aog)
nrows = HYPRE_Int(iupper - ilower + 1) # Total number of rows
ncols = zeros(HYPRE_Int, nrows) # Number of columns for each row
rows = collect(HYPRE_BigInt, ilower:iupper) # The row indices
cols = Vector{HYPRE_BigInt}(undef, nnz) # The column indices
values = Vector{HYPRE_Complex}(undef, nnz) # The values
# For CSR we only need a single pass over the owned rows to collect everything
i = 0
for own_row in 1:size(Aoo, 1)
nzro = nzrange(Aoo, own_row)
nzrg = nzrange(Aog, own_row)
ncols[own_row] = length(nzro) + length(nzrg)
for k in nzro
i += 1
own_col = Aoo_cols[k]
cols[i] = own_to_global_col[own_col]
values[i] = Aoo_vals[k]
end
for k in nzrg
i += 1
ghost_col = Aog_cols[k]
cols[i] = ghost_to_global_col[ghost_col]
values[i] = Aog_vals[k]
end
end
# Sanity checks and return
@assert nnz == i
@assert nrows == length(ncols) == length(rows)
return nrows, ncols, rows, cols, values
end
function Internals.get_comm(A::Union{PSparseMatrix{<:Any, <:M}, PVector{<:Any, <:M}}) where {M <: MPIArray}
return partition(A).comm
end
Internals.get_comm(_::Union{PSparseMatrix, PVector}) = MPI.COMM_SELF
function Internals.get_proc_rows(A::Union{PSparseMatrix, PVector})
ilower::HYPRE_BigInt = typemax(HYPRE_BigInt)
iupper::HYPRE_BigInt = typemin(HYPRE_BigInt)
map(partition(axes(A, 1))) do a
# This is a map over the local process' owned indices. For MPI it will
# be a single value but for DebugArray / Array it will have multiple
# values.
o_to_g = own_to_global(a)
ilower_part = o_to_g[1]
iupper_part = o_to_g[end]
ilower = min(ilower, convert(HYPRE_BigInt, ilower_part))
iupper = max(iupper, convert(HYPRE_BigInt, iupper_part))
end
return ilower, iupper
end
function HYPRE.HYPREMatrix(B::PSparseMatrix)
# Use the same communicator as the matrix
comm = Internals.get_comm(B)
# Fetch rows owned by this process
ilower, iupper = Internals.get_proc_rows(B)
# Create the IJ matrix
A = HYPREMatrix(comm, ilower, iupper)
# Set all the values
map(local_values(B), partition(axes(B, 1)), partition(axes(B, 2))) do Bv, Br, Bc
nrows, ncols, rows, cols, values = Internals.to_hypre_data(Bv, Br, Bc)
@check HYPRE_IJMatrixSetValues(A, nrows, ncols, rows, cols, values)
return nothing
end
# Finalize
Internals.assemble_matrix(A)
return A
end
############################################
# PartitionedArrays.PVector -> HYPREVector #
############################################
function HYPRE.HYPREVector(v::PVector)
# Use the same communicator as the matrix
comm = Internals.get_comm(v)
# Fetch rows owned by this process
ilower, iupper = Internals.get_proc_rows(v)
# Create the IJ vector
b = HYPREVector(comm, ilower, iupper)
# Set all the values
map(own_values(v), partition(axes(v, 1))) do vo, vr
o_to_g = own_to_global(vr)
ilower_part = o_to_g[1]
iupper_part = o_to_g[end]
# Option 1: Set all values
nvalues = HYPRE_Int(iupper_part - ilower_part + 1)
indices = collect(HYPRE_BigInt, ilower_part:iupper_part)
# TODO: Could probably just pass the full vector even if it is too long
# values = convert(Vector{HYPRE_Complex}, vv)
values = collect(HYPRE_Complex, vo)
# # Option 2: Set only non-zeros
# indices = HYPRE_BigInt[]
# values = HYPRE_Complex[]
# for (i, vi) in zip(ilower_part:iupper_part, vo)
# if !iszero(vi)
# push!(indices, i)
# push!(values, vi)
# end
# end
# nvalues = length(indices)
@check HYPRE_IJVectorSetValues(b, nvalues, indices, values)
return nothing
end
# Finalize
Internals.assemble_vector(b)
return b
end
function copy_check(dst::HYPREVector, src::PVector)
il_dst, iu_dst = Internals.get_proc_rows(dst)
il_src, iu_src = Internals.get_proc_rows(src)
if il_dst != il_src && iu_dst != iu_src
# TODO: Why require this?
msg = "row owner mismatch between dst ($(il_dst:iu_dst)) and src ($(il_src:iu_src))"
throw(ArgumentError(msg))
end
return
end
# TODO: Other eltypes could be supported by using an intermediate buffer
function Base.copy!(dst::PVector{<:AbstractVector{HYPRE_Complex}}, src::HYPREVector)
copy_check(src, dst)
map(own_values(dst), partition(axes(dst, 1))) do ov, vr
o_to_g = own_to_global(vr)
il_src_part = o_to_g[1]
iu_src_part = o_to_g[end]
nvalues = HYPRE_Int(iu_src_part - il_src_part + 1)
indices = collect(HYPRE_BigInt, il_src_part:iu_src_part)
values = ov
@check HYPRE_IJVectorGetValues(src, nvalues, indices, values)
end
return dst
end
function Base.copy!(dst::HYPREVector, src::PVector{<:AbstractVector{HYPRE_Complex}})
copy_check(dst, src)
# Re-initialize the vector
@check HYPRE_IJVectorInitialize(dst)
map(own_values(src), partition(axes(src, 1))) do ov, vr
o_to_g = own_to_global(vr)
ilower_src_part = o_to_g[1]
iupper_src_part = o_to_g[end]
nvalues = HYPRE_Int(iupper_src_part - ilower_src_part + 1)
indices = collect(HYPRE_BigInt, ilower_src_part:iupper_src_part)
values = ov
@check HYPRE_IJVectorSetValues(dst, nvalues, indices, values)
end
# TODO: It shouldn't be necessary to assemble here since we only set owned rows (?)
# @check HYPRE_IJVectorAssemble(dst)
# TODO: Necessary to recreate the ParVector? Running some examples it seems like it is
# not needed.
return dst
end
######################################
# PartitionedArrays solver interface #
######################################
# TODO: Would it be useful with a method that copied the solution to b instead?
function HYPRE.solve(solver::HYPRESolver, A::PSparseMatrix, b::PVector)
hypre_x = HYPRE.solve(solver, HYPREMatrix(A), HYPREVector(b))
x = copy!(similar(b, HYPRE_Complex), hypre_x)
return x
end
function HYPRE.solve!(solver::HYPRESolver, x::PVector, A::PSparseMatrix, b::PVector)
hypre_x = HYPREVector(x)
HYPRE.solve!(solver, hypre_x, HYPREMatrix(A), HYPREVector(b))
copy!(x, hypre_x)
return x
end
end # module HYPREPartitionedArrays

86
ext/HYPRESparseArrays.jl

@@ -0,0 +1,86 @@
module HYPRESparseArrays
using HYPRE.LibHYPRE: @check, HYPRE_BigInt, HYPRE_Complex, HYPRE_Int
using HYPRE:
HYPRE, HYPREMatrix, HYPRESolver, HYPREVector, HYPRE_IJMatrixSetValues, Internals
using MPI: MPI
using SparseArrays: SparseArrays, SparseMatrixCSC, nonzeros, nzrange, rowvals
##################################
# SparseMatrixCSC -> HYPREMatrix #
##################################
function Internals.to_hypre_data(A::SparseMatrixCSC, ilower, iupper)
Internals.check_n_rows(A, ilower, iupper)
nnz = SparseArrays.nnz(A)
A_rows = rowvals(A)
A_vals = nonzeros(A)
# Initialize the data buffers HYPRE wants
nrows = HYPRE_Int(iupper - ilower + 1) # Total number of rows
ncols = zeros(HYPRE_Int, nrows) # Number of columns for each row
rows = collect(HYPRE_BigInt, ilower:iupper) # The row indices
cols = Vector{HYPRE_BigInt}(undef, nnz) # The column indices
values = Vector{HYPRE_Complex}(undef, nnz) # The values
# First pass to count nnz per row
@inbounds for j in 1:size(A, 2)
for i in nzrange(A, j)
row = A_rows[i]
ncols[row] += 1
end
end
# Keep track of the last index used for every row
lastinds = zeros(Int, nrows)
cumsum!((@view lastinds[2:end]), (@view ncols[1:(end - 1)]))
# Second pass to populate the output
@inbounds for j in 1:size(A, 2)
for i in nzrange(A, j)
row = A_rows[i]
k = lastinds[row] += 1
val = A_vals[i]
cols[k] = j
values[k] = val
end
end
@assert nrows == length(ncols) == length(rows)
return nrows, ncols, rows, cols, values
end
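# Hedged illustration (added for clarity, not part of the commit): for a small
# matrix the two passes above produce HYPRE's row-wise buffers, e.g.
#
#     using SparseArrays
#     A = sparse([1.0 2.0; 0.0 3.0])
#     Internals.to_hypre_data(A, 1, 2)
#     # -> (2, [2, 1], [1, 2], [1, 2, 2], [1.0, 2.0, 3.0])
#     #    nrows, nonzeros per row, row ids, column ids grouped by row, values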
# Note: keep in sync with the SparseMatrixCSR method
function HYPRE.HYPREMatrix(comm::MPI.Comm, B::SparseMatrixCSC, ilower, iupper)
A = HYPREMatrix(comm, ilower, iupper)
nrows, ncols, rows, cols, values = Internals.to_hypre_data(B, ilower, iupper)
@check HYPRE_IJMatrixSetValues(A, nrows, ncols, rows, cols, values)
Internals.assemble_matrix(A)
return A
end
# Note: keep in sync with the SparseMatrixCSR method
function HYPRE.HYPREMatrix(B::SparseMatrixCSC, ilower = 1, iupper = size(B, 1))
return HYPREMatrix(MPI.COMM_SELF, B, ilower, iupper)
end
####################################
# SparseMatrixCSC solver interface #
####################################
# Note: keep in sync with the SparseMatrixCSR method
function HYPRE.solve(solver::HYPRESolver, A::SparseMatrixCSC, b::Vector)
hypre_x = HYPRE.solve(solver, HYPREMatrix(A), HYPREVector(b))
x = copy!(similar(b, HYPRE_Complex), hypre_x)
return x
end
# Note: keep in sync with the SparseMatrixCSR method
function HYPRE.solve!(solver::HYPRESolver, x::Vector, A::SparseMatrixCSC, b::Vector)
hypre_x = HYPREVector(x)
HYPRE.solve!(solver, hypre_x, HYPREMatrix(A), HYPREVector(b))
copy!(x, hypre_x)
return x
end
end # module HYPRESparseArrays
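# Hedged usage sketch (added for illustration, not part of the commit): with the
# methods above a plain SparseMatrixCSC can be passed straight to the solver
# interface; the HYPREMatrix/HYPREVector conversion happens internally. The matrix,
# right-hand side, and solver settings are made-up example values.
using HYPRE, SparseArrays
HYPRE.Init()
A = sparse([2.0 -1.0 0.0; -1.0 2.0 -1.0; 0.0 -1.0 2.0])
b = ones(3)
x = HYPRE.solve(HYPRE.PCG(; Tol = 1e-9), A, b)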

80
ext/HYPRESparseMatricesCSR.jl

@@ -0,0 +1,80 @@
module HYPRESparseMatricesCSR
using HYPRE.LibHYPRE: @check, HYPRE_BigInt, HYPRE_Complex, HYPRE_Int
using HYPRE: HYPRE, HYPREMatrix, HYPRESolver, HYPREVector, HYPRE_IJMatrixSetValues, Internals
using MPI: MPI
using SparseArrays: SparseArrays, nonzeros, nzrange
using SparseMatricesCSR: SparseMatrixCSR, colvals
##################################
# SparseMatrixCSR -> HYPREMatrix #
##################################
function Internals.to_hypre_data(A::SparseMatrixCSR, ilower, iupper)
Internals.check_n_rows(A, ilower, iupper)
nnz = SparseArrays.nnz(A)
A_cols = colvals(A)
A_vals = nonzeros(A)
# Initialize the data buffers HYPRE wants
nrows = HYPRE_Int(iupper - ilower + 1) # Total number of rows
ncols = Vector{HYPRE_Int}(undef, nrows) # Number of columns for each row
rows = collect(HYPRE_BigInt, ilower:iupper) # The row indices
cols = Vector{HYPRE_BigInt}(undef, nnz) # The column indices
values = Vector{HYPRE_Complex}(undef, nnz) # The values
# Loop over the rows and collect all values
k = 0
@inbounds for i in 1:size(A, 1)
nzr = nzrange(A, i)
ncols[i] = length(nzr)
for j in nzr
k += 1
col = A_cols[j]
val = A_vals[j]
cols[k] = col
values[k] = val
end
end
@assert nnz == k
@assert nrows == length(ncols) == length(rows)
return nrows, ncols, rows, cols, values
end
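# Hedged note (added for clarity, not part of the commit): SparseMatrixCSR already
# stores its nonzeros row by row, so a single pass suffices and the buffers mirror
# the internal storage. Assuming the same 2x2 example as for the CSC method:
#
#     using SparseMatricesCSR
#     B = sparsecsr([1, 1, 2], [1, 2, 2], [1.0, 2.0, 3.0])
#     Internals.to_hypre_data(B, 1, 2)
#     # -> (2, [2, 1], [1, 2], [1, 2, 2], [1.0, 2.0, 3.0])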
# Note: keep in sync with the SparseMatrixCSC method
function HYPRE.HYPREMatrix(comm::MPI.Comm, B::SparseMatrixCSR, ilower, iupper)
A = HYPREMatrix(comm, ilower, iupper)
nrows, ncols, rows, cols, values = Internals.to_hypre_data(B, ilower, iupper)
@check HYPRE_IJMatrixSetValues(A, nrows, ncols, rows, cols, values)
Internals.assemble_matrix(A)
return A
end
# Note: keep in sync with the SparseMatrixCSC method
function HYPRE.HYPREMatrix(B::SparseMatrixCSR, ilower = 1, iupper = size(B, 1))
return HYPREMatrix(MPI.COMM_SELF, B, ilower, iupper)
end
####################################
# SparseMatrixCSR solver interface #
####################################
# Note: keep in sync with the SparseMatrixCSC method
function HYPRE.solve(solver::HYPRESolver, A::SparseMatrixCSR, b::Vector)
hypre_x = HYPRE.solve(solver, HYPREMatrix(A), HYPREVector(b))
x = copy!(similar(b, HYPRE_Complex), hypre_x)
return x
end
# Note: keep in sync with the SparseMatrixCSC method
function HYPRE.solve!(solver::HYPRESolver, x::Vector, A::SparseMatrixCSR, b::Vector)
hypre_x = HYPREVector(x)
HYPRE.solve!(solver, hypre_x, HYPREMatrix(A), HYPREVector(b))
copy!(x, hypre_x)
return x
end
end # module HYPRESparseMatricesCSR
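# Hedged usage sketch (added for illustration, not part of the commit): in an MPI
# run each process passes only its locally owned rows together with the global row
# range it owns. The contiguous equal-sized row blocks and the diagonal test matrix
# below are assumptions made for the example, not requirements of the extension.
using HYPRE, MPI, SparseMatricesCSR
MPI.Init()
HYPRE.Init()
comm = MPI.COMM_WORLD
rank = MPI.Comm_rank(comm)
nlocal = 100                                  # rows owned by this process
nglobal = nlocal * MPI.Comm_size(comm)
ilower = rank * nlocal + 1
iupper = (rank + 1) * nlocal
# Local block: nlocal rows, column indices are *global* (as to_hypre_data expects)
A_local = sparsecsr(collect(1:nlocal), collect(ilower:iupper), fill(2.0, nlocal), nlocal, nglobal)
A = HYPREMatrix(comm, A_local, ilower, iupper)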

16
gen/Makefile

@@ -1,2 +1,14 @@
default:
julia --project generator.jl
MAKEDIR:=$(shell dirname $(realpath $(firstword $(MAKEFILE_LIST))))
LIBHYPRE:=$(shell dirname $(MAKEDIR))/lib/LibHYPRE.jl
generate: $(LIBHYPRE)
clean:
rm -f $(LIBHYPRE)
.PHONY: generate clean
$(LIBHYPRE): Project.toml Manifest.toml $(MAKEDIR)/generator.toml $(MAKEDIR)/generator.jl
julia --project generator.jl && \
sed -i -e '1s/^/local libHYPRE # Silence of the Langs(erver)\n\n/' -e 's/using HYPRE_jll/using HYPRE_jll: HYPRE_jll, libHYPRE/' -e 's/using CEnum/using CEnum: @cenum/' $(LIBHYPRE) && \
julia-1.11 --project=@runic -e 'using Runic; exit(Runic.main(ARGS))' -- -i $(LIBHYPRE)

140
gen/Manifest.toml

@@ -1,44 +1,47 @@
# This file is machine-generated - editing it directly is not advised
julia_version = "1.8.2"
julia_version = "1.11.2"
manifest_format = "2.0"
project_hash = "cc39013dba1e9068883c1b156d3b25864ebc62f8"
[[deps.ArgTools]]
uuid = "0dad84c5-d112-42e6-8d28-ef12dabb789f"
version = "1.1.1"
version = "1.1.2"
[[deps.Artifacts]]
uuid = "56f22d72-fd6d-98f1-02f0-08ddc0907c33"
version = "1.11.0"
[[deps.Base64]]
uuid = "2a0f44e3-6c83-55bd-87e4-b1978d98bd5f"
version = "1.11.0"
[[deps.CEnum]]
git-tree-sha1 = "eb4cb44a499229b3b8426dcfb5dd85333951ff90"
git-tree-sha1 = "389ad5c84de1ae7cf0e28e381131c98ea87d54fc"
uuid = "fa961155-64e5-5f13-b03f-caf6b980ea82"
version = "0.4.2"
version = "0.5.0"
[[deps.Clang]]
deps = ["CEnum", "Clang_jll", "Downloads", "Pkg", "TOML"]
git-tree-sha1 = "b7e356adf44b1d4eb7aa2b0961ec130730fa208f"
git-tree-sha1 = "2397d5da17ba4970f772a9888b208a0a1d77eb5d"
uuid = "40e3b903-d033-50b4-a0cc-940c62c95e31"
version = "0.16.3"
version = "0.18.3"
[[deps.Clang_jll]]
deps = ["Artifacts", "JLLWrappers", "Libdl", "Pkg", "Zlib_jll", "libLLVM_jll"]
git-tree-sha1 = "0dfffba1b32bb3e30cb0372bfe666a5ddffe37fb"
deps = ["Artifacts", "JLLWrappers", "Libdl", "TOML", "Zlib_jll", "libLLVM_jll"]
git-tree-sha1 = "0dc9bd89383fd6fffed127e03fc42ed409cc865b"
uuid = "0ee61d77-7f21-5576-8119-9fcc46b10100"
version = "13.0.1+3"
version = "16.0.6+4"
[[deps.CompilerSupportLibraries_jll]]
deps = ["Artifacts", "Libdl"]
uuid = "e66e0078-7015-5450-92f7-15fbd957f2ae"
version = "0.5.2+0"
version = "1.1.1+0"
[[deps.Dates]]
deps = ["Printf"]
uuid = "ade2ca70-3891-5945-98fb-dc099432e06a"
version = "1.11.0"
[[deps.Downloads]]
deps = ["ArgTools", "FileWatching", "LibCURL", "NetworkOptions"]
@@ -47,6 +50,7 @@ version = "1.6.0"
[[deps.FileWatching]]
uuid = "7b1f6079-737a-58dc-b8bc-7a2ca5c1b5ee"
version = "1.11.0"
[[deps.HYPRE_jll]]
deps = ["Artifacts", "JLLWrappers", "LAPACK_jll", "LazyArtifacts", "Libdl", "MPICH_jll", "MPIPreferences", "MPItrampoline_jll", "MicrosoftMPI_jll", "OpenBLAS_jll", "OpenMPI_jll", "Pkg", "TOML"]
@@ -54,87 +58,99 @@ git-tree-sha1 = "b77d3eca75f8442e034ccf415c87405a49e77985"
uuid = "0a602bbd-b08b-5d75-8d32-0de6eef44785"
version = "2.23.1+1"
[[deps.InteractiveUtils]]
deps = ["Markdown"]
uuid = "b77e0a4c-d291-57a0-90e8-8db25a27a240"
[[deps.Hwloc_jll]]
deps = ["Artifacts", "JLLWrappers", "Libdl"]
git-tree-sha1 = "50aedf345a709ab75872f80a2779568dc0bb461b"
uuid = "e33a78d0-f292-5ffc-b300-72abe9b543c8"
version = "2.11.2+3"
[[deps.JLLWrappers]]
deps = ["Preferences"]
git-tree-sha1 = "abc9885a7ca2052a736a600f7fa66209f96506e1"
deps = ["Artifacts", "Preferences"]
git-tree-sha1 = "a007feb38b422fbdab534406aeca1b86823cb4d6"
uuid = "692b3bcd-3c85-4b1f-b108-f13ce0eb3210"
version = "1.4.1"
version = "1.7.0"
[[deps.LAPACK_jll]]
deps = ["Artifacts", "CompilerSupportLibraries_jll", "JLLWrappers", "Libdl", "Pkg", "libblastrampoline_jll"]
git-tree-sha1 = "a539affa8228208f5a3396037165b04bff9a2ba6"
deps = ["Artifacts", "CompilerSupportLibraries_jll", "JLLWrappers", "Libdl", "libblastrampoline_jll"]
git-tree-sha1 = "47a6ccfc4b78494669cd7c502ba112ee2b24eb45"
uuid = "51474c39-65e3-53ba-86ba-03b1b862ec14"
version = "3.10.0+1"
version = "3.12.0+3"
[[deps.LazyArtifacts]]
deps = ["Artifacts", "Pkg"]
uuid = "4af54fe1-eca0-43a8-85a7-787d91b784e3"
version = "1.11.0"
[[deps.LibCURL]]
deps = ["LibCURL_jll", "MozillaCACerts_jll"]
uuid = "b27032c2-a3e7-50c8-80cd-2d36dbcbfd21"
version = "0.6.3"
version = "0.6.4"
[[deps.LibCURL_jll]]
deps = ["Artifacts", "LibSSH2_jll", "Libdl", "MbedTLS_jll", "Zlib_jll", "nghttp2_jll"]
uuid = "deac9b47-8bc7-5906-a0fe-35ac56dc84c0"
version = "7.84.0+0"
version = "8.6.0+0"
[[deps.LibGit2]]
deps = ["Base64", "NetworkOptions", "Printf", "SHA"]
deps = ["Base64", "LibGit2_jll", "NetworkOptions", "Printf", "SHA"]
uuid = "76f85450-5226-5b5a-8eaa-529ad045b433"
version = "1.11.0"
[[deps.LibGit2_jll]]
deps = ["Artifacts", "LibSSH2_jll", "Libdl", "MbedTLS_jll"]
uuid = "e37daf67-58a4-590a-8e99-b0245dd2ffc5"
version = "1.7.2+0"
[[deps.LibSSH2_jll]]
deps = ["Artifacts", "Libdl", "MbedTLS_jll"]
uuid = "29816b5a-b9ab-546f-933c-edad1886dfa8"
version = "1.10.2+0"
version = "1.11.0+1"
[[deps.Libdl]]
uuid = "8f399da3-3557-5675-b5ff-fb832c97cbdb"
version = "1.11.0"
[[deps.Logging]]
uuid = "56ddb016-857b-54e1-b83d-db4d58db5568"
version = "1.11.0"
[[deps.MPICH_jll]]
deps = ["Artifacts", "CompilerSupportLibraries_jll", "JLLWrappers", "LazyArtifacts", "Libdl", "MPIPreferences", "Pkg", "TOML"]
git-tree-sha1 = "6d4fa43afab4611d090b11617ecea1a144b21d35"
deps = ["Artifacts", "CompilerSupportLibraries_jll", "Hwloc_jll", "JLLWrappers", "LazyArtifacts", "Libdl", "MPIPreferences", "TOML"]
git-tree-sha1 = "7715e65c47ba3941c502bffb7f266a41a7f54423"
uuid = "7cb0a576-ebde-5e09-9194-50597f1243b4"
version = "4.0.2+5"
version = "4.2.3+0"
[[deps.MPIPreferences]]
deps = ["Libdl", "Preferences"]
git-tree-sha1 = "9959c42b41220206eeda9004f695d913e2245658"
git-tree-sha1 = "c105fe467859e7f6e9a852cb15cb4301126fac07"
uuid = "3da0fdf6-3ccc-4f1b-acd9-58baa6c99267"
version = "0.1.5"
version = "0.1.11"
[[deps.MPItrampoline_jll]]
deps = ["Artifacts", "CompilerSupportLibraries_jll", "JLLWrappers", "LazyArtifacts", "Libdl", "MPIPreferences", "Pkg", "TOML"]
git-tree-sha1 = "b3f9e42685b4ad614eca0b44bd863cd41b1c86ea"
deps = ["Artifacts", "CompilerSupportLibraries_jll", "JLLWrappers", "LazyArtifacts", "Libdl", "MPIPreferences", "TOML"]
git-tree-sha1 = "70e830dab5d0775183c99fc75e4c24c614ed7142"
uuid = "f1f71cc9-e9ae-5b93-9b94-4fe0e1ad3748"
version = "5.0.2+1"
version = "5.5.1+2"
[[deps.Markdown]]
deps = ["Base64"]
uuid = "d6f4376e-aef5-505a-96c1-9c027394607a"
version = "1.11.0"
[[deps.MbedTLS_jll]]
deps = ["Artifacts", "Libdl"]
uuid = "c8ffd9c3-330d-5841-b78e-0817d7145fa1"
version = "2.28.0+0"
version = "2.28.6+0"
[[deps.MicrosoftMPI_jll]]
deps = ["Artifacts", "JLLWrappers", "Libdl", "Pkg"]
git-tree-sha1 = "a16aa086d335ed7e0170c5265247db29172af2f9"
git-tree-sha1 = "bc95bf4149bf535c09602e3acdf950d9b4376227"
uuid = "9237b28f-5490-5468-be7b-bb81f5f5e6cf"
version = "10.1.3+2"
version = "10.1.4+3"
[[deps.MozillaCACerts_jll]]
uuid = "14a3606d-f60d-562e-9121-12d972cd8159"
version = "2022.2.1"
version = "2023.12.12"
[[deps.NetworkOptions]]
uuid = "ca575930-c2e3-43a9-ace4-1e988b2c1908"
@@ -143,85 +159,85 @@ version = "1.2.0"
[[deps.OpenBLAS_jll]]
deps = ["Artifacts", "CompilerSupportLibraries_jll", "Libdl"]
uuid = "4536629a-c528-5b80-bd46-f80d51c5b363"
version = "0.3.20+0"
version = "0.3.27+1"
[[deps.OpenMPI_jll]]
deps = ["Artifacts", "CompilerSupportLibraries_jll", "JLLWrappers", "LazyArtifacts", "Libdl", "MPIPreferences", "Pkg", "TOML"]
git-tree-sha1 = "346d6b357a480300ed7854dbc70e746ac52e10fd"
deps = ["Artifacts", "CompilerSupportLibraries_jll", "Hwloc_jll", "JLLWrappers", "LazyArtifacts", "Libdl", "MPIPreferences", "TOML", "Zlib_jll"]
git-tree-sha1 = "2dace87e14256edb1dd0724ab7ba831c779b96bd"
uuid = "fe0851c0-eecd-5654-98d4-656369965a5c"
version = "4.1.3+3"
version = "5.0.6+0"
[[deps.Pkg]]
deps = ["Artifacts", "Dates", "Downloads", "LibGit2", "Libdl", "Logging", "Markdown", "Printf", "REPL", "Random", "SHA", "Serialization", "TOML", "Tar", "UUIDs", "p7zip_jll"]
deps = ["Artifacts", "Dates", "Downloads", "FileWatching", "LibGit2", "Libdl", "Logging", "Markdown", "Printf", "Random", "SHA", "TOML", "Tar", "UUIDs", "p7zip_jll"]
uuid = "44cfe95a-1eb2-52ea-b672-e2afdf69b78f"
version = "1.8.0"
version = "1.11.0"
[deps.Pkg.extensions]
REPLExt = "REPL"
[deps.Pkg.weakdeps]
REPL = "3fa0cd96-eef1-5676-8a61-b3b8758bbffb"
[[deps.Preferences]]
deps = ["TOML"]
git-tree-sha1 = "47e5f437cc0e7ef2ce8406ce1e7e24d44915f88d"
git-tree-sha1 = "9306f6085165d270f7e3db02af26a400d580f5c6"
uuid = "21216c6a-2e73-6563-6e65-726566657250"
version = "1.3.0"
version = "1.4.3"
[[deps.Printf]]
deps = ["Unicode"]
uuid = "de0858da-6303-5e67-8744-51eddeeeb8d7"
[[deps.REPL]]
deps = ["InteractiveUtils", "Markdown", "Sockets", "Unicode"]
uuid = "3fa0cd96-eef1-5676-8a61-b3b8758bbffb"
version = "1.11.0"
[[deps.Random]]
deps = ["SHA", "Serialization"]
deps = ["SHA"]
uuid = "9a3f8284-a2c9-5f02-9a11-845980a1fd5c"
version = "1.11.0"
[[deps.SHA]]
uuid = "ea8e919c-243c-51af-8825-aaa63cd721ce"
version = "0.7.0"
[[deps.Serialization]]
uuid = "9e88b42a-f829-5b0c-bbe9-9e923198166b"
[[deps.Sockets]]
uuid = "6462fe0b-24de-5631-8697-dd941f90decc"
[[deps.TOML]]
deps = ["Dates"]
uuid = "fa267f1f-6049-4f14-aa54-33bafae1ed76"
version = "1.0.0"
version = "1.0.3"
[[deps.Tar]]
deps = ["ArgTools", "SHA"]
uuid = "a4e569a6-e804-4fa4-b0f3-eef7a1d5b13e"
version = "1.10.1"
version = "1.10.0"
[[deps.UUIDs]]
deps = ["Random", "SHA"]
uuid = "cf7118a7-6976-5b1a-9a39-7adc72f591a4"
version = "1.11.0"
[[deps.Unicode]]
uuid = "4ec0a83e-493e-50e2-b9ac-8f72acf5a8f5"
version = "1.11.0"
[[deps.Zlib_jll]]
deps = ["Libdl"]
uuid = "83775a58-1f1d-513f-b197-d71354ab007a"
version = "1.2.12+3"
version = "1.2.13+1"
[[deps.libLLVM_jll]]
deps = ["Artifacts", "Libdl"]
uuid = "8f36deef-c2a5-5394-99ed-8e07531fb29a"
version = "13.0.1+3"
version = "16.0.6+4"
[[deps.libblastrampoline_jll]]
deps = ["Artifacts", "Libdl", "OpenBLAS_jll"]
deps = ["Artifacts", "Libdl"]
uuid = "8e850b90-86db-534c-a0d3-1478176c7d93"
version = "5.1.1+0"
version = "5.11.0+0"
[[deps.nghttp2_jll]]
deps = ["Artifacts", "Libdl"]
uuid = "8e850ede-7688-5339-a07c-302acd2aaf8d"
version = "1.48.0+0"
version = "1.59.0+0"
[[deps.p7zip_jll]]
deps = ["Artifacts", "Libdl"]
uuid = "3f19e933-33d8-53b3-aaab-bd5110c3b7a0"
version = "17.4.0+0"
version = "17.4.0+2"

7
gen/generator.jl

@@ -21,12 +21,15 @@ push!(args, "-DHYPRE_ENABLE_CUDA_STREAMS=OFF")
push!(args, "-DHYPRE_ENABLE_CUSPARSE=OFF")
push!(args, "-DHYPRE_ENABLE_CURAND=OFF")
headers = joinpath.(hypre_include_dir, [
headers = joinpath.(
hypre_include_dir,
[
"HYPRE.h",
"HYPRE_IJ_mv.h",
"HYPRE_parcsr_mv.h",
"HYPRE_parcsr_ls.h",
])
]
)
ctx = create_context(headers, args, options)

7
gen/generator.toml

@@ -13,5 +13,10 @@ output_ignorelist = [
# Bogus expression: const HYPRE_VERSION = ((("HYPRE_RELEASE_NAME Date Compiled: ")(__DATE__))(" "))(__TIME__)
"HYPRE_VERSION",
# Filter out MPI stuff
"^[PQ]?MPI"
"^[PQ]?MPI",
# Included in prologue.jl
"MPI_Comm",
]
[codegen]
use_ccall_macro = true

15
gen/prologue.jl

@@ -1,6 +1,17 @@
###########################
## Start gen/prologue.jl ##
###########################
using MPI: MPI, MPI_Comm
if isdefined(MPI, :API) # MPI >= 0.20.0
if isdefined(MPI, :API)
# MPI >= 0.20.0
using MPI.API: MPI_INT, MPI_DOUBLE
else # MPI < 0.20.0
else
# MPI < 0.20.0
using MPI: MPI_INT, MPI_DOUBLE
end
#########################
## End gen/prologue.jl ##
#########################

6
gen/solver_options.jl

@@ -2,8 +2,7 @@ using HYPRE.LibHYPRE
function generate_options(io, structname, prefixes...)
println(io, "")
println(io, "function Internals.set_options(s::$(structname), kwargs)")
println(io, " solver = s.solver")
println(io, "function Internals.set_options(solver::$(structname), kwargs)")
println(io, " for (k, v) in kwargs")
ns = Tuple{Symbol, String}[]
@@ -29,7 +28,7 @@ function generate_options(io, structname, prefixes...)
println(io)
if k == "Precond"
println(io, " Internals.set_precond_defaults(v)")
println(io, " Internals.set_precond(s, v)")
println(io, " Internals.set_precond(solver, v)")
elseif nargs == 1
println(io, " @check ", n, "(solver)")
elseif nargs == 2
@@ -44,6 +43,7 @@ function generate_options(io, structname, prefixes...)
println(io, " end")
println(io, " end")
println(io, "end")
return
end
open(joinpath(@__DIR__, "..", "src", "solver_options.jl"), "w") do io

1768
lib/LibHYPRE.jl

File diff suppressed because it is too large

531
src/HYPRE.jl

@@ -3,14 +3,9 @@
module HYPRE
using MPI: MPI
using PartitionedArrays: IndexRange, MPIData, PSparseMatrix, PVector, PartitionedArrays,
SequentialData, map_parts
using SparseArrays: SparseArrays, AbstractSparseMatrixCSC, SparseMatrixCSC, nnz, nonzeros, nzrange, rowvals
using SparseMatricesCSR: SparseMatrixCSR, colvals, getrowptr
export HYPREMatrix, HYPREVector
# Clang.jl auto-generated bindings and some manual methods
include("LibHYPRE.jl")
using .LibHYPRE
@@ -38,7 +33,12 @@ function Init(; finalize_atexit=true)
if finalize_atexit
# TODO: MPI only calls the finalizer if not exiting due to a Julia exception. Does
# the same reasoning apply here?
atexit(HYPRE_Finalize)
atexit() do
# Finalize any HYPRE objects that are still alive
foreach(finalize, keys(Internals.HYPRE_OBJECTS))
# Finalize the library
HYPRE_Finalize()
end
end
return nothing
end
@@ -49,28 +49,43 @@ end
###############
mutable struct HYPREMatrix # <: AbstractMatrix{HYPRE_Complex}
#= const =# comm::MPI.Comm
#= const =# ilower::HYPRE_BigInt
#= const =# iupper::HYPRE_BigInt
#= const =# jlower::HYPRE_BigInt
#= const =# jupper::HYPRE_BigInt
const comm::MPI.Comm
const ilower::HYPRE_BigInt
const iupper::HYPRE_BigInt
const jlower::HYPRE_BigInt
const jupper::HYPRE_BigInt
ijmatrix::HYPRE_IJMatrix
parmatrix::HYPRE_ParCSRMatrix
end
function HYPREMatrix(comm::MPI.Comm, ilower::Integer, iupper::Integer,
jlower::Integer=ilower, jupper::Integer=iupper)
# Defining unsafe_convert enables ccall to automatically convert A::HYPREMatrix to
# HYPRE_IJMatrix and HYPRE_ParCSRMatrix while also making sure A won't be GC'd and
# finalized.
Base.unsafe_convert(::Type{HYPRE_IJMatrix}, A::HYPREMatrix) = A.ijmatrix
Base.unsafe_convert(::Type{HYPRE_ParCSRMatrix}, A::HYPREMatrix) = A.parmatrix
function HYPREMatrix(
comm::MPI.Comm,
ilower::Integer, iupper::Integer,
jlower::Integer = ilower, jupper::Integer = iupper
)
# Create the IJ matrix
A = HYPREMatrix(comm, ilower, iupper, jlower, jupper, C_NULL, C_NULL)
ijmatrix_ref = Ref{HYPRE_IJMatrix}(C_NULL)
@check HYPRE_IJMatrixCreate(comm, ilower, iupper, ilower, iupper, ijmatrix_ref)
A.ijmatrix = ijmatrix_ref[]
# Attach a finalizer
finalizer(x -> HYPRE_IJMatrixDestroy(x.ijmatrix), A)
finalizer(A) do x
if x.ijmatrix != C_NULL
HYPRE_IJMatrixDestroy(x)
x.ijmatrix = x.parmatrix = C_NULL
end
end
push!(Internals.HYPRE_OBJECTS, A => nothing)
# Set storage type
@check HYPRE_IJMatrixSetObjectType(A.ijmatrix, HYPRE_PARCSR)
@check HYPRE_IJMatrixSetObjectType(A, HYPRE_PARCSR)
# Initialize to make ready for setting values
@check HYPRE_IJMatrixInitialize(A.ijmatrix)
@check HYPRE_IJMatrixInitialize(A)
return A
end
@@ -78,10 +93,10 @@ end
# This should be called after setting all the values
function Internals.assemble_matrix(A::HYPREMatrix)
# Finalize after setting all values
@check HYPRE_IJMatrixAssemble(A.ijmatrix)
@check HYPRE_IJMatrixAssemble(A)
# Fetch the assembled CSR matrix
parmatrix_ref = Ref{Ptr{Cvoid}}(C_NULL)
@check HYPRE_IJMatrixGetObject(A.ijmatrix, parmatrix_ref)
@check HYPRE_IJMatrixGetObject(A, parmatrix_ref)
A.parmatrix = convert(Ptr{HYPRE_ParCSRMatrix}, parmatrix_ref[])
return A
end
@@ -91,13 +106,18 @@ end
###############
mutable struct HYPREVector # <: AbstractVector{HYPRE_Complex}
#= const =# comm::MPI.Comm
#= const =# ilower::HYPRE_BigInt
#= const =# iupper::HYPRE_BigInt
const comm::MPI.Comm
const ilower::HYPRE_BigInt
const iupper::HYPRE_BigInt
ijvector::HYPRE_IJVector
parvector::HYPRE_ParVector
end
# Defining unsafe_convert enables ccall to automatically convert b::HYPREVector to
# HYPRE_IJVector and HYPRE_ParVector while also making sure b won't be GC'd and finalized.
Base.unsafe_convert(::Type{HYPRE_IJVector}, b::HYPREVector) = b.ijvector
Base.unsafe_convert(::Type{HYPRE_ParVector}, b::HYPREVector) = b.parvector
function HYPREVector(comm::MPI.Comm, ilower::Integer, iupper::Integer)
# Create the IJ vector
b = HYPREVector(comm, ilower, iupper, C_NULL, C_NULL)
@@ -105,20 +125,26 @@ function HYPREVector(comm::MPI.Comm, ilower::Integer, iupper::Integer)
@check HYPRE_IJVectorCreate(comm, ilower, iupper, ijvector_ref)
b.ijvector = ijvector_ref[]
# Attach a finalizer
finalizer(x -> HYPRE_IJVectorDestroy(x.ijvector), b)
finalizer(b) do x
if x.ijvector != C_NULL
HYPRE_IJVectorDestroy(x)
x.ijvector = x.parvector = C_NULL
end
end
push!(Internals.HYPRE_OBJECTS, b => nothing)
# Set storage type
@check HYPRE_IJVectorSetObjectType(b.ijvector, HYPRE_PARCSR)
@check HYPRE_IJVectorSetObjectType(b, HYPRE_PARCSR)
# Initialize to make ready for setting values
@check HYPRE_IJVectorInitialize(b.ijvector)
@check HYPRE_IJVectorInitialize(b)
return b
end
function Internals.assemble_vector(b::HYPREVector)
# Finalize after setting all values
@check HYPRE_IJVectorAssemble(b.ijvector)
@check HYPRE_IJVectorAssemble(b)
# Fetch the assembled vector
parvector_ref = Ref{Ptr{Cvoid}}(C_NULL)
@check HYPRE_IJVectorGetObject(b.ijvector, parvector_ref)
@check HYPRE_IJVectorGetObject(b, parvector_ref)
b.parvector = convert(Ptr{HYPRE_ParVector}, parvector_ref[])
return b
end
@@ -126,7 +152,7 @@ end
function Internals.get_proc_rows(b::HYPREVector)
# ilower_ref = Ref{HYPRE_BigInt}()
# iupper_ref = Ref{HYPRE_BigInt}()
# @check HYPRE_IJVectorGetLocalRange(b.ijvector, ilower_ref, iupper_ref)
# @check HYPRE_IJVectorGetLocalRange(b, ilower_ref, iupper_ref)
# ilower = ilower_ref[]
# iupper = iupper_ref[]
# return ilower, iupper
@@ -152,105 +178,24 @@ function Base.zero(b::HYPREVector)
nvalues = jupper - jlower + 1
indices = collect(HYPRE_BigInt, jlower:jupper)
values = zeros(HYPRE_Complex, nvalues)
@check HYPRE_IJVectorSetValues(x.ijvector, nvalues, indices, values)
@check HYPRE_IJVectorSetValues(x, nvalues, indices, values)
# Finalize and return
Internals.assemble_vector(x)
return x
end
######################################
# SparseMatrixCS(C|R) -> HYPREMatrix #
######################################
#########################
# Vector -> HYPREVector #
#########################
function Internals.check_n_rows(A, ilower, iupper)
if size(A, 1) != (iupper - ilower + 1)
throw(ArgumentError("number of rows in matrix does not match global start/end rows ilower and iupper"))
end
return
end
function Internals.to_hypre_data(A::SparseMatrixCSC, ilower, iupper)
Internals.check_n_rows(A, ilower, iupper)
nnz = SparseArrays.nnz(A)
A_rows = rowvals(A)
A_vals = nonzeros(A)
# Initialize the data buffers HYPRE wants
nrows = HYPRE_Int(iupper - ilower + 1) # Total number of rows
ncols = zeros(HYPRE_Int, nrows) # Number of colums for each row
rows = collect(HYPRE_BigInt, ilower:iupper) # The row indices
cols = Vector{HYPRE_BigInt}(undef, nnz) # The column indices
values = Vector{HYPRE_Complex}(undef, nnz) # The values
# First pass to count nnz per row
@inbounds for j in 1:size(A, 2)
for i in nzrange(A, j)
row = A_rows[i]
ncols[row] += 1
end
end
# Keep track of the last index used for every row
lastinds = zeros(Int, nrows)
cumsum!((@view lastinds[2:end]), (@view ncols[1:end-1]))
# Second pass to populate the output
@inbounds for j in 1:size(A, 2)
for i in nzrange(A, j)
row = A_rows[i]
k = lastinds[row] += 1
val = A_vals[i]
cols[k] = j
values[k] = val
end
end
return nrows, ncols, rows, cols, values
end
function Internals.to_hypre_data(A::SparseMatrixCSR, ilower, iupper)
Internals.check_n_rows(A, ilower, iupper)
nnz = SparseArrays.nnz(A)
A_cols = colvals(A)
A_vals = nonzeros(A)
# Initialize the data buffers HYPRE wants
nrows = HYPRE_Int(iupper - ilower + 1) # Total number of rows
ncols = Vector{HYPRE_Int}(undef, nrows) # Number of colums for each row
rows = collect(HYPRE_BigInt, ilower:iupper) # The row indices
cols = Vector{HYPRE_BigInt}(undef, nnz) # The column indices
values = Vector{HYPRE_Complex}(undef, nnz) # The values
# Loop over the rows and collect all values
k = 0
@inbounds for i in 1:size(A, 1)
nzr = nzrange(A, i)
ncols[i] = length(nzr)
for j in nzr
k += 1
col = A_cols[j]
val = A_vals[j]
cols[k] = col
values[k] = val
end
end
@assert nnz == k
return nrows, ncols, rows, cols, values
end
function HYPREMatrix(comm::MPI.Comm, B::Union{SparseMatrixCSC,SparseMatrixCSR}, ilower, iupper)
A = HYPREMatrix(comm, ilower, iupper)
nrows, ncols, rows, cols, values = Internals.to_hypre_data(B, ilower, iupper)
@check HYPRE_IJMatrixSetValues(A.ijmatrix, nrows, ncols, rows, cols, values)
Internals.assemble_matrix(A)
return A
end
HYPREMatrix(B::Union{SparseMatrixCSC,SparseMatrixCSR}, ilower=1, iupper=size(B, 1)) =
HYPREMatrix(MPI.COMM_SELF, B, ilower, iupper)
#########################
# Vector -> HYPREVector #
#########################
function Internals.to_hypre_data(x::Vector, ilower, iupper)
Internals.check_n_rows(x, ilower, iupper)
indices = collect(HYPRE_BigInt, ilower:iupper)
@@ -262,13 +207,14 @@ end
function HYPREVector(comm::MPI.Comm, x::Vector, ilower, iupper)
b = HYPREVector(comm, ilower, iupper)
nvalues, indices, values = Internals.to_hypre_data(x, ilower, iupper)
@check HYPRE_IJVectorSetValues(b.ijvector, nvalues, indices, values)
@check HYPRE_IJVectorSetValues(b, nvalues, indices, values)
Internals.assemble_vector(b)
return b
end
HYPREVector(x::Vector, ilower=1, iupper=length(x)) =
HYPREVector(MPI.COMM_SELF, x, ilower, iupper)
function HYPREVector(x::Vector, ilower = 1, iupper = length(x))
return HYPREVector(MPI.COMM_SELF, x, ilower, iupper)
end
# TODO: Other eltypes could be supported by using an intermediate buffer
function Base.copy!(dst::Vector{HYPRE_Complex}, src::HYPREVector)
@@ -278,7 +224,7 @@ function Base.copy!(dst::Vector{HYPRE_Complex}, src::HYPREVector)
throw(ArgumentError("length of dst and src does not match"))
end
indices = collect(HYPRE_BigInt, ilower:iupper)
@check HYPRE_IJVectorGetValues(src.ijvector, nvalues, indices, dst)
@check HYPRE_IJVectorGetValues(src, nvalues, indices, dst)
return dst
end
@@ -289,242 +235,187 @@ function Base.copy!(dst::HYPREVector, src::Vector{HYPRE_Complex})
throw(ArgumentError("length of dst and src does not match"))
end
# Re-initialize the vector
@check HYPRE_IJVectorInitialize(dst.ijvector)
@check HYPRE_IJVectorInitialize(dst)
# Set all the values
indices = collect(HYPRE_BigInt, ilower:iupper)
@check HYPRE_IJVectorSetValues(dst.ijvector, nvalues, indices, src)
@check HYPRE_IJVectorSetValues(dst, nvalues, indices, src)
# TODO: It shouldn't be necessary to assemble here since we only set owned rows (?)
# @check HYPRE_IJVectorAssemble(dst.ijvector)
# @check HYPRE_IJVectorAssemble(dst)
# TODO: Necessary to recreate the ParVector? Running some examples it seems like it is
# not needed.
return dst
end
##################################################
# PartitionedArrays.PSparseMatrix -> HYPREMatrix #
##################################################
# TODO: This has some duplicated code with to_hypre_data(::SparseMatrixCSC, ilower, iupper)
function Internals.to_hypre_data(A::SparseMatrixCSC, r::IndexRange, c::IndexRange)
@assert r.oid_to_lid isa UnitRange && r.oid_to_lid.start == 1
ilower = r.lid_to_gid[r.oid_to_lid.start]
iupper = r.lid_to_gid[r.oid_to_lid.stop]
a_rows = rowvals(A)
a_vals = nonzeros(A)
# Initialize the data buffers HYPRE wants
nrows = HYPRE_Int(iupper - ilower + 1) # Total number of rows
ncols = zeros(HYPRE_Int, nrows) # Number of colums for each row
rows = collect(HYPRE_BigInt, ilower:iupper) # The row indices
# cols = Vector{HYPRE_BigInt}(undef, nnz) # The column indices
# values = Vector{HYPRE_Complex}(undef, nnz) # The values
# First pass to count nnz per row (note that the fact that columns are permuted
# doesn't matter for this pass)
a_rows = rowvals(A)
a_vals = nonzeros(A)
@inbounds for j in 1:size(A, 2)
for i in nzrange(A, j)
row = a_rows[i]
row > r.oid_to_lid.stop && continue # Skip ghost rows
# grow = r.lid_to_gid[lrow]
ncols[row] += 1
end
end
# Initialize remaining buffers now that nnz is known
nnz = sum(ncols)
cols = Vector{HYPRE_BigInt}(undef, nnz)
values = Vector{HYPRE_Complex}(undef, nnz)
# Keep track of the last index used for every row
lastinds = zeros(Int, nrows)
cumsum!((@view lastinds[2:end]), (@view ncols[1:end-1]))
# Second pass to populate the output -- here we need to take care of the permutation
# of columns. TODO: Problem that they are not sorted?
@inbounds for j in 1:size(A, 2)
for i in nzrange(A, j)
row = a_rows[i]
row > r.oid_to_lid.stop && continue # Skip ghost rows
k = lastinds[row] += 1
val = a_vals[i]
cols[k] = c.lid_to_gid[j]
values[k] = val
end
####################
## HYPREAssembler ##
####################
struct HYPREMatrixAssembler
A::HYPREMatrix
ncols::Vector{HYPRE_Int}
rows::Vector{HYPRE_BigInt}
cols::Vector{HYPRE_BigInt}
values::Vector{HYPRE_Complex}
end
return nrows, ncols, rows, cols, values
struct HYPREVectorAssembler
b::HYPREVector
indices::Vector{HYPRE_BigInt}
values::Vector{HYPRE_Complex}
end
# TODO: Possibly this can be optimized if it is possible to pass overlong vectors to HYPRE.
# At least values should be possible to directly share, but cols needs to translated
# to global ids.
function Internals.to_hypre_data(A::SparseMatrixCSR, r::IndexRange, c::IndexRange)
@assert r.oid_to_lid isa UnitRange && r.oid_to_lid.start == 1
ilower = r.lid_to_gid[r.oid_to_lid.start]
iupper = r.lid_to_gid[r.oid_to_lid.stop]
a_cols = colvals(A)
a_vals = nonzeros(A)
nnz = getrowptr(A)[r.oid_to_lid.stop + 1] - 1
# Initialize the data buffers HYPRE wants
nrows = HYPRE_Int(iupper - ilower + 1) # Total number of rows
ncols = zeros(HYPRE_Int, nrows) # Number of colums for each row
rows = collect(HYPRE_BigInt, ilower:iupper) # The row indices
cols = Vector{HYPRE_BigInt}(undef, nnz) # The column indices
values = Vector{HYPRE_Complex}(undef, nnz) # The values
# Loop over the (owned) rows and collect all values
k = 0
@inbounds for i in r.oid_to_lid
nzr = nzrange(A, i)
ncols[i] = length(nzr)
for j in nzr
k += 1
col = a_cols[j]
val = a_vals[j]
cols[k] = c.lid_to_gid[col]
values[k] = val
end
end
@assert nnz == k
return nrows, ncols, rows, cols, values
struct HYPREAssembler
A::HYPREMatrixAssembler
b::HYPREVectorAssembler
end
function Internals.get_comm(A::Union{PSparseMatrix{<:Any,<:M}, PVector{<:Any,<:M}}) where M <: MPIData
return A.rows.partition.comm
"""
HYPRE.start_assemble!(A::HYPREMatrix) -> HYPREMatrixAssembler
HYPRE.start_assemble!(b::HYPREVector) -> HYPREVectorAssembler
HYPRE.start_assemble!(A::HYPREMatrix, b::HYPREVector) -> HYPREAssembler
Initialize a new assembly for matrix `A`, vector `b`, or for both. This zeroes out any
previous data in the arrays. Return an assembler of the corresponding type with allocated data buffers needed to
perform the assembly efficiently.
See also: [`HYPRE.assemble!`](@ref), [`HYPRE.finish_assemble!`](@ref).
"""
start_assemble!
function start_assemble!(A::HYPREMatrix)
if A.parmatrix != C_NULL
# This matrix has been assembled before, reset to 0
@check HYPRE_IJMatrixSetConstantValues(A, 0)
end
@check HYPRE_IJMatrixInitialize(A)
return HYPREMatrixAssembler(A, HYPRE_Int[], HYPRE_BigInt[], HYPRE_BigInt[], HYPRE_Complex[])
end
Internals.get_comm(_::Union{PSparseMatrix,PVector}) = MPI.COMM_SELF
function Internals.get_proc_rows(A::Union{PSparseMatrix{<:Any,<:M}, PVector{<:Any,<:M}}) where M <: MPIData
r = A.rows.partition.part
ilower::HYPRE_BigInt = r.lid_to_gid[r.oid_to_lid[1]]
iupper::HYPRE_BigInt = r.lid_to_gid[r.oid_to_lid[end]]
return ilower, iupper
function start_assemble!(b::HYPREVector)
if b.parvector != C_NULL
# This vector has been assembled before, reset to 0
# See https://github.com/hypre-space/hypre/pull/689
# @check HYPRE_IJVectorSetConstantValues(b, 0)
end
function Internals.get_proc_rows(A::Union{PSparseMatrix{<:Any,<:S}, PVector{<:Any,<:S}}) where S <: SequentialData
ilower::HYPRE_BigInt = typemax(HYPRE_BigInt)
iupper::HYPRE_BigInt = typemin(HYPRE_BigInt)
for r in A.rows.partition.parts
ilower = min(r.lid_to_gid[r.oid_to_lid[1]], ilower)
iupper = max(r.lid_to_gid[r.oid_to_lid[end]], iupper)
@check HYPRE_IJVectorInitialize(b)
if b.parvector != C_NULL
nvalues = HYPRE_Int(b.iupper - b.ilower + 1)
indices = collect(HYPRE_BigInt, b.ilower:b.iupper)
values = zeros(HYPRE_Complex, nvalues)
@check HYPRE_IJVectorSetValues(b, nvalues, indices, values)
# TODO: Do I need to assemble here?
end
return ilower, iupper
return HYPREVectorAssembler(b, HYPRE_BigInt[], HYPRE_Complex[])
end
function HYPREMatrix(B::PSparseMatrix)
# Use the same communicator as the matrix
comm = Internals.get_comm(B)
# Fetch rows owned by this process
ilower, iupper = Internals.get_proc_rows(B)
# Create the IJ matrix
A = HYPREMatrix(comm, ilower, iupper)
# Set all the values
map_parts(B.values, B.rows.partition, B.cols.partition) do Bv, Br, Bc
nrows, ncols, rows, cols, values = Internals.to_hypre_data(Bv, Br, Bc)
@check HYPRE_IJMatrixSetValues(A.ijmatrix, nrows, ncols, rows, cols, values)
return nothing
end
# Finalize
Internals.assemble_matrix(A)
return A
function start_assemble!(A::HYPREMatrix, b::HYPREVector)
return HYPREAssembler(start_assemble!(A), start_assemble!(b))
end
############################################
# PartitionedArrays.PVector -> HYPREVector #
############################################
"""
HYPRE.assemble!(A::HYPREMatrixAssembler, i, j, a::Matrix)
HYPRE.assemble!(A::HYPREVectorAssembler, i, b::Vector)
HYPRE.assemble!(A::HYPREAssembler, ij, a::Matrix, b::Vector)
Assemble (by adding) matrix contribution `a`, vector contribution `b`, into the underlying
array(s) of the assembler at global row indices `i` and column indices `j`.
function HYPREVector(v::PVector)
# Use the same communicator as the matrix
comm = Internals.get_comm(v)
# Fetch rows owned by this process
ilower, iupper = Internals.get_proc_rows(v)
# Create the IJ vector
b = HYPREVector(comm, ilower, iupper)
# Set all the values
map_parts(v.values, v.owned_values, v.rows.partition) do _, vo, vr
ilower_part = vr.lid_to_gid[vr.oid_to_lid.start]
iupper_part = vr.lid_to_gid[vr.oid_to_lid.stop]
# Option 1: Set all values
nvalues = HYPRE_Int(iupper_part - ilower_part + 1)
indices = collect(HYPRE_BigInt, ilower_part:iupper_part)
# TODO: Could probably just pass the full vector even if it is too long
# values = convert(Vector{HYPRE_Complex}, vv)
values = collect(HYPRE_Complex, vo)
# # Option 2: Set only non-zeros
# indices = HYPRE_BigInt[]
# values = HYPRE_Complex[]
# for (i, vi) in zip(ilower_part:iupper_part, vo)
# if !iszero(vi)
# push!(indices, i)
# push!(values, vi)
# end
# end
# nvalues = length(indices)
@check HYPRE_IJVectorSetValues(b.ijvector, nvalues, indices, values)
return nothing
This is roughly equivalent to:
```julia
# A.A::HYPREMatrix
A.A[i, j] += a
# A.b::HYPREVector
A.b[i] += b
```
See also: [`HYPRE.start_assemble!`](@ref), [`HYPRE.finish_assemble!`](@ref).
"""
assemble!
function assemble!(A::HYPREMatrixAssembler, i::Vector, j::Vector, a::Matrix)
nrows, ncols, rows, cols, values = Internals.to_hypre_data(A, a, i, j)
@check HYPRE_IJMatrixAddToValues(A.A, nrows, ncols, rows, cols, values)
return A
end
# Finalize
Internals.assemble_vector(b)
return b
@deprecate assemble!(A::HYPREMatrixAssembler, ij::Vector, a::Matrix) assemble!(A, ij, ij, a) false
function assemble!(A::HYPREVectorAssembler, ij::Vector, a::Vector)
nvalues, indices, values = Internals.to_hypre_data(A, a, ij)
@check HYPRE_IJVectorAddToValues(A.b, nvalues, indices, values)
return A
end
function Internals.copy_check(dst::HYPREVector, src::PVector)
il_dst, iu_dst = Internals.get_proc_rows(dst)
il_src, iu_src = Internals.get_proc_rows(src)
if il_dst != il_src && iu_dst != iu_src
# TODO: Why require this?
throw(ArgumentError(
"row owner mismatch between dst ($(il_dst:iu_dst)) and src ($(il_dst:iu_dst))"
))
function assemble!(A::HYPREAssembler, ij::Vector, a::Matrix, b::Vector)
assemble!(A.A, ij, ij, a)
assemble!(A.b, ij, b)
return A
end
function Internals.to_hypre_data(A::HYPREMatrixAssembler, a::Matrix, I::Vector, J::Vector)
size(a, 1) == length(I) || error("mismatching number of rows")
size(a, 2) == length(J) || error("mismatching number of cols")
nrows = HYPRE_Int(length(I))
# Resize cache vectors
ncols = resize!(A.ncols, nrows)
rows = resize!(A.rows, nrows)
cols = resize!(A.cols, length(a))
values = resize!(A.values, length(a))
# Fill vectors
ncols = fill!(ncols, HYPRE_Int(length(J)))
copyto!(rows, I)
idx = 0
for i in 1:length(I), j in 1:length(J)
idx += 1
cols[idx] = J[j]
values[idx] = a[i, j]
end
@assert idx == length(a)
@assert nrows == length(ncols) == length(rows)
return nrows, ncols, rows, cols, values
end
# TODO: Other eltypes could be support by using a intermediate buffer
function Base.copy!(dst::PVector{HYPRE_Complex}, src::HYPREVector)
Internals.copy_check(src, dst)
map_parts(dst.values, dst.owned_values, dst.rows.partition) do vv, _, vr
il_src_part = vr.lid_to_gid[vr.oid_to_lid.start]
iu_src_part = vr.lid_to_gid[vr.oid_to_lid.stop]
nvalues = HYPRE_Int(iu_src_part - il_src_part + 1)
indices = collect(HYPRE_BigInt, il_src_part:iu_src_part)
# Assumption: the dst vector is assembled, and should thus have 0s on the ghost
# entries (??). If this is not true, we must call fill!(vv, 0) here. This should be
# fairly cheap anyway, so might as well do it...
fill!(vv, 0)
# TODO: Safe to use vv here? Owned values are always first?
@check HYPRE_IJVectorGetValues(src.ijvector, nvalues, indices, vv)
function Internals.to_hypre_data(A::HYPREVectorAssembler, b::Vector, I::Vector)
length(b) == length(I) || error("mismatching number of entries")
nvalues = HYPRE_Int(length(I))
# Resize cache vectors
indices = resize!(A.indices, nvalues)
values = resize!(A.values, nvalues)
# Fill vectors
copyto!(indices, I)
copyto!(values, b)
return nvalues, indices, values
end
return dst
"""
HYPRE.finish_assemble!(A::HYPREMatrixAssembler)
HYPRE.finish_assemble!(A::HYPREVectorAssembler)
HYPRE.finish_assemble!(A::HYPREAssembler)
Finish the assembly. This synchronizes the data between processors.
"""
finish_assemble!
function finish_assemble!(A::HYPREMatrixAssembler)
Internals.assemble_matrix(A.A)
return A.A
end
function Base.copy!(dst::HYPREVector, src::PVector{HYPRE_Complex})
Internals.copy_check(dst, src)
# Re-initialize the vector
@check HYPRE_IJVectorInitialize(dst.ijvector)
map_parts(src.values, src.owned_values, src.rows.partition) do vv, _, vr
ilower_src_part = vr.lid_to_gid[vr.oid_to_lid.start]
iupper_src_part = vr.lid_to_gid[vr.oid_to_lid.stop]
nvalues = HYPRE_Int(iupper_src_part - ilower_src_part + 1)
indices = collect(HYPRE_BigInt, ilower_src_part:iupper_src_part)
# TODO: Safe to use vv here? Owned values are always first?
@check HYPRE_IJVectorSetValues(dst.ijvector, nvalues, indices, vv)
function finish_assemble!(A::HYPREVectorAssembler)
Internals.assemble_vector(A.b)
return A.b
end
# TODO: It shouldn't be necessary to assemble here since we only set owned rows (?)
# @check HYPRE_IJVectorAssemble(dst.ijvector)
# TODO: Necessary to recreate the ParVector? Running some examples it seems like it is
# not needed.
return dst
function finish_assemble!(A::HYPREAssembler)
return finish_assemble!(A.A), finish_assemble!(A.b)
end
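# Hedged usage sketch (added for illustration, not part of the commit): a typical
# assembly loop with the API above, here for a 2-row system owned by one process
# and with made-up "element" contributions:
#
#     A = HYPREMatrix(MPI.COMM_SELF, 1, 2)
#     b = HYPREVector(MPI.COMM_SELF, 1, 2)
#     assembler = HYPRE.start_assemble!(A, b)
#     ke = [2.0 -1.0; -1.0 2.0]   # element matrix contribution
#     fe = [1.0, 1.0]             # element vector contribution
#     HYPRE.assemble!(assembler, [1, 2], ke, fe)
#     A, b = HYPRE.finish_assemble!(assembler)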
# Solver interface
######################
## Solver interface ##
######################
include("solvers.jl")
include("solver_options.jl")

2
src/Internals.jl

@@ -16,4 +16,6 @@ function setup_func end
function solve_func end
function to_hypre_data end
const HYPRE_OBJECTS = WeakKeyDict{Any, Nothing}()
end # module Internals

1
src/LibHYPRE.jl

@@ -92,6 +92,7 @@ function __init__()
patch_ref = Ref{HYPRE_Int}(-1)
@check HYPRE_VersionNumber(major_ref, minor_ref, patch_ref, C_NULL)
global VERSION = VersionNumber(major_ref[], minor_ref[], patch_ref[])
return
end
end

2
src/precs.jl

@@ -28,7 +28,7 @@ function construct_boomeramg_prec_builder(settings_fun!; kwargs...)
return BoomerAMGPrecBuilder(settings_fun!, kwargs)
end
function (b::BoomerAMGPrecBuilder)(A::AbstractSparseMatrixCSC, p)
function (b::BoomerAMGPrecBuilder)(A, p)
amg = HYPRE.BoomerAMG(; b.kwargs)
settings_fun!(amg, A, p)
return (BoomerAMGPrecWrapper(amg, A), I)

42
src/solver_options.jl

@@ -4,8 +4,7 @@
Internals.set_options(::HYPRESolver, kwargs) = nothing
function Internals.set_options(s::BiCGSTAB, kwargs)
solver = s.solver
function Internals.set_options(solver::BiCGSTAB, kwargs)
for (k, v) in kwargs
if k === :ConvergenceFactorTol
@check HYPRE_BiCGSTABSetConvergenceFactorTol(solver, v)
@@ -19,7 +18,7 @@ function Internals.set_options(s::BiCGSTAB, kwargs)
@check HYPRE_ParCSRBiCGSTABSetMinIter(solver, v)
elseif k === :Precond
Internals.set_precond_defaults(v)
Internals.set_precond(s, v)
Internals.set_precond(solver, v)
elseif k === :PrintLevel
@check HYPRE_ParCSRBiCGSTABSetPrintLevel(solver, v)
elseif k === :StopCrit
@@ -30,10 +29,10 @@ function Internals.set_options(s::BiCGSTAB, kwargs)
throw(ArgumentError("unknown option $k for HYPRE.BiCGSTAB"))
end
end
return
end
function Internals.set_options(s::BoomerAMG, kwargs)
solver = s.solver
function Internals.set_options(solver::BoomerAMG, kwargs)
for (k, v) in kwargs
if k === :ADropTol
@check HYPRE_BoomerAMGSetADropTol(solver, v)
@@ -287,10 +286,10 @@ function Internals.set_options(s::BoomerAMG, kwargs)
throw(ArgumentError("unknown option $k for HYPRE.BoomerAMG"))
end
end
return
end
function Internals.set_options(s::FlexGMRES, kwargs)
solver = s.solver
function Internals.set_options(solver::FlexGMRES, kwargs)
for (k, v) in kwargs
if k === :ConvergenceFactorTol
@check HYPRE_FlexGMRESSetConvergenceFactorTol(solver, v)
@@ -308,7 +307,7 @@ function Internals.set_options(s::FlexGMRES, kwargs)
@check HYPRE_ParCSRFlexGMRESSetModifyPC(solver, v)
elseif k === :Precond
Internals.set_precond_defaults(v)
Internals.set_precond(s, v)
Internals.set_precond(solver, v)
elseif k === :PrintLevel
@check HYPRE_ParCSRFlexGMRESSetPrintLevel(solver, v)
elseif k === :Tol
@@ -317,10 +316,10 @@ function Internals.set_options(s::FlexGMRES, kwargs)
throw(ArgumentError("unknown option $k for HYPRE.FlexGMRES"))
end
end
return
end
function Internals.set_options(s::GMRES, kwargs)
solver = s.solver
function Internals.set_options(solver::GMRES, kwargs)
for (k, v) in kwargs
if k === :ConvergenceFactorTol
@check HYPRE_GMRESSetConvergenceFactorTol(solver, v)
@@ -340,7 +339,7 @@ function Internals.set_options(s::GMRES, kwargs)
@check HYPRE_ParCSRGMRESSetMinIter(solver, v)
elseif k === :Precond
Internals.set_precond_defaults(v)
Internals.set_precond(s, v)
Internals.set_precond(solver, v)
elseif k === :PrintLevel
@check HYPRE_ParCSRGMRESSetPrintLevel(solver, v)
elseif k === :StopCrit
@@ -351,10 +350,10 @@ function Internals.set_options(s::GMRES, kwargs)
throw(ArgumentError("unknown option $k for HYPRE.GMRES"))
end
end
return
end
function Internals.set_options(s::Hybrid, kwargs)
solver = s.solver
function Internals.set_options(solver::Hybrid, kwargs)
for (k, v) in kwargs
if k === :AbsoluteTol
@check HYPRE_ParCSRHybridSetAbsoluteTol(solver, v)
@@ -424,7 +423,7 @@ function Internals.set_options(s::Hybrid, kwargs)
@check HYPRE_ParCSRHybridSetPMaxElmts(solver, v)
elseif k === :Precond
Internals.set_precond_defaults(v)
Internals.set_precond(s, v)
Internals.set_precond(solver, v)
elseif k === :PrintLevel
@check HYPRE_ParCSRHybridSetPrintLevel(solver, v)
elseif k === :RecomputeResidual
@@ -461,10 +460,10 @@ function Internals.set_options(s::Hybrid, kwargs)
throw(ArgumentError("unknown option $k for HYPRE.Hybrid"))
end
end
return
end
function Internals.set_options(s::ILU, kwargs)
solver = s.solver
function Internals.set_options(solver::ILU, kwargs)
for (k, v) in kwargs
if k === :DropThreshold
@check HYPRE_ILUSetDropThreshold(solver, v)
@@ -496,10 +495,10 @@ function Internals.set_options(s::ILU, kwargs)
throw(ArgumentError("unknown option $k for HYPRE.ILU"))
end
end
return
end
function Internals.set_options(s::ParaSails, kwargs)
solver = s.solver
function Internals.set_options(solver::ParaSails, kwargs)
for (k, v) in kwargs
if k === :Filter
@check HYPRE_ParCSRParaSailsSetFilter(solver, v)
@@ -517,10 +516,10 @@ function Internals.set_options(s::ParaSails, kwargs)
throw(ArgumentError("unknown option $k for HYPRE.ParaSails"))
end
end
return
end
function Internals.set_options(s::PCG, kwargs)
solver = s.solver
function Internals.set_options(solver::PCG, kwargs)
for (k, v) in kwargs
if k === :AbsoluteTolFactor
@check HYPRE_PCGSetAbsoluteTolFactor(solver, v)
@@ -540,7 +539,7 @@ function Internals.set_options(s::PCG, kwargs)
@check HYPRE_ParCSRPCGSetMaxIter(solver, v)
elseif k === :Precond
Internals.set_precond_defaults(v)
Internals.set_precond(s, v)
Internals.set_precond(solver, v)
elseif k === :PrintLevel
@check HYPRE_ParCSRPCGSetPrintLevel(solver, v)
elseif k === :RelChange
@@ -555,4 +554,5 @@ function Internals.set_options(s::PCG, kwargs)
throw(ArgumentError("unknown option $k for HYPRE.PCG"))
end
end
return
end

199
src/solvers.jl

@@ -7,16 +7,23 @@ Abstract super type of all the wrapped HYPRE solvers.
"""
abstract type HYPRESolver end
function Internals.safe_finalizer(Destroy)
# Only calls the Destroy if pointer not C_NULL
return function(solver)
if solver.solver != C_NULL
Destroy(solver.solver)
solver.solver = C_NULL
function Internals.safe_finalizer(Destroy, solver)
# Add the solver to object tracker for possible atexit finalizing
push!(Internals.HYPRE_OBJECTS, solver => nothing)
# Add a finalizer that only calls Destroy if pointer not C_NULL
finalizer(solver) do s
if s.solver != C_NULL
Destroy(s)
s.solver = C_NULL
end
end
return
end
# Defining unsafe_convert enables ccall to automatically convert solver::HYPRESolver to
# HYPRE_Solver while also making sure solver won't be GC'd and finalized.
Base.unsafe_convert(::Type{HYPRE_Solver}, solver::HYPRESolver) = solver.solver
# Fallback for solvers that don't have required defaults
Internals.set_precond_defaults(::HYPRESolver) = nothing
@@ -45,43 +52,6 @@ See also [`solve`](@ref).
solve!(pcg::HYPRESolver, x::HYPREVector, A::HYPREMatrix, ::HYPREVector)
######################################
# PartitionedArrays solver interface #
######################################
# TODO: Would it be useful with a method that copied the solution to b instead?
function solve(solver::HYPRESolver, A::PSparseMatrix, b::PVector)
hypre_x = solve(solver, HYPREMatrix(A), HYPREVector(b))
x = copy!(similar(b, HYPRE_Complex), hypre_x)
return x
end
function solve!(solver::HYPRESolver, x::PVector, A::PSparseMatrix, b::PVector)
hypre_x = HYPREVector(x)
solve!(solver, hypre_x, HYPREMatrix(A), HYPREVector(b))
copy!(x, hypre_x)
return x
end
########################################
# SparseMatrixCS(C|R) solver interface #
########################################
# TODO: This could use the HYPRE compile flag for sequential mode to avoid MPI overhead
function solve(solver::HYPRESolver, A::Union{SparseMatrixCSC,SparseMatrixCSR}, b::Vector)
hypre_x = solve(solver, HYPREMatrix(A), HYPREVector(b))
x = copy!(similar(b, HYPRE_Complex), hypre_x)
return x
end
function solve!(solver::HYPRESolver, x::Vector, A::Union{SparseMatrixCSC,SparseMatrixCSR}, b::Vector)
hypre_x = HYPREVector(x)
solve!(solver, hypre_x, HYPREMatrix(A), HYPREVector(b))
copy!(x, hypre_x)
return x
end
#####################################
## Concrete solver implementations ##
#####################################
@@ -102,14 +72,15 @@ Create a `BiCGSTAB` solver. See HYPRE API reference for details and supported se
mutable struct BiCGSTAB <: HYPRESolver
comm::MPI.Comm
solver::HYPRE_Solver
precond::Union{HYPRESolver, Nothing}
function BiCGSTAB(comm::MPI.Comm = MPI.COMM_NULL; kwargs...)
# comm defaults to COMM_NULL since it is unused in HYPRE_ParCSRBiCGSTABCreate
solver = new(comm, C_NULL)
solver = new(comm, C_NULL, nothing)
solver_ref = Ref{HYPRE_Solver}(C_NULL)
@check HYPRE_ParCSRBiCGSTABCreate(comm, solver_ref)
solver.solver = solver_ref[]
# Attach a finalizer
finalizer(Internals.safe_finalizer(HYPRE_ParCSRBiCGSTABDestroy), solver)
Internals.safe_finalizer(HYPRE_ParCSRBiCGSTABDestroy, solver)
# Set the options
Internals.set_options(solver, kwargs)
return solver
@@ -119,8 +90,8 @@ end
const ParCSRBiCGSTAB = BiCGSTAB
function solve!(bicg::BiCGSTAB, x::HYPREVector, A::HYPREMatrix, b::HYPREVector)
@check HYPRE_ParCSRBiCGSTABSetup(bicg.solver, A.parmatrix, b.parvector, x.parvector)
@check HYPRE_ParCSRBiCGSTABSolve(bicg.solver, A.parmatrix, b.parvector, x.parvector)
@check HYPRE_ParCSRBiCGSTABSetup(bicg, A, b, x)
@check HYPRE_ParCSRBiCGSTABSolve(bicg, A, b, x)
return x
end
@@ -128,9 +99,10 @@ Internals.setup_func(::BiCGSTAB) = HYPRE_ParCSRBiCGSTABSetup
Internals.solve_func(::BiCGSTAB) = HYPRE_ParCSRBiCGSTABSolve
function Internals.set_precond(bicg::BiCGSTAB, p::HYPRESolver)
bicg.precond = p
solve_f = Internals.solve_func(p)
setup_f = Internals.setup_func(p)
@check HYPRE_ParCSRBiCGSTABSetPrecond(bicg.solver, solve_f, setup_f, p.solver)
@check HYPRE_ParCSRBiCGSTABSetPrecond(bicg, solve_f, setup_f, p)
return nothing
end
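# Hedged usage note (added for illustration, not part of the commit): a
# preconditioner is attached with the `Precond` keyword when the solver is
# constructed, e.g. HYPRE.BiCGSTAB(; Precond = HYPRE.BoomerAMG()), which ends up
# calling Internals.set_precond as above.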
@@ -157,7 +129,7 @@ mutable struct BoomerAMG <: HYPRESolver
@check HYPRE_BoomerAMGCreate(solver_ref)
solver.solver = solver_ref[]
# Attach a finalizer
finalizer(Internals.safe_finalizer(HYPRE_BoomerAMGDestroy), solver)
Internals.safe_finalizer(HYPRE_BoomerAMGDestroy, solver)
# Set the options
Internals.set_options(solver, kwargs)
return solver
@@ -165,8 +137,8 @@ mutable struct BoomerAMG <: HYPRESolver
end
function solve!(amg::BoomerAMG, x::HYPREVector, A::HYPREMatrix, b::HYPREVector)
@check HYPRE_BoomerAMGSetup(amg.solver, A.parmatrix, b.parvector, x.parvector)
@check HYPRE_BoomerAMGSolve(amg.solver, A.parmatrix, b.parvector, x.parvector)
@check HYPRE_BoomerAMGSetup(amg, A, b, x)
@check HYPRE_BoomerAMGSolve(amg, A, b, x)
return x
end
@@ -195,14 +167,15 @@ Create a `FlexGMRES` solver. See HYPRE API reference for details and supported s
mutable struct FlexGMRES <: HYPRESolver
comm::MPI.Comm
solver::HYPRE_Solver
precond::Union{HYPRESolver, Nothing}
function FlexGMRES(comm::MPI.Comm = MPI.COMM_NULL; kwargs...)
# comm defaults to COMM_NULL since it is unused in HYPRE_ParCSRFlexGMRESCreate
solver = new(comm, C_NULL)
solver = new(comm, C_NULL, nothing)
solver_ref = Ref{HYPRE_Solver}(C_NULL)
@check HYPRE_ParCSRFlexGMRESCreate(comm, solver_ref)
solver.solver = solver_ref[]
# Attach a finalizer
finalizer(Internals.safe_finalizer(HYPRE_ParCSRFlexGMRESDestroy), solver)
Internals.safe_finalizer(HYPRE_ParCSRFlexGMRESDestroy, solver)
# Set the options
Internals.set_options(solver, kwargs)
return solver
@@ -210,8 +183,8 @@ mutable struct FlexGMRES <: HYPRESolver
end
function solve!(flex::FlexGMRES, x::HYPREVector, A::HYPREMatrix, b::HYPREVector)
@check HYPRE_ParCSRFlexGMRESSetup(flex.solver, A.parmatrix, b.parvector, x.parvector)
@check HYPRE_ParCSRFlexGMRESSolve(flex.solver, A.parmatrix, b.parvector, x.parvector)
@check HYPRE_ParCSRFlexGMRESSetup(flex, A, b, x)
@check HYPRE_ParCSRFlexGMRESSolve(flex, A, b, x)
return x
end
@@ -219,9 +192,10 @@ Internals.setup_func(::FlexGMRES) = HYPRE_ParCSRFlexGMRESSetup
Internals.solve_func(::FlexGMRES) = HYPRE_ParCSRFlexGMRESSolve
function Internals.set_precond(flex::FlexGMRES, p::HYPRESolver)
flex.precond = p
solve_f = Internals.solve_func(p)
setup_f = Internals.setup_func(p)
@check HYPRE_ParCSRFlexGMRESSetPrecond(flex.solver, solve_f, setup_f, p.solver)
@check HYPRE_ParCSRFlexGMRESSetPrecond(flex, solve_f, setup_f, p)
return nothing
end
@@ -248,8 +222,8 @@ end
#end
#function solve!(fsai::FSAI, x::HYPREVector, A::HYPREMatrix, b::HYPREVector)
# @check HYPRE_FSAISetup(fsai.solver, A.parmatrix, b.parvector, x.parvector)
# @check HYPRE_FSAISolve(fsai.solver, A.parmatrix, b.parvector, x.parvector)
# @check HYPRE_FSAISetup(fsai, A, b, x)
# @check HYPRE_FSAISolve(fsai, A, b, x)
# return x
#end
@@ -278,14 +252,15 @@ Create a `GMRES` solver. See HYPRE API reference for details and supported setti
mutable struct GMRES <: HYPRESolver
comm::MPI.Comm
solver::HYPRE_Solver
precond::Union{HYPRESolver, Nothing}
function GMRES(comm::MPI.Comm = MPI.COMM_NULL; kwargs...)
# comm defaults to COMM_NULL since it is unused in HYPRE_ParCSRGMRESCreate
solver = new(comm, C_NULL)
solver = new(comm, C_NULL, nothing)
solver_ref = Ref{HYPRE_Solver}(C_NULL)
@check HYPRE_ParCSRGMRESCreate(comm, solver_ref)
solver.solver = solver_ref[]
# Attach a finalizer
finalizer(Internals.safe_finalizer(HYPRE_ParCSRGMRESDestroy), solver)
Internals.safe_finalizer(HYPRE_ParCSRGMRESDestroy, solver)
# Set the options
Internals.set_options(solver, kwargs)
return solver
@@ -293,8 +268,8 @@ mutable struct GMRES <: HYPRESolver
end
function solve!(gmres::GMRES, x::HYPREVector, A::HYPREMatrix, b::HYPREVector)
@check HYPRE_ParCSRGMRESSetup(gmres.solver, A.parmatrix, b.parvector, x.parvector)
@check HYPRE_ParCSRGMRESSolve(gmres.solver, A.parmatrix, b.parvector, x.parvector)
@check HYPRE_ParCSRGMRESSetup(gmres, A, b, x)
@check HYPRE_ParCSRGMRESSolve(gmres, A, b, x)
return x
end
@@ -302,9 +277,10 @@ Internals.setup_func(::GMRES) = HYPRE_ParCSRGMRESSetup
Internals.solve_func(::GMRES) = HYPRE_ParCSRGMRESSolve
function Internals.set_precond(gmres::GMRES, p::HYPRESolver)
gmres.precond = p
solve_f = Internals.solve_func(p)
setup_f = Internals.setup_func(p)
@check HYPRE_ParCSRGMRESSetPrecond(gmres.solver, solve_f, setup_f, p.solver)
@check HYPRE_ParCSRGMRESSetPrecond(gmres, solve_f, setup_f, p)
return nothing
end
@@ -324,13 +300,14 @@ Create a `Hybrid` solver. See HYPRE API reference for details and supported sett
"""
mutable struct Hybrid <: HYPRESolver
solver::HYPRE_Solver
precond::Union{HYPRESolver, Nothing}
function Hybrid(; kwargs...)
solver = new(C_NULL)
solver = new(C_NULL, nothing)
solver_ref = Ref{HYPRE_Solver}(C_NULL)
@check HYPRE_ParCSRHybridCreate(solver_ref)
solver.solver = solver_ref[]
# Attach a finalizer
finalizer(Internals.safe_finalizer(HYPRE_ParCSRHybridDestroy), solver)
Internals.safe_finalizer(HYPRE_ParCSRHybridDestroy, solver)
# Set the options
Internals.set_options(solver, kwargs)
return solver
@@ -338,8 +315,8 @@ mutable struct Hybrid <: HYPRESolver
end
function solve!(hybrid::Hybrid, x::HYPREVector, A::HYPREMatrix, b::HYPREVector)
@check HYPRE_ParCSRHybridSetup(hybrid.solver, A.parmatrix, b.parvector, x.parvector)
@check HYPRE_ParCSRHybridSolve(hybrid.solver, A.parmatrix, b.parvector, x.parvector)
@check HYPRE_ParCSRHybridSetup(hybrid, A, b, x)
@check HYPRE_ParCSRHybridSolve(hybrid, A, b, x)
return x
end
@@ -347,12 +324,13 @@ Internals.setup_func(::Hybrid) = HYPRE_ParCSRHybridSetup
Internals.solve_func(::Hybrid) = HYPRE_ParCSRHybridSolve
function Internals.set_precond(hybrid::Hybrid, p::HYPRESolver)
hybrid.precond = p
solve_f = Internals.solve_func(p)
setup_f = Internals.setup_func(p)
# Deactivate the finalizer of p since HYPRE_ParCSRHybridDestroy also destroys the
# preconditioner, see https://github.com/hypre-space/hypre/issues/699
finalizer(x -> (x.solver = C_NULL), p)
@check HYPRE_ParCSRHybridSetPrecond(hybrid.solver, solve_f, setup_f, p.solver)
@check HYPRE_ParCSRHybridSetPrecond(hybrid, solve_f, setup_f, p)
return nothing
end
@@ -379,7 +357,7 @@ mutable struct ILU <: HYPRESolver
@check HYPRE_ILUCreate(solver_ref)
solver.solver = solver_ref[]
# Attach a finalizer
finalizer(Internals.safe_finalizer(HYPRE_ILUDestroy), solver)
Internals.safe_finalizer(HYPRE_ILUDestroy, solver)
# Set the options
Internals.set_options(solver, kwargs)
return solver
@@ -387,8 +365,8 @@ mutable struct ILU <: HYPRESolver
end
function solve!(ilu::ILU, x::HYPREVector, A::HYPREMatrix, b::HYPREVector)
@check HYPRE_ILUSetup(ilu.solver, A.parmatrix, b.parvector, x.parvector)
@check HYPRE_ILUSolve(ilu.solver, A.parmatrix, b.parvector, x.parvector)
@check HYPRE_ILUSetup(ilu, A, b, x)
@check HYPRE_ILUSolve(ilu, A, b, x)
return x
end
@@ -426,7 +404,7 @@ mutable struct ParaSails <: HYPRESolver
@check HYPRE_ParCSRParaSailsCreate(comm, solver_ref)
solver.solver = solver_ref[]
# Attach a finalizer
finalizer(Internals.safe_finalizer(HYPRE_ParCSRParaSailsDestroy), solver)
Internals.safe_finalizer(HYPRE_ParCSRParaSailsDestroy, solver)
# Set the options
Internals.set_options(solver, kwargs)
return solver
@@ -454,14 +432,15 @@ Create a `PCG` solver. See HYPRE API reference for details and supported setting
mutable struct PCG <: HYPRESolver
comm::MPI.Comm
solver::HYPRE_Solver
precond::Union{HYPRESolver, Nothing}
function PCG(comm::MPI.Comm = MPI.COMM_NULL; kwargs...)
# comm defaults to COMM_NULL since it is unused in HYPRE_ParCSRPCGCreate
solver = new(comm, C_NULL)
solver = new(comm, C_NULL, nothing)
solver_ref = Ref{HYPRE_Solver}(C_NULL)
@check HYPRE_ParCSRPCGCreate(comm, solver_ref)
solver.solver = solver_ref[]
# Attach a finalizer
finalizer(Internals.safe_finalizer(HYPRE_ParCSRPCGDestroy), solver)
Internals.safe_finalizer(HYPRE_ParCSRPCGDestroy, solver)
# Set the options
Internals.set_options(solver, kwargs)
return solver
@@ -471,8 +450,8 @@ end
const ParCSRPCG = PCG
function solve!(pcg::PCG, x::HYPREVector, A::HYPREMatrix, b::HYPREVector)
@check HYPRE_ParCSRPCGSetup(pcg.solver, A.parmatrix, b.parvector, x.parvector)
@check HYPRE_ParCSRPCGSolve(pcg.solver, A.parmatrix, b.parvector, x.parvector)
@check HYPRE_ParCSRPCGSetup(pcg, A, b, x)
@check HYPRE_ParCSRPCGSolve(pcg, A, b, x)
return x
end
@@ -480,8 +459,74 @@ Internals.setup_func(::PCG) = HYPRE_ParCSRPCGSetup
Internals.solve_func(::PCG) = HYPRE_ParCSRPCGSolve
function Internals.set_precond(pcg::PCG, p::HYPRESolver)
pcg.precond = p
solve_f = Internals.solve_func(p)
setup_f = Internals.setup_func(p)
@check HYPRE_ParCSRPCGSetPrecond(pcg.solver, solve_f, setup_f, p.solver)
@check HYPRE_ParCSRPCGSetPrecond(pcg, solve_f, setup_f, p)
return nothing
end
##########################################################
# Extracting information about the solution from solvers #
##########################################################
"""
HYPRE.GetFinalRelativeResidualNorm(s::HYPRESolver)
Return the final relative residual norm from the last solve with solver `s`.
This function dispatches on the solver to the corresponding C API wrapper
`LibHYPRE.HYPRE_\$(Solver)GetFinalRelativeResidualNorm`.
"""
function GetFinalRelativeResidualNorm(s::HYPRESolver)
r = Ref{HYPRE_Real}()
if s isa BiCGSTAB
@check HYPRE_ParCSRBiCGSTABGetFinalRelativeResidualNorm(s, r)
elseif s isa BoomerAMG
@check HYPRE_BoomerAMGGetFinalRelativeResidualNorm(s, r)
elseif s isa FlexGMRES
@check HYPRE_ParCSRFlexGMRESGetFinalRelativeResidualNorm(s, r)
elseif s isa GMRES
@check HYPRE_ParCSRGMRESGetFinalRelativeResidualNorm(s, r)
elseif s isa Hybrid
@check HYPRE_ParCSRHybridGetFinalRelativeResidualNorm(s, r)
elseif s isa ILU
@check HYPRE_ILUGetFinalRelativeResidualNorm(s, r)
elseif s isa PCG
@check HYPRE_ParCSRPCGGetFinalRelativeResidualNorm(s, r)
else
throw(ArgumentError("cannot get residual norm for $(typeof(s))"))
end
return r[]
end
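# A minimal usage sketch (hypothetical variable names; assumes a solver `pcg` and an
# assembled system `A_h::HYPREMatrix`, `b_h::HYPREVector` as in the tests below):
#
#     x_h = HYPRE.solve(pcg, A_h, b_h)
#     resnorm = HYPRE.GetFinalRelativeResidualNorm(pcg) # relative residual of the last solve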
"""
HYPRE.GetNumIterations(s::HYPRESolver)
Return the number of iterations from the last solve with solver `s`.
This function dispatches on the solver to the corresponding C API wrapper
`LibHYPRE.HYPRE_\$(Solver)GetNumIterations`.
"""
function GetNumIterations(s::HYPRESolver)
r = Ref{HYPRE_Int}()
if s isa BiCGSTAB
@check HYPRE_ParCSRBiCGSTABGetNumIterations(s, r)
elseif s isa BoomerAMG
@check HYPRE_BoomerAMGGetNumIterations(s, r)
elseif s isa FlexGMRES
@check HYPRE_ParCSRFlexGMRESGetNumIterations(s, r)
elseif s isa GMRES
@check HYPRE_ParCSRGMRESGetNumIterations(s, r)
elseif s isa Hybrid
@check HYPRE_ParCSRHybridGetNumIterations(s, r)
elseif s isa ILU
@check HYPRE_ILUGetNumIterations(s, r)
elseif s isa PCG
@check HYPRE_ParCSRPCGGetNumIterations(s, r)
else
throw(ArgumentError("cannot get number of iterations for $(typeof(s))"))
end
return r[]
end
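# A minimal usage sketch (hypothetical names, same assumptions as above):
#
#     HYPRE.solve!(pcg, x_h, A_h, b_h)
#     iters = HYPRE.GetNumIterations(pcg) # iteration count of the last solve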

379  test/runtests.jl

@@ -11,6 +11,8 @@ using SparseMatricesCSR
using Test
using LinearSolve
include("test_utils.jl")
# Init HYPRE and MPI
HYPRE.Init()
@@ -52,11 +54,10 @@ end
@testset "HYPREMatrix(::SparseMatrixCS(C|R))" begin
ilower, iupper = 4, 6
CSC = convert(SparseMatrixCSC{HYPRE_Complex, HYPRE_Int}, sparse([
1 2 0 0 3
0 4 0 5 0
0 6 7 0 8
]))
CSC = convert(
SparseMatrixCSC{HYPRE_Complex, HYPRE_Int},
sparse([1 2 0 0 3; 0 4 0 5 0; 0 6 7 0 8])
)
CSR = sparsecsr(findnz(CSC)..., size(CSC)...)
@test CSC == CSR
csc = Internals.to_hypre_data(CSC, ilower, iupper)
@@ -112,105 +113,98 @@ end
@test H.iupper == H.jupper == 10
end
function tomain(x)
g = gather(copy(x))
be = get_backend(g.values)
if be isa SequentialBackend
return g.values.parts[1]
else # if be isa MPIBackend
return g.values.part
function distribute_as_parray(parts, backend)
if backend == :debug
parts = DebugArray(parts)
elseif backend == :mpi
parts = distribute_with_mpi(parts)
else
@assert backend == :native
parts = collect(parts)
end
return parts
end
@testset "HYPREMatrix(::PSparseMatrix)" begin
# Sequential backend
function diag_data(backend, parts)
is_seq = backend isa SequentialBackend
rows = PRange(parts, 10)
cols = PRange(parts, 10)
I, J, V = map_parts(parts) do p
function diag_data(parts)
rows = uniform_partition(parts, 10)
cols = uniform_partition(parts, 10)
np = length(parts)
IJV = map(parts) do p
i = Int[]
j = Int[]
v = Float64[]
if (is_seq && p == 1) || !is_seq
if np == 1
# The MPI case is special: there is only one MPI process.
@assert p == 1
append!(i, [1, 2, 3, 4, 5, 6, 7, 8, 9, 10])
append!(j, [1, 2, 3, 4, 5, 6, 7, 8, 9, 10])
append!(v, [1, 2, 3, 4, 5, 6, 7, 8, 9, 10])
elseif p == 1
@assert np == 2
append!(i, [1, 2, 3, 4, 5, 6])
append!(j, [1, 2, 3, 4, 5, 6])
append!(v, [1, 2, 3, 4, 5, 6])
end
if (is_seq && p == 2) || !is_seq
else
@assert np == 2
@assert p == 2
append!(i, [4, 5, 6, 7, 8, 9, 10])
append!(j, [4, 5, 6, 7, 8, 9, 10])
append!(v, [4, 5, 6, 7, 8, 9, 10])
end
return i, j, v
end
add_gids!(rows, I)
assemble!(I, J, V, rows)
add_gids!(cols, J)
I, J, V = tuple_of_arrays(IJV)
return I, J, V, rows, cols
end
backend = SequentialBackend()
parts = get_part_ids(backend, 2)
CSC = PSparseMatrix(diag_data(backend, parts)...; ids=:global)
CSR = PSparseMatrix(sparsecsr, diag_data(backend, parts)...; ids=:global)
@test tomain(CSC) == tomain(CSR) ==
Diagonal([1, 2, 3, 8, 10, 12, 7, 8, 9, 10])
for backend in [:native, :debug, :mpi]
@testset "Backend=$backend" begin
if backend == :mpi
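# runtests.jl itself runs on a single MPI rank, so the :mpi backend can only
# be exercised with one part here.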
parts = 1:1
else
parts = 1:2
end
parts = distribute_as_parray(parts, backend)
CSC = psparse(diag_data(parts)...) |> fetch
CSR = psparse(sparsecsr, diag_data(parts)...) |> fetch
map_parts(CSC.values, CSC.rows.partition, CSC.cols.partition,
CSR.values, CSR.rows.partition, CSR.cols.partition, parts) do args...
cscvalues, cscrows, csccols, csrvalues, csrrows, csrcols, p = args
csc = Internals.to_hypre_data(cscvalues, cscrows, csccols)
csr = Internals.to_hypre_data(csrvalues, csrrows, csrcols)
if p == 1
for A in [CSC, CSR]
map(local_values(A), A.row_partition, A.col_partition, parts) do values, rows, cols, p
hypre_data = Internals.to_hypre_data(values, rows, cols)
if backend == :mpi
@assert p == 1
nrows = 10
ncols = [1, 1, 1, 1, 1, 1, 1, 1, 1, 1]
rows = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
cols = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
values = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
elseif p == 1
nrows = 5
ncols = [1, 1, 1, 1, 1]
rows = [1, 2, 3, 4, 5]
cols = [1, 2, 3, 4, 5]
values = [1, 2, 3, 8, 10]
else # if p == 1
else
@assert p == 2
nrows = 5
ncols = [1, 1, 1, 1, 1]
rows = [6, 7, 8, 9, 10]
cols = [6, 7, 8, 9, 10]
values = [12, 7, 8, 9, 10]
end
@test csc[1]::HYPRE_Int == csr[1]::HYPRE_Int == nrows
@test csc[2]::Vector{HYPRE_Int} == csr[2]::Vector{HYPRE_Int} == ncols
@test csc[3]::Vector{HYPRE_BigInt} == csr[3]::Vector{HYPRE_BigInt} == rows
@test csc[4]::Vector{HYPRE_BigInt} == csr[4]::Vector{HYPRE_BigInt} == cols
@test csc[5]::Vector{HYPRE_Complex} == csr[5]::Vector{HYPRE_Complex} == values
@test hypre_data[1]::HYPRE_Int == nrows
@test hypre_data[2]::Vector{HYPRE_Int} == ncols
@test hypre_data[3]::Vector{HYPRE_BigInt} == rows
@test hypre_data[4]::Vector{HYPRE_BigInt} == cols
@test hypre_data[5]::Vector{HYPRE_Complex} == values
end
end
end
# MPI backend
backend = MPIBackend()
parts = MPIData(1, MPI.COMM_WORLD, (1,)) # get_part_ids duplicates the comm
CSC = PSparseMatrix(diag_data(backend, parts)...; ids=:global)
CSR = PSparseMatrix(sparsecsr, diag_data(backend, parts)...; ids=:global)
@test tomain(CSC) == tomain(CSR) ==
Diagonal([1, 2, 3, 8, 10, 12, 7, 8, 9, 10])
map_parts(CSC.values, CSC.rows.partition, CSC.cols.partition,
CSR.values, CSR.rows.partition, CSR.cols.partition, parts) do args...
cscvalues, cscrows, csccols, csrvalues, csrrows, csrcols, p = args
csc = Internals.to_hypre_data(cscvalues, cscrows, csccols)
csr = Internals.to_hypre_data(csrvalues, csrrows, csrcols)
nrows = 10
ncols = fill(1, 10)
rows = collect(1:10)
cols = collect(1:10)
values = [1, 2, 3, 8, 10, 12, 7, 8, 9, 10]
@test csc[1]::HYPRE_Int == csr[1]::HYPRE_Int == nrows
@test csc[2]::Vector{HYPRE_Int} == csr[2]::Vector{HYPRE_Int} == ncols
@test csc[3]::Vector{HYPRE_BigInt} == csr[3]::Vector{HYPRE_BigInt} == rows
@test csc[4]::Vector{HYPRE_BigInt} == csr[4]::Vector{HYPRE_BigInt} == cols
@test csc[5]::Vector{HYPRE_Complex} == csr[5]::Vector{HYPRE_Complex} == values
end
end
@testset "HYPREVector" begin
h = HYPREVector(MPI.COMM_WORLD, 1, 5)
@test h.ijvector != HYPRE_IJVector(C_NULL)
@@ -271,52 +265,106 @@ end
end
@testset "HYPREVector(::PVector)" begin
# Sequential backend
backend = SequentialBackend()
parts = get_part_ids(backend, 2)
rows = PRange(parts, 10)
for backend in [:native, :debug, :mpi]
if backend == :mpi
parts = distribute_as_parray(1:1, backend)
else
parts = distribute_as_parray(1:2, backend)
end
rows = uniform_partition(parts, 10)
b = rand(10)
I, V = map_parts(parts) do p
if p == 1
return collect(1:6), b[1:6]
IV = map(parts, rows) do p, owned
if backend == :mpi
row_indices = 1:10
elseif p == 1
row_indices = 1:6
else # p == 2
return collect(4:10), b[4:10]
row_indices = 4:10
end
values = zeros(length(row_indices))
for (i, row) in enumerate(row_indices)
if row in owned
values[i] = b[row]
end
end
add_gids!(rows, I)
pb = PVector(I, V, rows; ids=:global)
assemble!(pb)
@test tomain(pb) == [i in 4:6 ? 2x : x for (i, x) in zip(eachindex(b), b)]
return collect(row_indices), values
end
I, V = tuple_of_arrays(IV)
pb = pvector(I, V, rows) |> fetch
H = HYPREVector(pb)
# Check for valid vector
@test H.ijvector != HYPRE_IJVector(C_NULL)
@test H.parvector != HYPRE_ParVector(C_NULL)
pbc = fill!(copy(pb), 0)
copy!(pbc, H)
@test tomain(pbc) == tomain(pb)
# Copy back, check if identical
b_copy = copy!(similar(b), H)
@test b_copy == b
# Test copy to and from HYPREVector
pb2 = 2 * pb
H′ = copy!(H, pb2)
@test H === H′
pbc = similar(pb)
copy!(pbc, H)
@test tomain(pbc) == 2 * tomain(pb)
# MPI backend
backend = MPIBackend()
parts = get_part_ids(backend, 1)
rows = PRange(parts, 10)
I, V = map_parts(parts) do p
return collect(1:10), b
end
add_gids!(rows, I)
pb = PVector(I, V, rows; ids=:global)
assemble!(pb)
@test tomain(pb) == b
H = HYPREVector(pb)
@test H.ijvector != HYPRE_IJVector(C_NULL)
@test H.parvector != HYPRE_ParVector(C_NULL)
pbc = fill!(copy(pb), 0)
copy!(pbc, H)
@test tomain(pbc) == tomain(pb)
@test pbc == 2 * pb
end
end
@testset "HYPRE(Matrix|Vector)?Assembler" begin
comm = MPI.COMM_WORLD
# Assembly HYPREMatrix from ::Matrix
A = HYPREMatrix(comm, 1, 3)
AM = zeros(3, 3)
for i in 1:2
assembler = HYPRE.start_assemble!(A)
fill!(AM, 0)
for idx in ([1, 2], [3, 1])
a = rand(2, 2)
HYPRE.assemble!(assembler, idx, idx, a)
AM[idx, idx] += a
ar = rand(1, 2)
HYPRE.assemble!(assembler, [2], idx, ar)
AM[[2], idx] += ar
end
f = HYPRE.finish_assemble!(assembler)
@test f === A
@test getindex_debug(A, 1:3, 1:3) == AM
end
# Assembly HYPREVector from ::Vector
b = HYPREVector(comm, 1, 3)
bv = zeros(3)
for i in 1:2
assembler = HYPRE.start_assemble!(b)
fill!(bv, 0)
for idx in ([1, 2], [3, 1])
c = rand(2)
HYPRE.assemble!(assembler, idx, c)
bv[idx] += c
end
f = HYPRE.finish_assemble!(assembler)
@test f === b
@test getindex_debug(b, 1:3) == bv
end
# Assembly HYPREMatrix/HYPREVector from ::Array
A = HYPREMatrix(comm, 1, 3)
AM = zeros(3, 3)
b = HYPREVector(comm, 1, 3)
bv = zeros(3)
for i in 1:2
assembler = HYPRE.start_assemble!(A, b)
fill!(AM, 0)
fill!(bv, 0)
for idx in ([1, 2], [3, 1])
a = rand(2, 2)
c = rand(2)
HYPRE.assemble!(assembler, idx, a, c)
AM[idx, idx] += a
bv[idx] += c
end
F, f = HYPRE.finish_assemble!(assembler)
@test F === A
@test f === b
@test getindex_debug(A, 1:3, 1:3) == AM
@test getindex_debug(b, 1:3) == bv
end
end
@testset "BiCGSTAB" begin
@@ -333,7 +381,7 @@ end
b_h = HYPREVector(b)
x_h = HYPREVector(x)
# Solve
tol = 1e-9
tol = 1.0e-9
bicg = HYPRE.BiCGSTAB(; Tol = tol)
HYPRE.solve!(bicg, x_h, A_h, b_h)
copy!(x, x_h)
@@ -343,6 +391,9 @@ end
x_h = HYPRE.solve(bicg, A_h, b_h)
copy!(x, x_h)
@test x A \ b atol = tol
# Test solver queries
@test HYPRE.GetFinalRelativeResidualNorm(bicg) < tol
@test HYPRE.GetNumIterations(bicg) > 0
# Solve with preconditioner
precond = HYPRE.BoomerAMG(; MaxIter = 1, Tol = 0.0)
@@ -381,7 +432,7 @@ end
append!(J, [i, i, i + 1, i + 1]) # cols
end
A = sparse(I, J, V)
A[:, 1] .= 0; A[1, :] .= 0; A[:, end] .= 0; A[end, :] .= 0;
A[:, 1] .= 0; A[1, :] .= 0; A[:, end] .= 0; A[end, :] .= 0
A[1, 1] = 2; A[end, end] = 2
@test isposdef(A)
b = rand(100)
@@ -391,7 +442,7 @@ end
b_h = HYPREVector(b, ilower, iupper)
x_h = HYPREVector(b, ilower, iupper)
# Solve
tol = 1e-9
tol = 1.0e-9
amg = HYPRE.BoomerAMG(; Tol = tol)
HYPRE.solve!(amg, x_h, A_h, b_h)
copy!(x, x_h)
@@ -402,6 +453,9 @@ end
x_h = HYPRE.solve(amg, A_h, b_h)
copy!(x, x_h)
@test x A \ b atol = tol * norm(b)
# Test solver queries
@test HYPRE.GetFinalRelativeResidualNorm(amg) < tol
@test HYPRE.GetNumIterations(amg) > 0
end
@testset "FlexGMRES" begin
@@ -418,7 +472,7 @@ end
b_h = HYPREVector(b)
x_h = HYPREVector(x)
# Solve
tol = 1e-9
tol = 1.0e-9
gmres = HYPRE.FlexGMRES(; Tol = tol)
HYPRE.solve!(gmres, x_h, A_h, b_h)
copy!(x, x_h)
@@ -428,6 +482,9 @@ end
x_h = HYPRE.solve(gmres, A_h, b_h)
copy!(x, x_h)
@test x A \ b atol = tol
# Test solver queries
@test HYPRE.GetFinalRelativeResidualNorm(gmres) < tol
@test HYPRE.GetNumIterations(gmres) > 0
# Solve with preconditioner
precond = HYPRE.BoomerAMG()
@@ -458,7 +515,7 @@ end
b_h = HYPREVector(b)
x_h = HYPREVector(x)
# Solve
tol = 1e-9
tol = 1.0e-9
gmres = HYPRE.GMRES(; Tol = tol)
HYPRE.solve!(gmres, x_h, A_h, b_h)
copy!(x, x_h)
@@ -468,6 +525,9 @@ end
x_h = HYPRE.solve(gmres, A_h, b_h)
copy!(x, x_h)
@test x A \ b atol = tol
# Test solver queries
@test HYPRE.GetFinalRelativeResidualNorm(gmres) < tol
@test HYPRE.GetNumIterations(gmres) > 0
# Solve with preconditioner
precond = HYPRE.BoomerAMG(; MaxIter = 1, Tol = 0.0)
@@ -497,7 +557,7 @@ end
b_h = HYPREVector(b)
x_h = HYPREVector(x)
# Solve
tol = 1e-9
tol = 1.0e-9
hybrid = HYPRE.Hybrid(; Tol = tol)
HYPRE.solve!(hybrid, x_h, A_h, b_h)
copy!(x, x_h)
@@ -507,6 +567,9 @@ end
x_h = HYPRE.solve(hybrid, A_h, b_h)
copy!(x, x_h)
@test x A \ b atol = tol
# Test solver queries
@test HYPRE.GetFinalRelativeResidualNorm(hybrid) < tol
@test HYPRE.GetNumIterations(hybrid) > 0
# Solve with given preconditioner
precond = HYPRE.BoomerAMG()
@@ -537,7 +600,7 @@ end
b_h = HYPREVector(b)
x_h = HYPREVector(x)
# Solve
tol = 1e-9
tol = 1.0e-9
ilu = HYPRE.ILU(; Tol = tol)
HYPRE.solve!(ilu, x_h, A_h, b_h)
copy!(x, x_h)
@@ -547,6 +610,9 @@ end
x_h = HYPRE.solve(ilu, A_h, b_h)
copy!(x, x_h)
@test x A \ b atol = tol
# Test solver queries
@test HYPRE.GetFinalRelativeResidualNorm(ilu) < tol
@test HYPRE.GetNumIterations(ilu) > 0
# Use as preconditioner to PCG
precond = HYPRE.ILU()
@@ -578,13 +644,16 @@ end
b_h = HYPREVector(b, ilower, iupper)
x_h = HYPREVector(b, ilower, iupper)
# Solve with ParaSails as preconditioner
tol = 1e-9
tol = 1.0e-9
parasails = HYPRE.ParaSails()
pcg = HYPRE.PCG(; Tol = tol, Precond = parasails)
HYPRE.solve!(pcg, x_h, A_h, b_h)
copy!(x, x_h)
# Test result with direct solver
@test x A \ b atol = tol
# Test solver queries (should error)
@test_throws ArgumentError("cannot get residual norm for HYPRE.ParaSails") HYPRE.GetFinalRelativeResidualNorm(parasails)
@test_throws ArgumentError("cannot get number of iterations for HYPRE.ParaSails") HYPRE.GetNumIterations(parasails)
end
@testset "(ParCSR)PCG" begin
@@ -602,7 +671,7 @@ end
b_h = HYPREVector(b, ilower, iupper)
x_h = HYPREVector(b, ilower, iupper)
# Solve
tol = 1e-9
tol = 1.0e-9
pcg = HYPRE.PCG(; Tol = tol)
HYPRE.solve!(pcg, x_h, A_h, b_h)
copy!(x, x_h)
@@ -612,6 +681,10 @@ end
x_h = HYPRE.solve(pcg, A_h, b_h)
copy!(x, x_h)
@test x A \ b atol = tol
# Test solver queries
@test HYPRE.GetFinalRelativeResidualNorm(pcg) < tol
@test HYPRE.GetNumIterations(pcg) > 0
# Solve with AMG preconditioner
precond = HYPRE.BoomerAMG(; MaxIter = 1, Tol = 0.0)
pcg = HYPRE.PCG(; Tol = tol, Precond = precond)
@@ -626,55 +699,89 @@ end
@test x A \ b atol = tol
end
function topartitioned(x::Vector, A::SparseMatrixCSC, b::Vector)
parts = get_part_ids(SequentialBackend(), 1)
rows = PRange(parts, size(A, 1))
cols = PRange(parts, size(A, 2))
II, JJ, VV, bb, xx = map_parts(parts) do _
function topartitioned(x::Vector, A::SparseMatrixCSC, b::Vector, backend)
parts = distribute_as_parray(1:1, backend)
n = size(A, 1)
rows = uniform_partition(parts, n)
cols = uniform_partition(parts, n)
tmp = map(parts) do _
return findnz(A)..., b, x
end
add_gids!(rows, II)
assemble!(II, JJ, VV, rows)
add_gids!(cols, JJ)
A_p = PSparseMatrix(II, JJ, VV, rows, cols; ids = :global)
II, JJ, VV, bb, xx = tuple_of_arrays(tmp)
A_p = psparse(II, JJ, VV, rows, cols) |> fetch
b_p = PVector(bb, rows)
x_p = PVector(xx, cols)
return x_p, A_p, b_p
end
@testset "solve with PartitionedArrays" begin
for backend in [:native, :debug, :mpi]
# Setup
A = sprand(100, 100, 0.05); A = A'A + 5I
b = rand(100)
x = zeros(100)
x_p, A_p, b_p = topartitioned(x, A, b)
@test A == tomain(A_p)
@test b == tomain(b_p)
@test x == tomain(x_p)
x_p, A_p, b_p = topartitioned(x, A, b, :native)
# Data is distributed over a single process, so local_values returns the entire
# matrix/vector and can be compared directly against the serial data.
map(local_values(x_p)) do x_l
@test x_l == x
end
map(local_values(b_p)) do b_l
@test b_l == b
end
map(local_values(A_p)) do A_l
@test A_l == A
end
# Solve
tol = 1e-9
tol = 1.0e-9
pcg = HYPRE.PCG(; Tol = tol)
## solve!
HYPRE.solve!(pcg, x_p, A_p, b_p)
@test tomain(x_p) A \ b atol=tol
ref = A \ b
map(local_values(x_p)) do x
@test x ref atol = tol
end
## solve
x_p = HYPRE.solve(pcg, A_p, b_p)
@test tomain(x_p) A \ b atol=tol
map(local_values(x_p)) do x
@test x ref atol = tol
end
end
end
@testset "solve with SparseMatrixCS(C|R)" begin
# Setup
A = sprand(100, 100, 0.05); A = A'A + 5I
CSC = sprand(100, 100, 0.05); CSC = CSC'CSC + 5I
CSR = sparsecsr(findnz(CSC)..., size(CSC)...)
b = rand(100)
x = zeros(100)
xcsc = zeros(100)
xcsr = zeros(100)
# Solve
tol = 1e-9
tol = 1.0e-9
pcg = HYPRE.PCG(; Tol = tol)
## solve!
HYPRE.solve!(pcg, x, A, b)
@test x A \ b atol=tol
HYPRE.solve!(pcg, xcsc, CSC, b)
@test xcsc CSC \ b atol = tol
HYPRE.solve!(pcg, xcsr, CSR, b)
@test xcsr CSC \ b atol = tol # TODO: CSR \ b fails
## solve
x = HYPRE.solve(pcg, A, b)
@test x A \ b atol=tol
xcsc = HYPRE.solve(pcg, CSC, b)
@test xcsc CSC \ b atol = tol
xcsr = HYPRE.solve(pcg, CSR, b)
@test xcsr CSC \ b atol = tol # TODO: CSR \ b fails
end
@testset "MPI execution" begin
testfiles = joinpath.(
@__DIR__,
[
"test_assembler.jl",
]
)
for file in testfiles
r = run(ignorestatus(`$(mpiexec()) -n 2 $(Base.julia_cmd()) $(file)`))
@test r.exitcode == 0
end
end

117  test/test_assembler.jl

@@ -0,0 +1,117 @@
# SPDX-License-Identifier: MIT
using HYPRE
using MPI
using Test
MPI.Init()
HYPRE.Init()
include("test_utils.jl")
comm = MPI.COMM_WORLD
comm_rank = MPI.Comm_rank(comm)
comm_size = MPI.Comm_size(comm)
if comm_size != 2
error("Must run with 2 ranks.")
end
if comm_rank == 0
ilower = 1
iupper = 10
N = 2:10
else
ilower = 11
iupper = 20
N = 11:19
end
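# Local contribution for "element" n: a 3x3 block coupling rows/columns (n - 1, n, n + 1)
# and a length-3 vector, so neighboring ranks contribute to overlapping rows.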
function values_and_indices(n)
idx = [n - 1, n, n + 1]
a = Float64[
# runic: off
n -2n -n
-2n n -2n
-n -2n n
# runic: on
]
b = Float64[n, n / 2, n / 3]
return idx, a, b
end
##########################
## HYPREMatrixAssembler ##
##########################
# Dense local matrix
A = HYPREMatrix(comm, ilower, iupper)
AM = zeros(20, 20)
for i in 1:2
assembler = HYPRE.start_assemble!(A)
fill!(AM, 0)
for n in N
idx, a, _ = values_and_indices(n)
HYPRE.assemble!(assembler, idx, idx, a)
AM[idx, idx] += a
end
f = HYPRE.finish_assemble!(assembler)
@test f === A
MPI.Allreduce!(AM, +, comm)
@test getindex_debug(A, ilower:iupper, 1:20) == AM[ilower:iupper, 1:20]
MPI.Barrier(comm)
end
##########################
## HYPREVectorAssembler ##
##########################
# Dense local vector
b = HYPREVector(comm, ilower, iupper)
bv = zeros(20)
for i in 1:2
assembler = HYPRE.start_assemble!(b)
fill!(bv, 0)
for n in N
idx, _, a = values_and_indices(n)
HYPRE.assemble!(assembler, idx, a)
bv[idx] += a
end
f = HYPRE.finish_assemble!(assembler)
@test f === b
MPI.Allreduce!(bv, +, comm)
@test getindex_debug(b, ilower:iupper) == bv[ilower:iupper]
MPI.Barrier(comm)
end
####################
## HYPREAssembler ##
####################
# Dense local arrays
A = HYPREMatrix(comm, ilower, iupper)
AM = zeros(20, 20)
b = HYPREVector(comm, ilower, iupper)
bv = zeros(20)
for i in 1:2
assembler = HYPRE.start_assemble!(A, b)
fill!(AM, 0)
fill!(bv, 0)
for n in N
idx, a, c = values_and_indices(n)
HYPRE.assemble!(assembler, idx, a, c)
AM[idx, idx] += a
bv[idx] += c
end
F, f = HYPRE.finish_assemble!(assembler)
@test F === A
@test f === b
MPI.Allreduce!(AM, +, comm)
MPI.Allreduce!(bv, +, comm)
@test getindex_debug(A, ilower:iupper, 1:20) == AM[ilower:iupper, 1:20]
@test getindex_debug(b, ilower:iupper) == bv[ilower:iupper]
MPI.Barrier(comm)
end

23  test/test_utils.jl

@@ -0,0 +1,23 @@
# SPDX-License-Identifier: MIT
using HYPRE
using HYPRE.LibHYPRE
using HYPRE.LibHYPRE: @check
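# Debug helper: read back the (i, j) block of an assembled HYPREMatrix through
# HYPRE_IJMatrixGetValues and return it as a dense Matrix with rows i and columns j.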
function getindex_debug(A::HYPREMatrix, i::AbstractVector, j::AbstractVector)
nrows = HYPRE_Int(length(i))
ncols = fill(HYPRE_Int(length(j)), length(i))
rows = convert(Vector{HYPRE_BigInt}, i)
cols = convert(Vector{HYPRE_BigInt}, repeat(j, length(i)))
values = Vector{HYPRE_Complex}(undef, length(i) * length(j))
@check HYPRE_IJMatrixGetValues(A.ijmatrix, nrows, ncols, rows, cols, values)
return permutedims(reshape(values, (length(j), length(i))))
end
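# Debug helper: read back entries i of an assembled HYPREVector through
# HYPRE_IJVectorGetValues.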
function getindex_debug(b::HYPREVector, i::AbstractVector)
nvalues = HYPRE_Int(length(i))
indices = convert(Vector{HYPRE_BigInt}, i)
values = Vector{HYPRE_Complex}(undef, length(i))
@check HYPRE_IJVectorGetValues(b.ijvector, nvalues, indices, values)
return values
end