Browse Source

Merge main

pull/35/head
termi-official 9 months ago
parent
commit
b59e6a16c6
  1. 6
      .git-blame-ignore-revs
  2. 7
      .github/dependabot.yml
  3. 73
      .github/workflows/Check.yml
  4. 27
      .github/workflows/Documentation.yml
  5. 41
      .github/workflows/Test.yml
  6. 55
      .github/workflows/ci.yml
  7. 11
      .pre-commit-config.yaml
  8. 112
      CHANGELOG.md
  9. 21
      Project.toml
  10. 61
      README.md
  11. 11
      docs/Makefile
  12. 316
      docs/Manifest.toml
  13. 1
      docs/Project.toml
  14. 24
      docs/liveserver.jl
  15. 10
      docs/make.jl
  16. 13
      docs/src/api.md
  17. 16
      docs/src/libhypre.md
  18. 61
      docs/src/matrix-vector.md
  19. 33
      examples/ex5.jl
  20. 300
      ext/HYPREPartitionedArrays.jl
  21. 86
      ext/HYPRESparseArrays.jl
  22. 80
      ext/HYPRESparseMatricesCSR.jl
  23. 16
      gen/Makefile
  24. 140
      gen/Manifest.toml
  25. 15
      gen/generator.jl
  26. 7
      gen/generator.toml
  27. 15
      gen/prologue.jl
  28. 8
      gen/solver_options.jl
  29. 1768
      lib/LibHYPRE.jl
  30. 537
      src/HYPRE.jl
  31. 2
      src/Internals.jl
  32. 3
      src/LibHYPRE.jl
  33. 2
      src/precs.jl
  34. 42
      src/solver_options.jl
  35. 209
      src/solvers.jl
  36. 501
      test/runtests.jl
  37. 117
      test/test_assembler.jl
  38. 23
      test/test_utils.jl

6
.git-blame-ignore-revs

@ -0,0 +1,6 @@
# Runic formatting
# https://github.com/fredrikekre/HYPRE.jl/commit/640d77944e846a1f94e248bf2dea53310314f457
640d77944e846a1f94e248bf2dea53310314f457
# Switch from ccall() to @ccall in generated output
# https://github.com/fredrikekre/HYPRE.jl/commit/b4790048a7803298004bde24658ac90215a837a4
b4790048a7803298004bde24658ac90215a837a4

7
.github/dependabot.yml

@ -0,0 +1,7 @@
# https://docs.github.com/github/administering-a-repository/configuration-options-for-dependency-updates
version: 2
updates:
- package-ecosystem: "github-actions"
directory: "/" # Location of package manifests
schedule:
interval: "monthly"

73
.github/workflows/Check.yml

@ -0,0 +1,73 @@
name: Code checks
on:
pull_request:
push:
branches: ["master"]
jobs:
pre-commit:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v4
- uses: pre-commit/action@2c7b3805fd2a0fd8c1884dcaebf91fc102a13ecd # v3.0.1
explicit-imports:
runs-on: ubuntu-latest
name: "ExplicitImports.jl"
steps:
- uses: actions/checkout@v4
# - uses: julia-actions/setup-julia@v2
# with:
# version: '1'
- uses: julia-actions/cache@v2
# - uses: julia-actions/julia-buildpkg@v1
- name: Install dependencies
shell: julia --project=@explicit-imports {0}
run: |
# Add ExplicitImports.jl and packages that HYPRE has extensions for
using Pkg
Pkg.develop([
PackageSpec(name = "HYPRE", path = pwd()),
])
Pkg.add([
PackageSpec(name = "ExplicitImports", version = "1.9"),
PackageSpec(name = "PartitionedArrays"),
PackageSpec(name = "SparseArrays"),
PackageSpec(name = "SparseMatricesCSR"),
])
- name: ExplicitImports.jl code checks
shell: julia --project=@explicit-imports {0}
run: |
using HYPRE, ExplicitImports, PartitionedArrays, SparseArrays, SparseMatricesCSR
# Check HYPRE
check_no_implicit_imports(HYPRE)
check_no_stale_explicit_imports(HYPRE)
check_all_qualified_accesses_via_owners(HYPRE)
check_no_self_qualified_accesses(HYPRE)
# Check extension modules
for ext in (:HYPREPartitionedArrays, :HYPRESparseArrays, :HYPRESparseMatricesCSR)
extmod = Base.get_extension(HYPRE, ext)
if extmod !== nothing
check_no_implicit_imports(extmod)
check_no_stale_explicit_imports(extmod)
check_all_qualified_accesses_via_owners(extmod)
check_no_self_qualified_accesses(extmod)
else
@warn "$(ext) extension not available."
end
end
runic:
name: Runic
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v4
- uses: julia-actions/setup-julia@v2
with:
version: '1'
- uses: julia-actions/cache@v2
- uses: fredrikekre/runic-action@v1
with:
version: '1'

27
.github/workflows/Documentation.yml

@ -0,0 +1,27 @@
---
name: Documentation
on:
push:
branches:
- 'master'
- 'release-'
tags: ['*']
pull_request:
jobs:
docs:
name: Julia 1.11 - ubuntu-latest
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v4
- uses: julia-actions/setup-julia@v2
with:
version: '1.11'
- uses: julia-actions/cache@v2
- name: Install dependencies
run: julia --project=docs -e 'using Pkg; Pkg.instantiate()'
- name: Build and deploy
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
DOCUMENTER_KEY: ${{ secrets.DOCUMENTER_KEY }}
run: julia --project=docs --color=yes docs/make.jl

41
.github/workflows/Test.yml

@ -0,0 +1,41 @@
name: Test
on:
push:
branches:
- 'master'
- 'release-'
tags: ['*']
pull_request:
jobs:
test:
name: Julia ${{ matrix.version }} - ${{ matrix.os }}
runs-on: ${{ matrix.os }}
strategy:
matrix:
version:
- '1.10'
- '1'
- 'nightly'
os:
- ubuntu-latest
include:
- os: windows-latest
version: '1'
- os: macOS-latest
version: '1'
steps:
- uses: actions/checkout@v4
- uses: julia-actions/setup-julia@v2
with:
version: ${{ matrix.version }}
- uses: julia-actions/cache@v2
- uses: julia-actions/julia-buildpkg@v1
- uses: julia-actions/julia-runtest@v1
- uses: julia-actions/julia-processcoverage@v1
- uses: codecov/codecov-action@v5
with:
files: lcov.info
env:
CODECOV_TOKEN: ${{ secrets.CODECOV_TOKEN }}

55
.github/workflows/ci.yml

@ -1,55 +0,0 @@
name: CI
on:
push:
branches:
- 'master'
- 'release-'
tags: '*'
pull_request:
jobs:
test:
name: Julia ${{ matrix.version }} - ${{ matrix.os }}
runs-on: ${{ matrix.os }}
strategy:
matrix:
version:
- '1.6'
- '1'
- 'nightly'
os:
- ubuntu-latest
include:
- os: windows-latest
version: '1'
- os: macOS-latest
version: '1'
steps:
- uses: actions/checkout@v2
- uses: julia-actions/setup-julia@v1
with:
version: ${{ matrix.version }}
- uses: julia-actions/cache@v1
- uses: julia-actions/julia-buildpkg@v1
- uses: julia-actions/julia-runtest@v1
- uses: julia-actions/julia-processcoverage@v1
- uses: codecov/codecov-action@v2
with:
files: ./lcov.info
docs:
name: Documentation
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v2
- uses: julia-actions/setup-julia@v1
with:
version: '1'
- uses: julia-actions/cache@v1
- name: Install dependencies
run: julia --project=docs -e 'using Pkg; Pkg.instantiate()'
- name: Build and deploy
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
DOCUMENTER_KEY: ${{ secrets.DOCUMENTER_KEY }}
run: julia --project=docs --color=yes docs/make.jl

11
.pre-commit-config.yaml

@ -0,0 +1,11 @@
repos:
- repo: https://github.com/pre-commit/pre-commit-hooks
rev: v3.2.0
hooks:
- id: check-added-large-files
- id: check-case-conflict
- id: check-toml
- id: check-yaml
- id: end-of-file-fixer
- id: mixed-line-ending
- id: trailing-whitespace

112
CHANGELOG.md

@ -0,0 +1,112 @@
# HYPRE.jl changelog
All notable changes to this project will be documented in this file.
The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/),
and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html).
## [v1.7.0] - 2024-10-09
### Changed
- Support for Julia 1.6 has been dropped and for this and future releases Julia 1.10 or
later will be required. ([#27])
- Constant struct fields of `HYPREMatrix` and `HYPREVector` are now marked with `const`.
([#28])
## [v1.6.0] - 2024-09-29
### Changed
- PartitionedArrays.jl dependency upgraded from release series 0.3.x to release series
0.5.x. ([#17], [#18])
- CEnum.jl dependency upgraded to release series 0.5.x (release series 0.4.x still
allowed). ([#17], [#18])
- PartitionedArrays.jl support (`PSparseMatrix`, `PVector`) is now provided by a package
extension. ([#23])
- SparseMatricesCSR.jl support (`SparseMatrixCSR`) is now provided by a package extension.
([#24])
- SparseArrays.jl support (`SparseMatrixCSC`) is now provided by a package extension.
([#25])
## [v1.5.0] - 2023-05-26
### Changed
- PartitionedArrays.jl dependency upgraded from version 0.2.x to version 0.3.x.
([#16])
## [v1.4.0] - 2023-01-20
### Added
- New function `HYPRE.GetFinalRelativeResidualNorm(s::HYPRESolver)` for getting the final
residual norm from a solver. This function dispatches on the solver to the corresponding
C API wrapper `LibHYPRE.HYPRE_${Solver}GetFinalRelativeResidualNorm`. ([#14])
- New function `HYPRE.GetNumIterations(s::HYPRESolver)` for getting the number of
iterations from a solver. This function dispatches on the solver to the corresponding C
API wrapper `LibHYPRE.HYPRE_${Solver}GetNumIterations`. ([#14])
## [v1.3.1] - 2023-01-14
### Fixed
- Solvers now keep a reference to the added preconditioner to make sure the preconditioner
is not finalized before the solver. This fixes crashes (segfaults) that could happen in
case no other reference to the preconditioner existed in the program. ([#12])
- The proper conversion methods for `ccall` are now defined for `HYPREMatrix`,
`HYPREVector`, and `HYPRESolver` such that they can be passed directly to `HYPRE_*`
functions and let `ccall` guarantee the GC preservation of these objects. Although not
observed in practice, this fixes a possible race condition where the matrix/vector/solver
could be finalized too early. ([#13])
## [v1.3.0] - 2022-12-30
### Added
- Rectangular matrices can now be assembled by the new method
`HYPRE.assemble!(::HYPREMatrixAssembler, i::Vector, j::Vector, a::Matrix)` where `i` are
the rows and `j` the columns. ([#7])
### Fixed
- All created HYPRE objects (`HYPREMatrix`, `HYPREVector`, and `HYPRESolver`s) are now kept
track of internally and explicitly `finalize`d (if they haven't been GC'd) before
finalizing HYPRE. This fixes a "race condition" where MPI and/or HYPRE would finalize
before these Julia objects are garbage collected and finalized. ([#8])
### Deprecated
- The method `HYPRE.assemble!(A::HYPREMatrixAssembler, ij::Vector, a::Matrix)` has been
deprecated in favor of `HYPRE.assemble!(A::HYPREMatrixAssembler, i::Vector, j::Vector,
a::Matrix)`, i.e. it is now required to explicitly pass rows and column indices
individually. The motivation behind this is to support assembling of rectangular
matrices. Note that `HYPRE.assemble!(A::HYPREAssembler, ij::Vector, a::Matrix,
b::Vector)` is still supported, where `ij` are used as row and column indices for `a`, as
well as row indices for `b`. ([#6])
## [v1.2.0] - 2022-10-12
### Added
- Added assembler interface to assemble `HYPREMatrix` and/or `HYPREVector` directly without
an intermediate sparse structure in Julia. ([#5])
## [v1.1.0] - 2022-10-05
### Added
- Added support for MPI.jl version 0.20.x (in addition to the existing version 0.19.x
support). ([#2])
## [v1.0.0] - 2022-07-28
Initial release of HYPRE.jl.
<!-- Links generated by Changelog.jl -->
[v1.0.0]: https://github.com/fredrikekre/HYPRE.jl/releases/tag/v1.0.0
[v1.1.0]: https://github.com/fredrikekre/HYPRE.jl/releases/tag/v1.1.0
[v1.2.0]: https://github.com/fredrikekre/HYPRE.jl/releases/tag/v1.2.0
[v1.3.0]: https://github.com/fredrikekre/HYPRE.jl/releases/tag/v1.3.0
[v1.3.1]: https://github.com/fredrikekre/HYPRE.jl/releases/tag/v1.3.1
[v1.4.0]: https://github.com/fredrikekre/HYPRE.jl/releases/tag/v1.4.0
[v1.5.0]: https://github.com/fredrikekre/HYPRE.jl/releases/tag/v1.5.0
[v1.6.0]: https://github.com/fredrikekre/HYPRE.jl/releases/tag/v1.6.0
[v1.7.0]: https://github.com/fredrikekre/HYPRE.jl/releases/tag/v1.7.0
[#2]: https://github.com/fredrikekre/HYPRE.jl/issues/2
[#5]: https://github.com/fredrikekre/HYPRE.jl/issues/5
[#6]: https://github.com/fredrikekre/HYPRE.jl/issues/6
[#7]: https://github.com/fredrikekre/HYPRE.jl/issues/7
[#8]: https://github.com/fredrikekre/HYPRE.jl/issues/8
[#12]: https://github.com/fredrikekre/HYPRE.jl/issues/12
[#13]: https://github.com/fredrikekre/HYPRE.jl/issues/13
[#14]: https://github.com/fredrikekre/HYPRE.jl/issues/14
[#16]: https://github.com/fredrikekre/HYPRE.jl/issues/16
[#17]: https://github.com/fredrikekre/HYPRE.jl/issues/17
[#18]: https://github.com/fredrikekre/HYPRE.jl/issues/18
[#23]: https://github.com/fredrikekre/HYPRE.jl/issues/23
[#24]: https://github.com/fredrikekre/HYPRE.jl/issues/24
[#25]: https://github.com/fredrikekre/HYPRE.jl/issues/25
[#27]: https://github.com/fredrikekre/HYPRE.jl/issues/27
[#28]: https://github.com/fredrikekre/HYPRE.jl/issues/28

21
Project.toml

@ -1,6 +1,6 @@
name = "HYPRE" name = "HYPRE"
uuid = "b5ffcf37-a2bd-41ab-a3da-4bd9bc8ad771" uuid = "b5ffcf37-a2bd-41ab-a3da-4bd9bc8ad771"
version = "1.1.0" version = "1.7.0"
[deps] [deps]
CEnum = "fa961155-64e5-5f13-b03f-caf6b980ea82" CEnum = "fa961155-64e5-5f13-b03f-caf6b980ea82"
@ -8,22 +8,33 @@ HYPRE_jll = "0a602bbd-b08b-5d75-8d32-0de6eef44785"
Libdl = "8f399da3-3557-5675-b5ff-fb832c97cbdb" Libdl = "8f399da3-3557-5675-b5ff-fb832c97cbdb"
LinearAlgebra = "37e2e46d-f89d-539d-b4ee-838fcccc9c8e" LinearAlgebra = "37e2e46d-f89d-539d-b4ee-838fcccc9c8e"
MPI = "da04e1cc-30fd-572f-bb4f-1f8673147195" MPI = "da04e1cc-30fd-572f-bb4f-1f8673147195"
[weakdeps]
PartitionedArrays = "5a9dfac6-5c52-46f7-8278-5e2210713be9" PartitionedArrays = "5a9dfac6-5c52-46f7-8278-5e2210713be9"
SparseArrays = "2f01184e-e22b-5df5-ae63-d93ebab69eaf" SparseArrays = "2f01184e-e22b-5df5-ae63-d93ebab69eaf"
SparseMatricesCSR = "a0a7dd2c-ebf4-11e9-1f05-cf50bc540ca1" SparseMatricesCSR = "a0a7dd2c-ebf4-11e9-1f05-cf50bc540ca1"
[extensions]
HYPREPartitionedArrays = ["PartitionedArrays", "SparseArrays", "SparseMatricesCSR"]
HYPRESparseArrays = "SparseArrays"
HYPRESparseMatricesCSR = ["SparseArrays", "SparseMatricesCSR"]
[compat] [compat]
CEnum = "0.4" CEnum = "0.4, 0.5"
LinearAlgebra = "1" LinearAlgebra = "1"
LinearSolve = "3" LinearSolve = "3"
MPI = "0.19, 0.20" MPI = "0.19, 0.20"
PartitionedArrays = "0.2" PartitionedArrays = "0.5"
SparseMatricesCSR = "0.6" SparseMatricesCSR = "0.6"
julia = "1.6" julia = "1.10"
[extras] [extras]
LinearAlgebra = "37e2e46d-f89d-539d-b4ee-838fcccc9c8e"
LinearSolve = "7ed4a6bd-45f5-4d41-b270-4a48e9bafcae" LinearSolve = "7ed4a6bd-45f5-4d41-b270-4a48e9bafcae"
PartitionedArrays = "5a9dfac6-5c52-46f7-8278-5e2210713be9"
SparseArrays = "2f01184e-e22b-5df5-ae63-d93ebab69eaf"
SparseMatricesCSR = "a0a7dd2c-ebf4-11e9-1f05-cf50bc540ca1"
Test = "8dfed614-e22c-5e08-85e1-65c5234f0b40" Test = "8dfed614-e22c-5e08-85e1-65c5234f0b40"
[targets] [targets]
test = ["LinearSolve", "Test"] test = ["LinearSolve", "PartitionedArrays", "SparseArrays", "SparseMatricesCSR", "Test"]

61
README.md

@ -1,12 +1,13 @@
# HYPRE.jl # HYPRE.jl
| **Documentation** | **Build Status** | [![Documentation](https://img.shields.io/badge/docs-latest%20release-blue.svg)](https://fredrikekre.github.io/HYPRE.jl/)
|:------------------------- |:------------------------------------- | [![Test](https://github.com/fredrikekre/HYPRE.jl/actions/workflows/Test.yml/badge.svg?branch=master&event=push)](https://github.com/fredrikekre/HYPRE.jl/actions/workflows/Test.yml)
| [![][docs-img]][docs-url] | [![][gh-actions-img]][gh-actions-url] | [![Codecov](https://codecov.io/github/fredrikekre/HYPRE.jl/graph/badge.svg)](https://codecov.io/github/fredrikekre/HYPRE.jl)
[![code style: runic](https://img.shields.io/badge/code_style-%E1%9A%B1%E1%9A%A2%E1%9A%BE%E1%9B%81%E1%9A%B2-black)](https://github.com/fredrikekre/Runic.jl)
[Julia][julia] interface to [HYPRE][hypre] ("high performance preconditioners and solvers [Julia](https://julialang.org) interface to [HYPRE](https://github.com/hypre-space/hypre)
featuring multigrid methods for the solution of large, sparse linear systems of equations on ("high performance preconditioners and solvers featuring multigrid methods for the solution
massively parallel computers"). of large, sparse linear systems of equations on massively parallel computers").
While the main purpose of HYPRE is to solve problems on multiple cores, it can also be used While the main purpose of HYPRE is to solve problems on multiple cores, it can also be used
for single core problems. HYPRE.jl aims to make it easy to use both modes of operation, with for single core problems. HYPRE.jl aims to make it easy to use both modes of operation, with
@ -14,12 +15,31 @@ an interface that should be familiar to Julia programmers. This README includes
examples -- refer to the [documentation][docs-url] for more details, and for information examples -- refer to the [documentation][docs-url] for more details, and for information
about the included solvers and preconditioners and how to configure them. about the included solvers and preconditioners and how to configure them.
## Example: Single-core solve with standard sparse matrices ## Installation
HYPRE.jl can be installed from the Pkg REPL (press `]` in the Julia REPL to enter):
```
(@v1) pkg> add HYPRE
```
To configure MPI, see the [documentation for MPI.jl](https://juliaparallel.org/MPI.jl/).
## Changes
All notable changes are documented in [CHANGELOG.md](CHANGELOG.md).
## Usage
Some basic usage examples are shown below. See the [documentation][docs-url] for details.
### Example: Single-core solve with standard sparse matrices
It is possible to use Julia's standard sparse arrays (`SparseMatrixCSC` from the It is possible to use Julia's standard sparse arrays (`SparseMatrixCSC` from the
[SparseArrays.jl][sparse-stdlib] standard library, and `SparseMatrixCSR` from the [SparseArrays.jl](https://github.com/JuliaSparse/SparseArrays.jl) standard library, and
[SparseMatricesCSR.jl][sparsecsr] package) directly in HYPRE.jl. For example, to solve `SparseMatrixCSR` from the
`Ax = b` with conjugate gradients: [SparseMatricesCSR.jl](https://github.com/gridap/SparseMatricesCSR.jl) package) directly in
HYPRE.jl. For example, to solve `Ax = b` with conjugate gradients:
```julia ```julia
# Initialize linear system # Initialize linear system
@ -33,12 +53,12 @@ cg = HYPRE.PCG()
x = HYPRE.solve(cg, A, b) x = HYPRE.solve(cg, A, b)
``` ```
## Example: Multi-core solve using PartitionedArrays.jl ### Example: Multi-core solve using PartitionedArrays.jl
For multi-core problems it is possible to use [PartitionedArrays.jl][partarrays] directly For multi-core problems it is possible to use
with HYPRE.jl. Once the linear system is setup the solver interface is identical. For [PartitionedArrays.jl](https://github.com/fverdugo/PartitionedArrays.jl) directly with
example, to solve `Ax = b` with bi-conjugate gradients and an algebraic multigrid HYPRE.jl. Once the linear system is setup the solver interface is identical. For example, to
preconditioner: solve `Ax = b` with bi-conjugate gradients and an algebraic multigrid preconditioner:
```julia ```julia
# Initialize linear system # Initialize linear system
@ -54,14 +74,3 @@ bicg = HYPRE.BiCGSTAB(; Precond = precond)
# Compute the solution # Compute the solution
x = HYPRE.solve(bicg, A, b) x = HYPRE.solve(bicg, A, b)
``` ```
[julia]: https://julialang.org/
[hypre]: https://github.com/hypre-space/hypre
[sparse-stdlib]: https://github.com/JuliaSparse/SparseArrays.jl
[sparsecsr]: https://github.com/gridap/SparseMatricesCSR.jl
[partarrays]: https://github.com/fverdugo/PartitionedArrays.jl
[docs-img]: https://img.shields.io/badge/docs-stable%20release-blue.svg
[docs-url]: https://fredrikekre.github.io/HYPRE.jl/
[gh-actions-img]: https://github.com/fredrikekre/HYPRE.jl/workflows/CI/badge.svg
[gh-actions-url]: https://github.com/fredrikekre/HYPRE.jl/actions?query=workflow%3ACI

11
docs/Makefile

@ -1,11 +1,8 @@
SRCDIR:=$(shell dirname $(abspath $(firstword $(MAKEFILE_LIST)))) SRCDIR:=$(shell dirname $(abspath $(firstword $(MAKEFILE_LIST))))
default: livedocs default: liveserver
instantiate: liveserver:
julia --project=${SRCDIR} -e 'using Pkg; Pkg.instantiate()' julia --project=${SRCDIR} ${SRCDIR}/liveserver.jl
livedocs: instantiate .PHONY: default liveserver
julia --project=${SRCDIR} -e 'using LiveServer; LiveServer.servedocs(; foldername=pwd())' -- liveserver
.PHONY: default instantiate livedocs

316
docs/Manifest.toml

@ -1,8 +1,8 @@
# This file is machine-generated - editing it directly is not advised # This file is machine-generated - editing it directly is not advised
julia_version = "1.8.2" julia_version = "1.11.0"
manifest_format = "2.0" manifest_format = "2.0"
project_hash = "7c98a97551e318432a6ba3bc3fd4758623a247ac" project_hash = "59b08f4b60c862a102ba6a5a40dd11b11cb0ae51"
[[deps.ANSIColoredPrinters]] [[deps.ANSIColoredPrinters]]
git-tree-sha1 = "574baf8110975760d391c710b6341da1afa48d8c" git-tree-sha1 = "574baf8110975760d391c710b6341da1afa48d8c"
@ -10,71 +10,107 @@ uuid = "a4c015fc-c6ff-483c-b24f-f7ea428134e9"
version = "0.0.1" version = "0.0.1"
[[deps.AbstractTrees]] [[deps.AbstractTrees]]
git-tree-sha1 = "5c0b629df8a5566a06f5fef5100b53ea56e465a0" git-tree-sha1 = "2d9c9a55f9c93e8887ad391fbae72f8ef55e1177"
uuid = "1520ce14-60c1-5f80-bbc7-55ef81b5835c" uuid = "1520ce14-60c1-5f80-bbc7-55ef81b5835c"
version = "0.4.2" version = "0.4.5"
[[deps.ArgTools]] [[deps.ArgTools]]
uuid = "0dad84c5-d112-42e6-8d28-ef12dabb789f" uuid = "0dad84c5-d112-42e6-8d28-ef12dabb789f"
version = "1.1.1" version = "1.1.2"
[[deps.Artifacts]] [[deps.Artifacts]]
uuid = "56f22d72-fd6d-98f1-02f0-08ddc0907c33" uuid = "56f22d72-fd6d-98f1-02f0-08ddc0907c33"
version = "1.11.0"
[[deps.Base64]] [[deps.Base64]]
uuid = "2a0f44e3-6c83-55bd-87e4-b1978d98bd5f" uuid = "2a0f44e3-6c83-55bd-87e4-b1978d98bd5f"
version = "1.11.0"
[[deps.CEnum]] [[deps.CEnum]]
git-tree-sha1 = "eb4cb44a499229b3b8426dcfb5dd85333951ff90" git-tree-sha1 = "389ad5c84de1ae7cf0e28e381131c98ea87d54fc"
uuid = "fa961155-64e5-5f13-b03f-caf6b980ea82" uuid = "fa961155-64e5-5f13-b03f-caf6b980ea82"
version = "0.4.2" version = "0.5.0"
[[deps.Changelog]]
git-tree-sha1 = "e579c6157598169ad4ef17263bdf3452b4a3e316"
uuid = "5217a498-cd5d-4ec6-b8c2-9b85a09b6e3e"
version = "1.1.0"
[[deps.CodecZlib]]
deps = ["TranscodingStreams", "Zlib_jll"]
git-tree-sha1 = "bce6804e5e6044c6daab27bb533d1295e4a2e759"
uuid = "944b1d66-785c-5afd-91f1-9de20f533193"
version = "0.7.6"
[[deps.CompilerSupportLibraries_jll]] [[deps.CompilerSupportLibraries_jll]]
deps = ["Artifacts", "Libdl"] deps = ["Artifacts", "Libdl"]
uuid = "e66e0078-7015-5450-92f7-15fbd957f2ae" uuid = "e66e0078-7015-5450-92f7-15fbd957f2ae"
version = "0.5.2+0" version = "1.1.1+0"
[[deps.Dates]] [[deps.Dates]]
deps = ["Printf"] deps = ["Printf"]
uuid = "ade2ca70-3891-5945-98fb-dc099432e06a" uuid = "ade2ca70-3891-5945-98fb-dc099432e06a"
version = "1.11.0"
[[deps.Distances]]
deps = ["LinearAlgebra", "SparseArrays", "Statistics", "StatsAPI"]
git-tree-sha1 = "3258d0659f812acde79e8a74b11f17ac06d0ca04"
uuid = "b4f34e82-e78d-54a5-968a-f98e89d6e8f7"
version = "0.10.7"
[[deps.Distributed]] [[deps.Distributed]]
deps = ["Random", "Serialization", "Sockets"] deps = ["Random", "Serialization", "Sockets"]
uuid = "8ba89e20-285c-5b6f-9357-94700520ee1b" uuid = "8ba89e20-285c-5b6f-9357-94700520ee1b"
version = "1.11.0"
[[deps.DocStringExtensions]] [[deps.DocStringExtensions]]
deps = ["LibGit2"] deps = ["LibGit2"]
git-tree-sha1 = "5158c2b41018c5f7eb1470d558127ac274eca0c9" git-tree-sha1 = "2fb1e02f2b635d0845df5d7c167fec4dd739b00d"
uuid = "ffbed154-4ef7-542d-bbb7-c09d3a79fcae" uuid = "ffbed154-4ef7-542d-bbb7-c09d3a79fcae"
version = "0.9.1" version = "0.9.3"
[[deps.Documenter]] [[deps.Documenter]]
deps = ["ANSIColoredPrinters", "AbstractTrees", "Base64", "Dates", "DocStringExtensions", "IOCapture", "InteractiveUtils", "JSON", "LibGit2", "Logging", "Markdown", "MarkdownAST", "REPL", "Test", "Unicode"] deps = ["ANSIColoredPrinters", "AbstractTrees", "Base64", "CodecZlib", "Dates", "DocStringExtensions", "Downloads", "Git", "IOCapture", "InteractiveUtils", "JSON", "LibGit2", "Logging", "Markdown", "MarkdownAST", "Pkg", "PrecompileTools", "REPL", "RegistryInstances", "SHA", "TOML", "Test", "Unicode"]
git-tree-sha1 = "540cb30edf31561e99df05a502c1922107d50ae1" git-tree-sha1 = "5a1ee886566f2fa9318df1273d8b778b9d42712d"
repo-rev = "master"
repo-url = "https://github.com/JuliaDocs/Documenter.jl.git"
uuid = "e30172f5-a6a5-5a46-863b-614d45cd2de4" uuid = "e30172f5-a6a5-5a46-863b-614d45cd2de4"
version = "0.28.0-DEV" version = "1.7.0"
[[deps.Downloads]] [[deps.Downloads]]
deps = ["ArgTools", "FileWatching", "LibCURL", "NetworkOptions"] deps = ["ArgTools", "FileWatching", "LibCURL", "NetworkOptions"]
uuid = "f43a241f-c20a-4ad4-852c-f6b1247861c6" uuid = "f43a241f-c20a-4ad4-852c-f6b1247861c6"
version = "1.6.0" version = "1.6.0"
[[deps.Expat_jll]]
deps = ["Artifacts", "JLLWrappers", "Libdl"]
git-tree-sha1 = "1c6317308b9dc757616f0b5cb379db10494443a7"
uuid = "2e619515-83b5-522b-bb60-26c02a35a201"
version = "2.6.2+0"
[[deps.FileWatching]] [[deps.FileWatching]]
uuid = "7b1f6079-737a-58dc-b8bc-7a2ca5c1b5ee" uuid = "7b1f6079-737a-58dc-b8bc-7a2ca5c1b5ee"
version = "1.11.0"
[[deps.Git]]
deps = ["Git_jll"]
git-tree-sha1 = "04eff47b1354d702c3a85e8ab23d539bb7d5957e"
uuid = "d7ba0133-e1db-5d97-8f8c-041e4b3a1eb2"
version = "1.3.1"
[[deps.Git_jll]]
deps = ["Artifacts", "Expat_jll", "JLLWrappers", "LibCURL_jll", "Libdl", "Libiconv_jll", "OpenSSL_jll", "PCRE2_jll", "Zlib_jll"]
git-tree-sha1 = "ea372033d09e4552a04fd38361cd019f9003f4f4"
uuid = "f8c6e375-362e-5223-8a59-34ff63f689eb"
version = "2.46.2+0"
[[deps.HYPRE]] [[deps.HYPRE]]
deps = ["CEnum", "HYPRE_jll", "Libdl", "MPI", "PartitionedArrays", "SparseArrays", "SparseMatricesCSR"] deps = ["CEnum", "HYPRE_jll", "Libdl", "MPI"]
path = ".." path = ".."
uuid = "b5ffcf37-a2bd-41ab-a3da-4bd9bc8ad771" uuid = "b5ffcf37-a2bd-41ab-a3da-4bd9bc8ad771"
version = "1.1.0" version = "1.7.0"
[deps.HYPRE.extensions]
HYPREPartitionedArrays = ["PartitionedArrays", "SparseArrays", "SparseMatricesCSR"]
HYPRESparseArrays = "SparseArrays"
HYPRESparseMatricesCSR = ["SparseArrays", "SparseMatricesCSR"]
[deps.HYPRE.weakdeps]
PartitionedArrays = "5a9dfac6-5c52-46f7-8278-5e2210713be9"
SparseArrays = "2f01184e-e22b-5df5-ae63-d93ebab69eaf"
SparseMatricesCSR = "a0a7dd2c-ebf4-11e9-1f05-cf50bc540ca1"
[[deps.HYPRE_jll]] [[deps.HYPRE_jll]]
deps = ["Artifacts", "JLLWrappers", "LAPACK_jll", "LazyArtifacts", "Libdl", "MPICH_jll", "MPIPreferences", "MPItrampoline_jll", "MicrosoftMPI_jll", "OpenBLAS_jll", "OpenMPI_jll", "Pkg", "TOML"] deps = ["Artifacts", "JLLWrappers", "LAPACK_jll", "LazyArtifacts", "Libdl", "MPICH_jll", "MPIPreferences", "MPItrampoline_jll", "MicrosoftMPI_jll", "OpenBLAS_jll", "OpenMPI_jll", "Pkg", "TOML"]
@ -82,124 +118,151 @@ git-tree-sha1 = "b77d3eca75f8442e034ccf415c87405a49e77985"
uuid = "0a602bbd-b08b-5d75-8d32-0de6eef44785" uuid = "0a602bbd-b08b-5d75-8d32-0de6eef44785"
version = "2.23.1+1" version = "2.23.1+1"
[[deps.Hwloc_jll]]
deps = ["Artifacts", "JLLWrappers", "Libdl"]
git-tree-sha1 = "dd3b49277ec2bb2c6b94eb1604d4d0616016f7a6"
uuid = "e33a78d0-f292-5ffc-b300-72abe9b543c8"
version = "2.11.2+0"
[[deps.IOCapture]] [[deps.IOCapture]]
deps = ["Logging", "Random"] deps = ["Logging", "Random"]
git-tree-sha1 = "f7be53659ab06ddc986428d3a9dcc95f6fa6705a" git-tree-sha1 = "b6d6bfdd7ce25b0f9b2f6b3dd56b2673a66c8770"
uuid = "b5f81e59-6552-4d32-b1f0-c071b021bf89" uuid = "b5f81e59-6552-4d32-b1f0-c071b021bf89"
version = "0.2.2" version = "0.2.5"
[[deps.InteractiveUtils]] [[deps.InteractiveUtils]]
deps = ["Markdown"] deps = ["Markdown"]
uuid = "b77e0a4c-d291-57a0-90e8-8db25a27a240" uuid = "b77e0a4c-d291-57a0-90e8-8db25a27a240"
version = "1.11.0"
[[deps.IterativeSolvers]]
deps = ["LinearAlgebra", "Printf", "Random", "RecipesBase", "SparseArrays"]
git-tree-sha1 = "1169632f425f79429f245113b775a0e3d121457c"
uuid = "42fd0dbc-a981-5370-80f2-aaf504508153"
version = "0.9.2"
[[deps.JLLWrappers]] [[deps.JLLWrappers]]
deps = ["Preferences"] deps = ["Artifacts", "Preferences"]
git-tree-sha1 = "abc9885a7ca2052a736a600f7fa66209f96506e1" git-tree-sha1 = "f389674c99bfcde17dc57454011aa44d5a260a40"
uuid = "692b3bcd-3c85-4b1f-b108-f13ce0eb3210" uuid = "692b3bcd-3c85-4b1f-b108-f13ce0eb3210"
version = "1.4.1" version = "1.6.0"
[[deps.JSON]] [[deps.JSON]]
deps = ["Dates", "Mmap", "Parsers", "Unicode"] deps = ["Dates", "Mmap", "Parsers", "Unicode"]
git-tree-sha1 = "3c837543ddb02250ef42f4738347454f95079d4e" git-tree-sha1 = "31e996f0a15c7b280ba9f76636b3ff9e2ae58c9a"
uuid = "682c06a0-de6a-54ab-a142-c8b1cf79cde6" uuid = "682c06a0-de6a-54ab-a142-c8b1cf79cde6"
version = "0.21.3" version = "0.21.4"
[[deps.LAPACK_jll]] [[deps.LAPACK_jll]]
deps = ["Artifacts", "CompilerSupportLibraries_jll", "JLLWrappers", "Libdl", "Pkg", "libblastrampoline_jll"] deps = ["Artifacts", "CompilerSupportLibraries_jll", "JLLWrappers", "Libdl", "libblastrampoline_jll"]
git-tree-sha1 = "a539affa8228208f5a3396037165b04bff9a2ba6" git-tree-sha1 = "1b25c30fa49db281be615793e0f85282a8f22822"
uuid = "51474c39-65e3-53ba-86ba-03b1b862ec14" uuid = "51474c39-65e3-53ba-86ba-03b1b862ec14"
version = "3.10.0+1" version = "3.12.0+2"
[[deps.LazilyInitializedFields]]
git-tree-sha1 = "8f7f3cabab0fd1800699663533b6d5cb3fc0e612"
uuid = "0e77f7df-68c5-4e49-93ce-4cd80f5598bf"
version = "1.2.2"
[[deps.LazyArtifacts]] [[deps.LazyArtifacts]]
deps = ["Artifacts", "Pkg"] deps = ["Artifacts", "Pkg"]
uuid = "4af54fe1-eca0-43a8-85a7-787d91b784e3" uuid = "4af54fe1-eca0-43a8-85a7-787d91b784e3"
version = "1.11.0"
[[deps.LibCURL]] [[deps.LibCURL]]
deps = ["LibCURL_jll", "MozillaCACerts_jll"] deps = ["LibCURL_jll", "MozillaCACerts_jll"]
uuid = "b27032c2-a3e7-50c8-80cd-2d36dbcbfd21" uuid = "b27032c2-a3e7-50c8-80cd-2d36dbcbfd21"
version = "0.6.3" version = "0.6.4"
[[deps.LibCURL_jll]] [[deps.LibCURL_jll]]
deps = ["Artifacts", "LibSSH2_jll", "Libdl", "MbedTLS_jll", "Zlib_jll", "nghttp2_jll"] deps = ["Artifacts", "LibSSH2_jll", "Libdl", "MbedTLS_jll", "Zlib_jll", "nghttp2_jll"]
uuid = "deac9b47-8bc7-5906-a0fe-35ac56dc84c0" uuid = "deac9b47-8bc7-5906-a0fe-35ac56dc84c0"
version = "7.84.0+0" version = "8.6.0+0"
[[deps.LibGit2]] [[deps.LibGit2]]
deps = ["Base64", "NetworkOptions", "Printf", "SHA"] deps = ["Base64", "LibGit2_jll", "NetworkOptions", "Printf", "SHA"]
uuid = "76f85450-5226-5b5a-8eaa-529ad045b433" uuid = "76f85450-5226-5b5a-8eaa-529ad045b433"
version = "1.11.0"
[[deps.LibGit2_jll]]
deps = ["Artifacts", "LibSSH2_jll", "Libdl", "MbedTLS_jll"]
uuid = "e37daf67-58a4-590a-8e99-b0245dd2ffc5"
version = "1.7.2+0"
[[deps.LibSSH2_jll]] [[deps.LibSSH2_jll]]
deps = ["Artifacts", "Libdl", "MbedTLS_jll"] deps = ["Artifacts", "Libdl", "MbedTLS_jll"]
uuid = "29816b5a-b9ab-546f-933c-edad1886dfa8" uuid = "29816b5a-b9ab-546f-933c-edad1886dfa8"
version = "1.10.2+0" version = "1.11.0+1"
[[deps.Libdl]] [[deps.Libdl]]
uuid = "8f399da3-3557-5675-b5ff-fb832c97cbdb" uuid = "8f399da3-3557-5675-b5ff-fb832c97cbdb"
version = "1.11.0"
[[deps.LinearAlgebra]] [[deps.Libiconv_jll]]
deps = ["Libdl", "libblastrampoline_jll"] deps = ["Artifacts", "JLLWrappers", "Libdl"]
uuid = "37e2e46d-f89d-539d-b4ee-838fcccc9c8e" git-tree-sha1 = "f9557a255370125b405568f9767d6d195822a175"
uuid = "94ce4f54-9a6c-5748-9c1c-f9c7231a4531"
version = "1.17.0+0"
[[deps.Logging]] [[deps.Logging]]
uuid = "56ddb016-857b-54e1-b83d-db4d58db5568" uuid = "56ddb016-857b-54e1-b83d-db4d58db5568"
version = "1.11.0"
[[deps.MPI]] [[deps.MPI]]
deps = ["Distributed", "DocStringExtensions", "Libdl", "MPICH_jll", "MPIPreferences", "MPItrampoline_jll", "MicrosoftMPI_jll", "OpenMPI_jll", "Requires", "Serialization", "Sockets"] deps = ["Distributed", "DocStringExtensions", "Libdl", "MPICH_jll", "MPIPreferences", "MPItrampoline_jll", "MicrosoftMPI_jll", "OpenMPI_jll", "PkgVersion", "PrecompileTools", "Requires", "Serialization", "Sockets"]
git-tree-sha1 = "97d9313b6bb7ac04f5b8cfb33569cd30d0441efe" git-tree-sha1 = "892676019c58f34e38743bc989b0eca5bce5edc5"
uuid = "da04e1cc-30fd-572f-bb4f-1f8673147195" uuid = "da04e1cc-30fd-572f-bb4f-1f8673147195"
version = "0.20.0" version = "0.20.22"
[deps.MPI.extensions]
AMDGPUExt = "AMDGPU"
CUDAExt = "CUDA"
[deps.MPI.weakdeps]
AMDGPU = "21141c5a-9bdb-4563-92ae-f87d6854732e"
CUDA = "052768ef-5323-5732-b1bb-66c8b64840ba"
[[deps.MPICH_jll]] [[deps.MPICH_jll]]
deps = ["Artifacts", "CompilerSupportLibraries_jll", "JLLWrappers", "LazyArtifacts", "Libdl", "MPIPreferences", "Pkg", "TOML"] deps = ["Artifacts", "CompilerSupportLibraries_jll", "Hwloc_jll", "JLLWrappers", "LazyArtifacts", "Libdl", "MPIPreferences", "TOML"]
git-tree-sha1 = "6d4fa43afab4611d090b11617ecea1a144b21d35" git-tree-sha1 = "7715e65c47ba3941c502bffb7f266a41a7f54423"
uuid = "7cb0a576-ebde-5e09-9194-50597f1243b4" uuid = "7cb0a576-ebde-5e09-9194-50597f1243b4"
version = "4.0.2+5" version = "4.2.3+0"
[[deps.MPIPreferences]] [[deps.MPIPreferences]]
deps = ["Libdl", "Preferences"] deps = ["Libdl", "Preferences"]
git-tree-sha1 = "9959c42b41220206eeda9004f695d913e2245658" git-tree-sha1 = "c105fe467859e7f6e9a852cb15cb4301126fac07"
uuid = "3da0fdf6-3ccc-4f1b-acd9-58baa6c99267" uuid = "3da0fdf6-3ccc-4f1b-acd9-58baa6c99267"
version = "0.1.5" version = "0.1.11"
[[deps.MPItrampoline_jll]] [[deps.MPItrampoline_jll]]
deps = ["Artifacts", "CompilerSupportLibraries_jll", "JLLWrappers", "LazyArtifacts", "Libdl", "MPIPreferences", "Pkg", "TOML"] deps = ["Artifacts", "CompilerSupportLibraries_jll", "JLLWrappers", "LazyArtifacts", "Libdl", "MPIPreferences", "TOML"]
git-tree-sha1 = "b3f9e42685b4ad614eca0b44bd863cd41b1c86ea" git-tree-sha1 = "70e830dab5d0775183c99fc75e4c24c614ed7142"
uuid = "f1f71cc9-e9ae-5b93-9b94-4fe0e1ad3748" uuid = "f1f71cc9-e9ae-5b93-9b94-4fe0e1ad3748"
version = "5.0.2+1" version = "5.5.1+0"
[[deps.Markdown]] [[deps.Markdown]]
deps = ["Base64"] deps = ["Base64"]
uuid = "d6f4376e-aef5-505a-96c1-9c027394607a" uuid = "d6f4376e-aef5-505a-96c1-9c027394607a"
version = "1.11.0"
[[deps.MarkdownAST]] [[deps.MarkdownAST]]
deps = ["AbstractTrees", "Markdown"] deps = ["AbstractTrees", "Markdown"]
git-tree-sha1 = "1dfa364acc47225afdc57c8998c988bc107ff0d2" git-tree-sha1 = "465a70f0fc7d443a00dcdc3267a497397b8a3899"
uuid = "d0879d2d-cac2-40c8-9cee-1863dc0c7391" uuid = "d0879d2d-cac2-40c8-9cee-1863dc0c7391"
version = "0.1.0" version = "0.1.2"
[[deps.MbedTLS_jll]] [[deps.MbedTLS_jll]]
deps = ["Artifacts", "Libdl"] deps = ["Artifacts", "Libdl"]
uuid = "c8ffd9c3-330d-5841-b78e-0817d7145fa1" uuid = "c8ffd9c3-330d-5841-b78e-0817d7145fa1"
version = "2.28.0+0" version = "2.28.6+0"
[[deps.MicrosoftMPI_jll]] [[deps.MicrosoftMPI_jll]]
deps = ["Artifacts", "JLLWrappers", "Libdl", "Pkg"] deps = ["Artifacts", "JLLWrappers", "Libdl", "Pkg"]
git-tree-sha1 = "a16aa086d335ed7e0170c5265247db29172af2f9" git-tree-sha1 = "f12a29c4400ba812841c6ace3f4efbb6dbb3ba01"
uuid = "9237b28f-5490-5468-be7b-bb81f5f5e6cf" uuid = "9237b28f-5490-5468-be7b-bb81f5f5e6cf"
version = "10.1.3+2" version = "10.1.4+2"
[[deps.Mmap]] [[deps.Mmap]]
uuid = "a63ad114-7e13-5084-954f-fe012c677804" uuid = "a63ad114-7e13-5084-954f-fe012c677804"
version = "1.11.0"
[[deps.MozillaCACerts_jll]] [[deps.MozillaCACerts_jll]]
uuid = "14a3606d-f60d-562e-9121-12d972cd8159" uuid = "14a3606d-f60d-562e-9121-12d972cd8159"
version = "2022.2.1" version = "2023.12.12"
[[deps.NetworkOptions]] [[deps.NetworkOptions]]
uuid = "ca575930-c2e3-43a9-ace4-1e988b2c1908" uuid = "ca575930-c2e3-43a9-ace4-1e988b2c1908"
@ -208,54 +271,78 @@ version = "1.2.0"
[[deps.OpenBLAS_jll]] [[deps.OpenBLAS_jll]]
deps = ["Artifacts", "CompilerSupportLibraries_jll", "Libdl"] deps = ["Artifacts", "CompilerSupportLibraries_jll", "Libdl"]
uuid = "4536629a-c528-5b80-bd46-f80d51c5b363" uuid = "4536629a-c528-5b80-bd46-f80d51c5b363"
version = "0.3.20+0" version = "0.3.27+1"
[[deps.OpenMPI_jll]] [[deps.OpenMPI_jll]]
deps = ["Artifacts", "CompilerSupportLibraries_jll", "JLLWrappers", "LazyArtifacts", "Libdl", "MPIPreferences", "Pkg", "TOML"] deps = ["Artifacts", "CompilerSupportLibraries_jll", "Hwloc_jll", "JLLWrappers", "LazyArtifacts", "Libdl", "MPIPreferences", "TOML", "Zlib_jll"]
git-tree-sha1 = "346d6b357a480300ed7854dbc70e746ac52e10fd" git-tree-sha1 = "bfce6d523861a6c562721b262c0d1aaeead2647f"
uuid = "fe0851c0-eecd-5654-98d4-656369965a5c" uuid = "fe0851c0-eecd-5654-98d4-656369965a5c"
version = "4.1.3+3" version = "5.0.5+0"
[[deps.OpenSSL_jll]]
deps = ["Artifacts", "JLLWrappers", "Libdl"]
git-tree-sha1 = "7493f61f55a6cce7325f197443aa80d32554ba10"
uuid = "458c3c95-2e84-50aa-8efc-19380b2a3a95"
version = "3.0.15+1"
[[deps.PCRE2_jll]]
deps = ["Artifacts", "Libdl"]
uuid = "efcefdf7-47ab-520b-bdef-62a2eaa19f15"
version = "10.42.0+1"
[[deps.Parsers]] [[deps.Parsers]]
deps = ["Dates"] deps = ["Dates", "PrecompileTools", "UUIDs"]
git-tree-sha1 = "3d5bf43e3e8b412656404ed9466f1dcbf7c50269" git-tree-sha1 = "8489905bcdbcfac64d1daa51ca07c0d8f0283821"
uuid = "69de0a69-1ddd-5017-9359-2bf0b02dc9f0" uuid = "69de0a69-1ddd-5017-9359-2bf0b02dc9f0"
version = "2.4.0" version = "2.8.1"
[[deps.PartitionedArrays]]
deps = ["Distances", "IterativeSolvers", "LinearAlgebra", "MPI", "Printf", "SparseArrays", "SparseMatricesCSR"]
git-tree-sha1 = "94291b7ddeac39816572660383055870b41bca64"
uuid = "5a9dfac6-5c52-46f7-8278-5e2210713be9"
version = "0.2.11"
[[deps.Pkg]] [[deps.Pkg]]
deps = ["Artifacts", "Dates", "Downloads", "LibGit2", "Libdl", "Logging", "Markdown", "Printf", "REPL", "Random", "SHA", "Serialization", "TOML", "Tar", "UUIDs", "p7zip_jll"] deps = ["Artifacts", "Dates", "Downloads", "FileWatching", "LibGit2", "Libdl", "Logging", "Markdown", "Printf", "Random", "SHA", "TOML", "Tar", "UUIDs", "p7zip_jll"]
uuid = "44cfe95a-1eb2-52ea-b672-e2afdf69b78f" uuid = "44cfe95a-1eb2-52ea-b672-e2afdf69b78f"
version = "1.8.0" version = "1.11.0"
weakdeps = ["REPL"]
[deps.Pkg.extensions]
REPLExt = "REPL"
[[deps.PkgVersion]]
deps = ["Pkg"]
git-tree-sha1 = "f9501cc0430a26bc3d156ae1b5b0c1b47af4d6da"
uuid = "eebad327-c553-4316-9ea0-9fa01ccd7688"
version = "0.3.3"
[[deps.PrecompileTools]]
deps = ["Preferences"]
git-tree-sha1 = "5aa36f7049a63a1528fe8f7c3f2113413ffd4e1f"
uuid = "aea7be01-6a6a-4083-8856-8a6e6704d82a"
version = "1.2.1"
[[deps.Preferences]] [[deps.Preferences]]
deps = ["TOML"] deps = ["TOML"]
git-tree-sha1 = "47e5f437cc0e7ef2ce8406ce1e7e24d44915f88d" git-tree-sha1 = "9306f6085165d270f7e3db02af26a400d580f5c6"
uuid = "21216c6a-2e73-6563-6e65-726566657250" uuid = "21216c6a-2e73-6563-6e65-726566657250"
version = "1.3.0" version = "1.4.3"
[[deps.Printf]] [[deps.Printf]]
deps = ["Unicode"] deps = ["Unicode"]
uuid = "de0858da-6303-5e67-8744-51eddeeeb8d7" uuid = "de0858da-6303-5e67-8744-51eddeeeb8d7"
version = "1.11.0"
[[deps.REPL]] [[deps.REPL]]
deps = ["InteractiveUtils", "Markdown", "Sockets", "Unicode"] deps = ["InteractiveUtils", "Markdown", "Sockets", "StyledStrings", "Unicode"]
uuid = "3fa0cd96-eef1-5676-8a61-b3b8758bbffb" uuid = "3fa0cd96-eef1-5676-8a61-b3b8758bbffb"
version = "1.11.0"
[[deps.Random]] [[deps.Random]]
deps = ["SHA", "Serialization"] deps = ["SHA"]
uuid = "9a3f8284-a2c9-5f02-9a11-845980a1fd5c" uuid = "9a3f8284-a2c9-5f02-9a11-845980a1fd5c"
version = "1.11.0"
[[deps.RecipesBase]] [[deps.RegistryInstances]]
deps = ["SnoopPrecompile"] deps = ["LazilyInitializedFields", "Pkg", "TOML", "Tar"]
git-tree-sha1 = "612a4d76ad98e9722c8ba387614539155a59e30c" git-tree-sha1 = "ffd19052caf598b8653b99404058fce14828be51"
uuid = "3cdcf5f2-1ef4-517c-9805-6587b60abb01" uuid = "2792f1a3-b283-48e8-9a74-f99dce5104f3"
version = "1.3.0" version = "0.1.0"
[[deps.Requires]] [[deps.Requires]]
deps = ["UUIDs"] deps = ["UUIDs"]
@ -269,76 +356,61 @@ version = "0.7.0"
[[deps.Serialization]] [[deps.Serialization]]
uuid = "9e88b42a-f829-5b0c-bbe9-9e923198166b" uuid = "9e88b42a-f829-5b0c-bbe9-9e923198166b"
version = "1.11.0"
[[deps.SnoopPrecompile]]
git-tree-sha1 = "f604441450a3c0569830946e5b33b78c928e1a85"
uuid = "66db9d55-30c0-4569-8b51-7e840670fc0c"
version = "1.0.1"
[[deps.Sockets]] [[deps.Sockets]]
uuid = "6462fe0b-24de-5631-8697-dd941f90decc" uuid = "6462fe0b-24de-5631-8697-dd941f90decc"
version = "1.11.0"
[[deps.SparseArrays]] [[deps.StyledStrings]]
deps = ["LinearAlgebra", "Random"] uuid = "f489334b-da3d-4c2e-b8f0-e476e12c162b"
uuid = "2f01184e-e22b-5df5-ae63-d93ebab69eaf" version = "1.11.0"
[[deps.SparseMatricesCSR]]
deps = ["LinearAlgebra", "SparseArrays", "SuiteSparse"]
git-tree-sha1 = "4870b3e7db7063927b163fb981bd579410b68b2d"
uuid = "a0a7dd2c-ebf4-11e9-1f05-cf50bc540ca1"
version = "0.6.6"
[[deps.Statistics]]
deps = ["LinearAlgebra", "SparseArrays"]
uuid = "10745b16-79ce-11e8-11f9-7d13ad32a3b2"
[[deps.StatsAPI]]
deps = ["LinearAlgebra"]
git-tree-sha1 = "f9af7f195fb13589dd2e2d57fdb401717d2eb1f6"
uuid = "82ae8749-77ed-4fe6-ae5f-f523153014b0"
version = "1.5.0"
[[deps.SuiteSparse]]
deps = ["Libdl", "LinearAlgebra", "Serialization", "SparseArrays"]
uuid = "4607b0f0-06f3-5cda-b6b1-a6196a1729e9"
[[deps.TOML]] [[deps.TOML]]
deps = ["Dates"] deps = ["Dates"]
uuid = "fa267f1f-6049-4f14-aa54-33bafae1ed76" uuid = "fa267f1f-6049-4f14-aa54-33bafae1ed76"
version = "1.0.0" version = "1.0.3"
[[deps.Tar]] [[deps.Tar]]
deps = ["ArgTools", "SHA"] deps = ["ArgTools", "SHA"]
uuid = "a4e569a6-e804-4fa4-b0f3-eef7a1d5b13e" uuid = "a4e569a6-e804-4fa4-b0f3-eef7a1d5b13e"
version = "1.10.1" version = "1.10.0"
[[deps.Test]] [[deps.Test]]
deps = ["InteractiveUtils", "Logging", "Random", "Serialization"] deps = ["InteractiveUtils", "Logging", "Random", "Serialization"]
uuid = "8dfed614-e22c-5e08-85e1-65c5234f0b40" uuid = "8dfed614-e22c-5e08-85e1-65c5234f0b40"
version = "1.11.0"
[[deps.TranscodingStreams]]
git-tree-sha1 = "0c45878dcfdcfa8480052b6ab162cdd138781742"
uuid = "3bb67fe8-82b1-5028-8e26-92a6c54297fa"
version = "0.11.3"
[[deps.UUIDs]] [[deps.UUIDs]]
deps = ["Random", "SHA"] deps = ["Random", "SHA"]
uuid = "cf7118a7-6976-5b1a-9a39-7adc72f591a4" uuid = "cf7118a7-6976-5b1a-9a39-7adc72f591a4"
version = "1.11.0"
[[deps.Unicode]] [[deps.Unicode]]
uuid = "4ec0a83e-493e-50e2-b9ac-8f72acf5a8f5" uuid = "4ec0a83e-493e-50e2-b9ac-8f72acf5a8f5"
version = "1.11.0"
[[deps.Zlib_jll]] [[deps.Zlib_jll]]
deps = ["Libdl"] deps = ["Libdl"]
uuid = "83775a58-1f1d-513f-b197-d71354ab007a" uuid = "83775a58-1f1d-513f-b197-d71354ab007a"
version = "1.2.12+3" version = "1.2.13+1"
[[deps.libblastrampoline_jll]] [[deps.libblastrampoline_jll]]
deps = ["Artifacts", "Libdl", "OpenBLAS_jll"] deps = ["Artifacts", "Libdl"]
uuid = "8e850b90-86db-534c-a0d3-1478176c7d93" uuid = "8e850b90-86db-534c-a0d3-1478176c7d93"
version = "5.1.1+0" version = "5.11.0+0"
[[deps.nghttp2_jll]] [[deps.nghttp2_jll]]
deps = ["Artifacts", "Libdl"] deps = ["Artifacts", "Libdl"]
uuid = "8e850ede-7688-5339-a07c-302acd2aaf8d" uuid = "8e850ede-7688-5339-a07c-302acd2aaf8d"
version = "1.48.0+0" version = "1.59.0+0"
[[deps.p7zip_jll]] [[deps.p7zip_jll]]
deps = ["Artifacts", "Libdl"] deps = ["Artifacts", "Libdl"]
uuid = "3f19e933-33d8-53b3-aaab-bd5110c3b7a0" uuid = "3f19e933-33d8-53b3-aaab-bd5110c3b7a0"
version = "17.4.0+0" version = "17.4.0+2"

1
docs/Project.toml

@ -1,3 +1,4 @@
[deps] [deps]
Changelog = "5217a498-cd5d-4ec6-b8c2-9b85a09b6e3e"
Documenter = "e30172f5-a6a5-5a46-863b-614d45cd2de4" Documenter = "e30172f5-a6a5-5a46-863b-614d45cd2de4"
HYPRE = "b5ffcf37-a2bd-41ab-a3da-4bd9bc8ad771" HYPRE = "b5ffcf37-a2bd-41ab-a3da-4bd9bc8ad771"

24
docs/liveserver.jl

@ -0,0 +1,24 @@
#!/usr/bin/env julia
# Root of the repository
const repo_root = dirname(@__DIR__)
# Make sure docs environment is active and instantiated
import Pkg
Pkg.activate(@__DIR__)
Pkg.instantiate()
# Communicate with docs/make.jl that we are running in live mode
push!(ARGS, "liveserver")
# Run LiveServer.servedocs(...)
import LiveServer
LiveServer.servedocs(;
# Documentation root where make.jl and src/ are located
foldername = joinpath(repo_root, "docs"),
# Extra source folder to watch for changes
include_dirs = [
# Watch the src folder so docstrings can be Revise'd
joinpath(repo_root, "src"),
],
)

10
docs/make.jl

@ -7,6 +7,15 @@ end
using Documenter using Documenter
using HYPRE using HYPRE
using Changelog
# Changelog
Changelog.generate(
Changelog.Documenter(),
joinpath(@__DIR__, "..", "CHANGELOG.md"),
joinpath(@__DIR__, "src", "changelog.md");
repo = "Ferrite-FEM/Ferrite.jl",
)
makedocs( makedocs(
sitename = "HYPRE.jl", sitename = "HYPRE.jl",
@ -16,6 +25,7 @@ makedocs(
modules = [HYPRE], modules = [HYPRE],
pages = Any[ pages = Any[
"Home" => "index.md", "Home" => "index.md",
hide("Changelog" => "changelog.md"),
"matrix-vector.md", "matrix-vector.md",
"solvers-preconditioners.md", "solvers-preconditioners.md",
"libhypre.md", "libhypre.md",

13
docs/src/api.md

@ -6,6 +6,14 @@
HYPRE.Init HYPRE.Init
``` ```
## Matrix/vector creation
```@docs
HYPRE.start_assemble!
HYPRE.assemble!
HYPRE.finish_assemble!
```
## Solvers and preconditioners ## Solvers and preconditioners
```@docs ```@docs
@ -24,3 +32,8 @@ HYPRE.ILU
HYPRE.PCG HYPRE.PCG
HYPRE.ParaSails HYPRE.ParaSails
``` ```
```@docs
HYPRE.GetNumIterations
HYPRE.GetFinalRelativeResidualNorm
```

16
docs/src/libhypre.md

@ -15,15 +15,13 @@ directly.
Functions from the `LibHYPRE` submodule can be used together with the high level interface. Functions from the `LibHYPRE` submodule can be used together with the high level interface.
This is useful when you need some functionality from the library which isn't exposed in the This is useful when you need some functionality from the library which isn't exposed in the
high level interface. Many functions require passing a reference to a matrix/vector or a high level interface. Many functions require passing a reference to a matrix/vector or a
solver. These can be obtained as follows: solver. HYPRE.jl defines the appropriate conversion methods used by `ccall` such that
- `A::HYPREMatrix` can be passed to `HYPRE_*` functions with `HYPRE_IJMatrix` or
| C type signature | Argument to pass | `HYPRE_ParCSRMatrix` in the signature
|:---------------------|:-------------------------------------| - `b::HYPREVector` can be passed to `HYPRE_*` functions with `HYPRE_IJVector` or
| `HYPRE_IJMatrix` | `A.ijmatrix` where `A::HYPREMatrix` | `HYPRE_ParVector` in the signature
| `HYPRE_ParCSRMatrix` | `A.parmatrix` where `A::HYPREMatrix` | - `s::HYPRESolver` can be passed to `HYPRE_*` functions with `HYPRE_Solver` in the
| `HYPRE_IJVector` | `b.ijvector` where `b::HYPREVector` | signature
| `HYPRE_ParVector` | `b.parvector` where `b::HYPREVector` |
| `HYPRE_Solver` | `s.solver` where `s::HYPRESolver` |
[^1]: Bindings are generated using [^1]: Bindings are generated using
[Clang.jl](https://github.com/JuliaInterop/Clang.jl), see [Clang.jl](https://github.com/JuliaInterop/Clang.jl), see

61
docs/src/matrix-vector.md

@ -5,15 +5,59 @@ datastructures. Specifically it uses the [IJ System
Interface](https://hypre.readthedocs.io/en/latest/api-int-ij.html) which can be used for Interface](https://hypre.readthedocs.io/en/latest/api-int-ij.html) which can be used for
general sparse matrices. general sparse matrices.
HYPRE.jl defines conversion methods from standard Julia datastructures to `HYPREMatrix` and `HYPREMatrix` and `HYPREVector` can be constructed either by assembling directly, or by
`HYPREVector`, respectively. See the following sections for details: first assembling into a Julia datastructure and the converting it. These various methods are
outlined in the following sections:
```@contents ```@contents
Pages = ["hypre-matrix-vector.md"] Pages = ["matrix-vector.md"]
Depth = 2:2 Depth = 2:2
``` ```
## PartitionedArrays.jl (multi-process)
## Direct assembly (multi-/single-process)
Creating `HYPREMatrix` and/or `HYPREVector` directly is possible by first creating an
assembler which is used to add all individual contributions to the matrix/vector. The
required steps are:
1. Create a new matrix and/or vector using the constructor.
2. Create an assembler and initialize the assembling procedure using
[`HYPRE.start_assemble!`](@ref).
3. Assemble all non-zero contributions (e.g. element matrix/vector in a finite element
simulation) using [`HYPRE.assemble!`](@ref).
4. Finalize the assembly using [`HYPRE.finish_assemble!`](@ref).
After these steps the matrix and vector are ready to pass to the solver. In case of multiple
consecutive solves with the same sparsity pattern (e.g. multiple Newton steps, multiple time
steps, ...) it is possible to reuse the same matrix by simply skipping the first step above.
**Example pseudocode**
```julia
# MPI communicator
comm = MPI.COMM_WORLD # MPI.COMM_SELF for single-process setups
# Create empty matrix and vector -- this process owns rows ilower to iupper
A = HYPREMatrix(comm, ilower, iupper)
b = HYPREVector(comm, ilower, iupper)
# Create assembler
assembler = HYPRE.start_assemble!(A, b)
# Assemble contributions from all elements owned by this process
for element in owned_elements
Ae, be = compute_element_contribution(...)
global_indices = get_global_indices(...)
HYPRE.assemble!(assembler, global_indices, Ae, be)
end
# Finalize the assembly
A, b = HYPRE.finish_assemble!(assembler)
```
## Create from PartitionedArrays.jl (multi-process)
HYPRE.jl integrates seemlessly with `PSparseMatrix` and `PVector` from the HYPRE.jl integrates seemlessly with `PSparseMatrix` and `PVector` from the
[PartitionedArrays.jl](https://github.com/fverdugo/PartitionedArrays.jl) package. These can [PartitionedArrays.jl](https://github.com/fverdugo/PartitionedArrays.jl) package. These can
@ -71,7 +115,7 @@ copy!(x, x_h)
``` ```
## `SparseMatrixCSC` / `SparseMatrixCSR` (single-process) ## Create from `SparseMatrixCSC` / `SparseMatrixCSR` (single-process)
HYPRE.jl also support working directly with `SparseMatrixCSC` (from the HYPRE.jl also support working directly with `SparseMatrixCSC` (from the
[SparseArrays.jl](https://github.com/JuliaSparse/SparseArrays.jl) standard library) and [SparseArrays.jl](https://github.com/JuliaSparse/SparseArrays.jl) standard library) and
@ -100,10 +144,3 @@ x = solve(solver, A, b)
x = zeros(length(b)) x = zeros(length(b))
solve!(solver, x, A, b) solve!(solver, x, A, b)
``` ```
## `SparseMatrixCSC` / `SparseMatrixCSR` (multi-process)
!!! warning
This interface isn't finalized yet and is therefore not documented since it
is subject to change.

33
examples/ex5.jl

@ -85,7 +85,9 @@ function main(argc, argv)
end end
# Preliminaries: want at least one processor per row # Preliminaries: want at least one processor per row
if n * n < num_procs; n = trunc(Int, sqrt(n)) + 1; end if n * n < num_procs
n = trunc(Int, sqrt(n)) + 1
end
N = n * n # global number of rows N = n * n # global number of rows
h = 1.0 / (n + 1) # mesh size h = 1.0 / (n + 1) # mesh size
h2 = h * h h2 = h * h
@ -257,8 +259,7 @@ function main(argc, argv)
num_iterations = Ref{Cint}() num_iterations = Ref{Cint}()
final_res_norm = Ref{Cdouble}() final_res_norm = Ref{Cdouble}()
# AMG if solver_id == 0 # AMG
if solver_id == 0
# Create solver # Create solver
HYPRE_BoomerAMGCreate(solver_ref) HYPRE_BoomerAMGCreate(solver_ref)
solver = solver_ref[] solver = solver_ref[]
@ -270,7 +271,7 @@ function main(argc, argv)
HYPRE_BoomerAMGSetRelaxOrder(solver, 1) # uses C/F relaxation HYPRE_BoomerAMGSetRelaxOrder(solver, 1) # uses C/F relaxation
HYPRE_BoomerAMGSetNumSweeps(solver, 1) # Sweeeps on each level HYPRE_BoomerAMGSetNumSweeps(solver, 1) # Sweeeps on each level
HYPRE_BoomerAMGSetMaxLevels(solver, 20) # maximum number of levels HYPRE_BoomerAMGSetMaxLevels(solver, 20) # maximum number of levels
HYPRE_BoomerAMGSetTol(solver, 1e-7) # conv. tolerance HYPRE_BoomerAMGSetTol(solver, 1.0e-7) # conv. tolerance
# Now setup and solve! # Now setup and solve!
HYPRE_BoomerAMGSetup(solver, parcsr_A, par_b, par_x) HYPRE_BoomerAMGSetup(solver, parcsr_A, par_b, par_x)
@ -289,15 +290,14 @@ function main(argc, argv)
# Destroy solver # Destroy solver
HYPRE_BoomerAMGDestroy(solver) HYPRE_BoomerAMGDestroy(solver)
# PCG elseif solver_id == 50 # PCG
elseif solver_id == 50
# Create solver # Create solver
HYPRE_ParCSRPCGCreate(MPI_COMM_WORLD, solver_ref) HYPRE_ParCSRPCGCreate(MPI_COMM_WORLD, solver_ref)
solver = solver_ref[] solver = solver_ref[]
# Set some parameters (See Reference Manual for more parameters) # Set some parameters (See Reference Manual for more parameters)
HYPRE_PCGSetMaxIter(solver, 1000) # max iterations HYPRE_PCGSetMaxIter(solver, 1000) # max iterations
HYPRE_PCGSetTol(solver, 1e-7) # conv. tolerance HYPRE_PCGSetTol(solver, 1.0e-7) # conv. tolerance
HYPRE_PCGSetTwoNorm(solver, 1) # use the two norm as the stopping criteria HYPRE_PCGSetTwoNorm(solver, 1) # use the two norm as the stopping criteria
HYPRE_PCGSetPrintLevel(solver, 2) # prints out the iteration info HYPRE_PCGSetPrintLevel(solver, 2) # prints out the iteration info
HYPRE_PCGSetLogging(solver, 1) # needed to get run info later HYPRE_PCGSetLogging(solver, 1) # needed to get run info later
@ -319,15 +319,14 @@ function main(argc, argv)
# Destroy solver # Destroy solver
HYPRE_ParCSRPCGDestroy(solver) HYPRE_ParCSRPCGDestroy(solver)
# PCG with AMG preconditioner elseif solver_id == 1 # PCG with AMG preconditioner
elseif solver_id == 1
# Create solver # Create solver
HYPRE_ParCSRPCGCreate(MPI_COMM_WORLD, solver_ref) HYPRE_ParCSRPCGCreate(MPI_COMM_WORLD, solver_ref)
solver = solver_ref[] solver = solver_ref[]
# Set some parameters (See Reference Manual for more parameters) # Set some parameters (See Reference Manual for more parameters)
HYPRE_PCGSetMaxIter(solver, 1000) # max iterations HYPRE_PCGSetMaxIter(solver, 1000) # max iterations
HYPRE_PCGSetTol(solver, 1e-7) # conv. tolerance HYPRE_PCGSetTol(solver, 1.0e-7) # conv. tolerance
HYPRE_PCGSetTwoNorm(solver, 1) # use the two norm as the stopping criteria HYPRE_PCGSetTwoNorm(solver, 1) # use the two norm as the stopping criteria
HYPRE_PCGSetPrintLevel(solver, 2) # print solve info HYPRE_PCGSetPrintLevel(solver, 2) # print solve info
HYPRE_PCGSetLogging(solver, 1) # needed to get run info later HYPRE_PCGSetLogging(solver, 1) # needed to get run info later
@ -364,15 +363,14 @@ function main(argc, argv)
HYPRE_ParCSRPCGDestroy(solver) HYPRE_ParCSRPCGDestroy(solver)
HYPRE_BoomerAMGDestroy(precond) HYPRE_BoomerAMGDestroy(precond)
# PCG with Parasails Preconditioner elseif solver_id == 8 # PCG with Parasails Preconditioner
elseif solver_id == 8
# Create solver # Create solver
HYPRE_ParCSRPCGCreate(MPI_COMM_WORLD, solver_ref) HYPRE_ParCSRPCGCreate(MPI_COMM_WORLD, solver_ref)
solver = solver_ref[] solver = solver_ref[]
# Set some parameters (See Reference Manual for more parameters) # Set some parameters (See Reference Manual for more parameters)
HYPRE_PCGSetMaxIter(solver, 1000) # max iterations HYPRE_PCGSetMaxIter(solver, 1000) # max iterations
HYPRE_PCGSetTol(solver, 1e-7) # conv. tolerance HYPRE_PCGSetTol(solver, 1.0e-7) # conv. tolerance
HYPRE_PCGSetTwoNorm(solver, 1) # use the two norm as the stopping criteria HYPRE_PCGSetTwoNorm(solver, 1) # use the two norm as the stopping criteria
HYPRE_PCGSetPrintLevel(solver, 2) # print solve info HYPRE_PCGSetPrintLevel(solver, 2) # print solve info
HYPRE_PCGSetLogging(solver, 1) # needed to get run info later HYPRE_PCGSetLogging(solver, 1) # needed to get run info later
@ -412,8 +410,7 @@ function main(argc, argv)
HYPRE_ParCSRPCGDestroy(solver) HYPRE_ParCSRPCGDestroy(solver)
HYPRE_ParaSailsDestroy(precond) HYPRE_ParaSailsDestroy(precond)
# Flexible GMRES with AMG Preconditioner elseif solver_id == 61 # Flexible GMRES with AMG Preconditioner
elseif solver_id == 61
# Create solver # Create solver
HYPRE_ParCSRFlexGMRESCreate(MPI_COMM_WORLD, solver_ref) HYPRE_ParCSRFlexGMRESCreate(MPI_COMM_WORLD, solver_ref)
@ -422,7 +419,7 @@ function main(argc, argv)
# Set some parameters (See Reference Manual for more parameters) # Set some parameters (See Reference Manual for more parameters)
HYPRE_FlexGMRESSetKDim(solver, 30) # restart HYPRE_FlexGMRESSetKDim(solver, 30) # restart
HYPRE_FlexGMRESSetMaxIter(solver, 1000) # max iterations HYPRE_FlexGMRESSetMaxIter(solver, 1000) # max iterations
HYPRE_FlexGMRESSetTol(solver, 1e-7) # conv. tolerance HYPRE_FlexGMRESSetTol(solver, 1.0e-7) # conv. tolerance
HYPRE_FlexGMRESSetPrintLevel(solver, 2) # print solve info HYPRE_FlexGMRESSetPrintLevel(solver, 2) # print solve info
HYPRE_FlexGMRESSetLogging(solver, 1) # needed to get run info later HYPRE_FlexGMRESSetLogging(solver, 1) # needed to get run info later
@ -459,7 +456,9 @@ function main(argc, argv)
HYPRE_BoomerAMGDestroy(precond) HYPRE_BoomerAMGDestroy(precond)
else else
if myid == 0; println("Invalid solver id specified."); end if myid == 0
println("Invalid solver id specified.")
end
end end
# Clean up # Clean up

300
ext/HYPREPartitionedArrays.jl

@ -0,0 +1,300 @@
module HYPREPartitionedArrays
using HYPRE.LibHYPRE: @check, HYPRE_BigInt, HYPRE_Complex, HYPRE_IJMatrixSetValues,
HYPRE_IJVectorGetValues, HYPRE_IJVectorInitialize, HYPRE_IJVectorSetValues, HYPRE_Int
using HYPRE: HYPRE, HYPREMatrix, HYPRESolver, HYPREVector, Internals
using MPI: MPI
using PartitionedArrays: PartitionedArrays, AbstractLocalIndices, MPIArray, PSparseMatrix,
PVector, SplitMatrix, ghost_to_global, local_values, own_to_global, own_values,
partition
using SparseArrays: SparseArrays, SparseMatrixCSC, nonzeros, nzrange, rowvals
using SparseMatricesCSR: SparseMatrixCSR, colvals
##################################################
# PartitionedArrays.PSparseMatrix -> HYPREMatrix #
##################################################
function Internals.to_hypre_data(
A::SplitMatrix{<:SparseMatrixCSC}, r::AbstractLocalIndices, c::AbstractLocalIndices
)
# Own/ghost to global index mappings
own_to_global_row = own_to_global(r)
own_to_global_col = own_to_global(c)
ghost_to_global_col = ghost_to_global(c)
# HYPRE requires contiguous row indices
ilower = own_to_global_row[1]
iupper = own_to_global_row[end]
@assert iupper - ilower + 1 == length(own_to_global_row)
# Extract sparse matrices from the SplitMatrix. We are only interested in the owned
# rows, so only consider own-own and own-ghost blocks.
Aoo = A.blocks.own_own::SparseMatrixCSC
Aoo_rows = rowvals(Aoo)
Aoo_vals = nonzeros(Aoo)
Aog = A.blocks.own_ghost::SparseMatrixCSC
Aog_rows = rowvals(Aog)
Aog_vals = nonzeros(Aog)
@assert size(Aoo, 1) == size(Aog, 1) == length(own_to_global_row)
# Initialize the data buffers HYPRE wants
nrows = HYPRE_Int(length(own_to_global_row)) # Total number of rows
ncols = zeros(HYPRE_Int, nrows) # Number of colums for each row
rows = collect(HYPRE_BigInt, ilower:iupper) # The row indices
# cols = Vector{HYPRE_BigInt}(undef, nnz) # The column indices
# values = Vector{HYPRE_Complex}(undef, nnz) # The values
# First pass to count nnz per row (note that global column indices and column
# permutation doesn't matter for this pass)
@inbounds for own_col in 1:size(Aoo, 2)
for k in nzrange(Aoo, own_col)
own_row = Aoo_rows[k]
ncols[own_row] += 1
end
end
@inbounds for ghost_col in 1:size(Aog, 2)
for k in nzrange(Aog, ghost_col)
own_row = Aog_rows[k]
ncols[own_row] += 1
end
end
# Initialize remaining buffers now that nnz is known
nnz = sum(ncols)
cols = Vector{HYPRE_BigInt}(undef, nnz)
values = Vector{HYPRE_Complex}(undef, nnz)
# Keep track of the last index used for every row
lastinds = zeros(Int, nrows)
cumsum!((@view lastinds[2:end]), (@view ncols[1:(end - 1)]))
# Second pass to populate the output. Here we need to map column
# indices from own/ghost to global
@inbounds for own_col in 1:size(Aoo, 2)
for k in nzrange(Aoo, own_col)
own_row = Aoo_rows[k]
i = lastinds[own_row] += 1
values[i] = Aoo_vals[k]
cols[i] = own_to_global_col[own_col]
end
end
@inbounds for ghost_col in 1:size(Aog, 2)
for k in nzrange(Aog, ghost_col)
own_row = Aog_rows[k]
i = lastinds[own_row] += 1
values[i] = Aog_vals[k]
cols[i] = ghost_to_global_col[ghost_col]
end
end
# Sanity checks and return
@assert nrows == length(ncols) == length(rows)
return nrows, ncols, rows, cols, values
end
function Internals.to_hypre_data(
A::SplitMatrix{<:SparseMatrixCSR}, r::AbstractLocalIndices, c::AbstractLocalIndices
)
# Own/ghost to global index mappings
own_to_global_row = own_to_global(r)
own_to_global_col = own_to_global(c)
ghost_to_global_col = ghost_to_global(c)
# HYPRE requires contiguous row indices
ilower = own_to_global_row[1]
iupper = own_to_global_row[end]
@assert iupper - ilower + 1 == length(own_to_global_row)
# Extract sparse matrices from the SplitMatrix. We are only interested in the owned
# rows, so only consider own-own and own-ghost blocks.
Aoo = A.blocks.own_own::SparseMatrixCSR
Aoo_cols = colvals(Aoo)
Aoo_vals = nonzeros(Aoo)
Aog = A.blocks.own_ghost::SparseMatrixCSR
Aog_cols = colvals(Aog)
Aog_vals = nonzeros(Aog)
@assert size(Aoo, 1) == size(Aog, 1) == length(own_to_global_row)
# Initialize the data buffers HYPRE wants
nnz = SparseArrays.nnz(Aoo) + SparseArrays.nnz(Aog)
nrows = HYPRE_Int(iupper - ilower + 1) # Total number of rows
ncols = zeros(HYPRE_Int, nrows) # Number of columns for each row
rows = collect(HYPRE_BigInt, ilower:iupper) # The row indices
cols = Vector{HYPRE_BigInt}(undef, nnz) # The column indices
values = Vector{HYPRE_Complex}(undef, nnz) # The values
# For CSR we only need a single pass to over the owned rows to collect everything
i = 0
for own_row in 1:size(Aoo, 1)
nzro = nzrange(Aoo, own_row)
nzrg = nzrange(Aog, own_row)
ncols[own_row] = length(nzro) + length(nzrg)
for k in nzro
i += 1
own_col = Aoo_cols[k]
cols[i] = own_to_global_col[own_col]
values[i] = Aoo_vals[k]
end
for k in nzrg
i += 1
ghost_col = Aog_cols[k]
cols[i] = ghost_to_global_col[ghost_col]
values[i] = Aog_vals[k]
end
end
# Sanity checks and return
@assert nnz == i
@assert nrows == length(ncols) == length(rows)
return nrows, ncols, rows, cols, values
end
function Internals.get_comm(A::Union{PSparseMatrix{<:Any, <:M}, PVector{<:Any, <:M}}) where {M <: MPIArray}
return partition(A).comm
end
Internals.get_comm(_::Union{PSparseMatrix, PVector}) = MPI.COMM_SELF
function Internals.get_proc_rows(A::Union{PSparseMatrix, PVector})
ilower::HYPRE_BigInt = typemax(HYPRE_BigInt)
iupper::HYPRE_BigInt = typemin(HYPRE_BigInt)
map(partition(axes(A, 1))) do a
# This is a map over the local process' owned indices. For MPI it will
# be a single value but for DebugArray / Array it will have multiple
# values.
o_to_g = own_to_global(a)
ilower_part = o_to_g[1]
iupper_part = o_to_g[end]
ilower = min(ilower, convert(HYPRE_BigInt, ilower_part))
iupper = max(iupper, convert(HYPRE_BigInt, iupper_part))
end
return ilower, iupper
end
function HYPRE.HYPREMatrix(B::PSparseMatrix)
# Use the same communicator as the matrix
comm = Internals.get_comm(B)
# Fetch rows owned by this process
ilower, iupper = Internals.get_proc_rows(B)
# Create the IJ matrix
A = HYPREMatrix(comm, ilower, iupper)
# Set all the values
map(local_values(B), partition(axes(B, 1)), partition(axes(B, 2))) do Bv, Br, Bc
nrows, ncols, rows, cols, values = Internals.to_hypre_data(Bv, Br, Bc)
@check HYPRE_IJMatrixSetValues(A, nrows, ncols, rows, cols, values)
return nothing
end
# Finalize
Internals.assemble_matrix(A)
return A
end
############################################
# PartitionedArrays.PVector -> HYPREVector #
############################################
function HYPRE.HYPREVector(v::PVector)
# Use the same communicator as the matrix
comm = Internals.get_comm(v)
# Fetch rows owned by this process
ilower, iupper = Internals.get_proc_rows(v)
# Create the IJ vector
b = HYPREVector(comm, ilower, iupper)
# Set all the values
map(own_values(v), partition(axes(v, 1))) do vo, vr
o_to_g = own_to_global(vr)
ilower_part = o_to_g[1]
iupper_part = o_to_g[end]
# Option 1: Set all values
nvalues = HYPRE_Int(iupper_part - ilower_part + 1)
indices = collect(HYPRE_BigInt, ilower_part:iupper_part)
# TODO: Could probably just pass the full vector even if it is too long
# values = convert(Vector{HYPRE_Complex}, vv)
values = collect(HYPRE_Complex, vo)
# # Option 2: Set only non-zeros
# indices = HYPRE_BigInt[]
# values = HYPRE_Complex[]
# for (i, vi) in zip(ilower_part:iupper_part, vo)
# if !iszero(vi)
# push!(indices, i)
# push!(values, vi)
# end
# end
# nvalues = length(indices)
@check HYPRE_IJVectorSetValues(b, nvalues, indices, values)
return nothing
end
# Finalize
Internals.assemble_vector(b)
return b
end
function copy_check(dst::HYPREVector, src::PVector)
il_dst, iu_dst = Internals.get_proc_rows(dst)
il_src, iu_src = Internals.get_proc_rows(src)
if il_dst != il_src && iu_dst != iu_src
# TODO: Why require this?
msg = "row owner mismatch between dst ($(il_dst:iu_dst)) and src ($(il_src:iu_src))"
throw(ArgumentError(msg))
end
return
end
# TODO: Other eltypes could be support by using a intermediate buffer
function Base.copy!(dst::PVector{<:AbstractVector{HYPRE_Complex}}, src::HYPREVector)
copy_check(src, dst)
map(own_values(dst), partition(axes(dst, 1))) do ov, vr
o_to_g = own_to_global(vr)
il_src_part = o_to_g[1]
iu_src_part = o_to_g[end]
nvalues = HYPRE_Int(iu_src_part - il_src_part + 1)
indices = collect(HYPRE_BigInt, il_src_part:iu_src_part)
values = ov
@check HYPRE_IJVectorGetValues(src, nvalues, indices, values)
end
return dst
end
function Base.copy!(dst::HYPREVector, src::PVector{<:AbstractVector{HYPRE_Complex}})
copy_check(dst, src)
# Re-initialize the vector
@check HYPRE_IJVectorInitialize(dst)
map(own_values(src), partition(axes(src, 1))) do ov, vr
o_to_g = own_to_global(vr)
ilower_src_part = o_to_g[1]
iupper_src_part = o_to_g[end]
nvalues = HYPRE_Int(iupper_src_part - ilower_src_part + 1)
indices = collect(HYPRE_BigInt, ilower_src_part:iupper_src_part)
values = ov
@check HYPRE_IJVectorSetValues(dst, nvalues, indices, values)
end
# TODO: It shouldn't be necessary to assemble here since we only set owned rows (?)
# @check HYPRE_IJVectorAssemble(dst)
# TODO: Necessary to recreate the ParVector? Running some examples it seems like it is
# not needed.
return dst
end
######################################
# PartitionedArrays solver interface #
######################################
# TODO: Would it be useful with a method that copied the solution to b instead?
function HYPRE.solve(solver::HYPRESolver, A::PSparseMatrix, b::PVector)
hypre_x = HYPRE.solve(solver, HYPREMatrix(A), HYPREVector(b))
x = copy!(similar(b, HYPRE_Complex), hypre_x)
return x
end
function HYPRE.solve!(solver::HYPRESolver, x::PVector, A::PSparseMatrix, b::PVector)
hypre_x = HYPREVector(x)
HYPRE.solve!(solver, hypre_x, HYPREMatrix(A), HYPREVector(b))
copy!(x, hypre_x)
return x
end
end # module HYPREPartitionedArrays

86
ext/HYPRESparseArrays.jl

@ -0,0 +1,86 @@
# Package extension adding SparseArrays.SparseMatrixCSC support to HYPRE.jl.
module HYPRESparseArrays

using HYPRE.LibHYPRE: @check, HYPRE_BigInt, HYPRE_Complex, HYPRE_Int
using HYPRE:
    HYPRE, HYPREMatrix, HYPRESolver, HYPREVector, HYPRE_IJMatrixSetValues, Internals
using MPI: MPI
using SparseArrays: SparseArrays, SparseMatrixCSC, nonzeros, nzrange, rowvals

##################################
# SparseMatrixCSC -> HYPREMatrix #
##################################

# Convert a SparseMatrixCSC to the (nrows, ncols, rows, cols, values) buffers expected
# by HYPRE_IJMatrixSetValues, i.e. a row-major (CSR-like) layout. Since the input is
# column-major this requires two passes: the first counts stored entries per row, the
# second fills the column-index and value buffers in row order.
function Internals.to_hypre_data(A::SparseMatrixCSC, ilower, iupper)
    Internals.check_n_rows(A, ilower, iupper)
    # Renamed from `nnz` to avoid shadowing SparseArrays.nnz
    nnz_A = SparseArrays.nnz(A)
    A_rows = rowvals(A)
    A_vals = nonzeros(A)
    # Initialize the data buffers HYPRE wants
    nrows = HYPRE_Int(iupper - ilower + 1)        # Total number of rows
    ncols = zeros(HYPRE_Int, nrows)               # Number of columns for each row
    rows = collect(HYPRE_BigInt, ilower:iupper)   # The row indices
    cols = Vector{HYPRE_BigInt}(undef, nnz_A)     # The column indices
    values = Vector{HYPRE_Complex}(undef, nnz_A)  # The values
    # First pass to count nnz per row
    @inbounds for j in 1:size(A, 2)
        for i in nzrange(A, j)
            row = A_rows[i]
            ncols[row] += 1
        end
    end
    # Keep track of the last index used for every row (exclusive prefix sum of the
    # per-row counts gives each row's starting offset into cols/values)
    lastinds = zeros(Int, nrows)
    cumsum!((@view lastinds[2:end]), (@view ncols[1:(end - 1)]))
    # Second pass to populate the output
    @inbounds for j in 1:size(A, 2)
        for i in nzrange(A, j)
            row = A_rows[i]
            k = lastinds[row] += 1
            val = A_vals[i]
            cols[k] = j
            values[k] = val
        end
    end
    @assert nrows == length(ncols) == length(rows)
    return nrows, ncols, rows, cols, values
end

# Create (and assemble) a HYPREMatrix from a SparseMatrixCSC, where rows
# ilower:iupper of the global matrix are owned by this process.
# Note: keep in sync with the SparseMatrixCSR method
function HYPRE.HYPREMatrix(comm::MPI.Comm, B::SparseMatrixCSC, ilower, iupper)
    A = HYPREMatrix(comm, ilower, iupper)
    nrows, ncols, rows, cols, values = Internals.to_hypre_data(B, ilower, iupper)
    @check HYPRE_IJMatrixSetValues(A, nrows, ncols, rows, cols, values)
    Internals.assemble_matrix(A)
    return A
end

# Single-process convenience constructor (uses MPI.COMM_SELF).
# Note: keep in sync with the SparseMatrixCSC method
function HYPRE.HYPREMatrix(B::SparseMatrixCSC, ilower = 1, iupper = size(B, 1))
    return HYPREMatrix(MPI.COMM_SELF, B, ilower, iupper)
end

####################################
# SparseMatrixCSC solver interface #
####################################

# Solve A * x = b and return the solution as a freshly allocated Vector{HYPRE_Complex}.
# Note: keep in sync with the SparseMatrixCSR method
function HYPRE.solve(solver::HYPRESolver, A::SparseMatrixCSC, b::Vector)
    hypre_x = HYPRE.solve(solver, HYPREMatrix(A), HYPREVector(b))
    x = copy!(similar(b, HYPRE_Complex), hypre_x)
    return x
end

# In-place solve of A * x = b, writing the solution back into x.
# Note: keep in sync with the SparseMatrixCSR method
function HYPRE.solve!(solver::HYPRESolver, x::Vector, A::SparseMatrixCSC, b::Vector)
    hypre_x = HYPREVector(x)
    HYPRE.solve!(solver, hypre_x, HYPREMatrix(A), HYPREVector(b))
    copy!(x, hypre_x)
    return x
end

end # module HYPRESparseArrays

80
ext/HYPRESparseMatricesCSR.jl

@ -0,0 +1,80 @@
# Package extension adding SparseMatricesCSR.SparseMatrixCSR support to HYPRE.jl.
module HYPRESparseMatricesCSR

using HYPRE.LibHYPRE: @check, HYPRE_BigInt, HYPRE_Complex, HYPRE_Int
using HYPRE: HYPRE, HYPREMatrix, HYPRESolver, HYPREVector, HYPRE_IJMatrixSetValues, Internals
using MPI: MPI
using SparseArrays: SparseArrays, nonzeros, nzrange
using SparseMatricesCSR: SparseMatrixCSR, colvals

##################################
# SparseMatrixCSR -> HYPREMatrix #
##################################

# Convert a SparseMatrixCSR to the (nrows, ncols, rows, cols, values) buffers expected
# by HYPRE_IJMatrixSetValues. The input is already row-major, so a single pass over the
# rows suffices to collect column indices and values.
function Internals.to_hypre_data(A::SparseMatrixCSR, ilower, iupper)
    Internals.check_n_rows(A, ilower, iupper)
    nnz = SparseArrays.nnz(A)
    csr_cols = colvals(A)
    csr_vals = nonzeros(A)
    # Allocate the buffers HYPRE wants
    nrows = HYPRE_Int(iupper - ilower + 1)       # Total number of rows
    ncols = Vector{HYPRE_Int}(undef, nrows)      # Number of columns for each row
    rows = collect(HYPRE_BigInt, ilower:iupper)  # The row indices
    cols = Vector{HYPRE_BigInt}(undef, nnz)      # The column indices
    values = Vector{HYPRE_Complex}(undef, nnz)   # The values
    # Walk the rows in order and append every stored entry
    cursor = 0
    @inbounds for row in 1:size(A, 1)
        rng = nzrange(A, row)
        ncols[row] = length(rng)
        for idx in rng
            cursor += 1
            cols[cursor] = csr_cols[idx]
            values[cursor] = csr_vals[idx]
        end
    end
    @assert nnz == cursor
    @assert nrows == length(ncols) == length(rows)
    return nrows, ncols, rows, cols, values
end

# Create (and assemble) a HYPREMatrix from a SparseMatrixCSR, where rows
# ilower:iupper of the global matrix are owned by this process.
# Note: keep in sync with the SparseMatrixCSC method
function HYPRE.HYPREMatrix(comm::MPI.Comm, B::SparseMatrixCSR, ilower, iupper)
    A = HYPREMatrix(comm, ilower, iupper)
    nrows, ncols, rows, cols, values = Internals.to_hypre_data(B, ilower, iupper)
    @check HYPRE_IJMatrixSetValues(A, nrows, ncols, rows, cols, values)
    Internals.assemble_matrix(A)
    return A
end

# Single-process convenience constructor (uses MPI.COMM_SELF).
# Note: keep in sync with the SparseMatrixCSC method
function HYPRE.HYPREMatrix(B::SparseMatrixCSR, ilower = 1, iupper = size(B, 1))
    return HYPREMatrix(MPI.COMM_SELF, B, ilower, iupper)
end

####################################
# SparseMatrixCSR solver interface #
####################################

# Solve A * x = b and return the solution as a freshly allocated Vector{HYPRE_Complex}.
# Note: keep in sync with the SparseMatrixCSC method
function HYPRE.solve(solver::HYPRESolver, A::SparseMatrixCSR, b::Vector)
    hx = HYPRE.solve(solver, HYPREMatrix(A), HYPREVector(b))
    solution = similar(b, HYPRE_Complex)
    copy!(solution, hx)
    return solution
end

# In-place solve of A * x = b, writing the solution back into x.
# Note: keep in sync with the SparseMatrixCSC method
function HYPRE.solve!(solver::HYPRESolver, x::Vector, A::SparseMatrixCSR, b::Vector)
    hx = HYPREVector(x)
    HYPRE.solve!(solver, hx, HYPREMatrix(A), HYPREVector(b))
    copy!(x, hx)
    return x
end

end # module HYPRESparseMatricesCSR

16
gen/Makefile

@ -1,2 +1,14 @@
default: MAKEDIR:=$(shell dirname $(realpath $(firstword $(MAKEFILE_LIST))))
julia --project generator.jl LIBHYPRE:=$(shell dirname $(MAKEDIR))/lib/LibHYPRE.jl
generate: $(LIBHYPRE)
clean:
rm -f $(LIBHYPRE)
.PHONY: generate clean
$(LIBHYPRE): Project.toml Manifest.toml $(MAKEDIR)/generator.toml $(MAKEDIR)/generator.jl
julia --project generator.jl && \
sed -i -e '1s/^/local libHYPRE # Silence of the Langs(erver)\n\n/' -e 's/using HYPRE_jll/using HYPRE_jll: HYPRE_jll, libHYPRE/' -e 's/using CEnum/using CEnum: @cenum/' $(LIBHYPRE) && \
julia-1.11 --project=@runic -e 'using Runic; exit(Runic.main(ARGS))' -- -i $(LIBHYPRE)

140
gen/Manifest.toml

@ -1,44 +1,47 @@
# This file is machine-generated - editing it directly is not advised # This file is machine-generated - editing it directly is not advised
julia_version = "1.8.2" julia_version = "1.11.2"
manifest_format = "2.0" manifest_format = "2.0"
project_hash = "cc39013dba1e9068883c1b156d3b25864ebc62f8" project_hash = "cc39013dba1e9068883c1b156d3b25864ebc62f8"
[[deps.ArgTools]] [[deps.ArgTools]]
uuid = "0dad84c5-d112-42e6-8d28-ef12dabb789f" uuid = "0dad84c5-d112-42e6-8d28-ef12dabb789f"
version = "1.1.1" version = "1.1.2"
[[deps.Artifacts]] [[deps.Artifacts]]
uuid = "56f22d72-fd6d-98f1-02f0-08ddc0907c33" uuid = "56f22d72-fd6d-98f1-02f0-08ddc0907c33"
version = "1.11.0"
[[deps.Base64]] [[deps.Base64]]
uuid = "2a0f44e3-6c83-55bd-87e4-b1978d98bd5f" uuid = "2a0f44e3-6c83-55bd-87e4-b1978d98bd5f"
version = "1.11.0"
[[deps.CEnum]] [[deps.CEnum]]
git-tree-sha1 = "eb4cb44a499229b3b8426dcfb5dd85333951ff90" git-tree-sha1 = "389ad5c84de1ae7cf0e28e381131c98ea87d54fc"
uuid = "fa961155-64e5-5f13-b03f-caf6b980ea82" uuid = "fa961155-64e5-5f13-b03f-caf6b980ea82"
version = "0.4.2" version = "0.5.0"
[[deps.Clang]] [[deps.Clang]]
deps = ["CEnum", "Clang_jll", "Downloads", "Pkg", "TOML"] deps = ["CEnum", "Clang_jll", "Downloads", "Pkg", "TOML"]
git-tree-sha1 = "b7e356adf44b1d4eb7aa2b0961ec130730fa208f" git-tree-sha1 = "2397d5da17ba4970f772a9888b208a0a1d77eb5d"
uuid = "40e3b903-d033-50b4-a0cc-940c62c95e31" uuid = "40e3b903-d033-50b4-a0cc-940c62c95e31"
version = "0.16.3" version = "0.18.3"
[[deps.Clang_jll]] [[deps.Clang_jll]]
deps = ["Artifacts", "JLLWrappers", "Libdl", "Pkg", "Zlib_jll", "libLLVM_jll"] deps = ["Artifacts", "JLLWrappers", "Libdl", "TOML", "Zlib_jll", "libLLVM_jll"]
git-tree-sha1 = "0dfffba1b32bb3e30cb0372bfe666a5ddffe37fb" git-tree-sha1 = "0dc9bd89383fd6fffed127e03fc42ed409cc865b"
uuid = "0ee61d77-7f21-5576-8119-9fcc46b10100" uuid = "0ee61d77-7f21-5576-8119-9fcc46b10100"
version = "13.0.1+3" version = "16.0.6+4"
[[deps.CompilerSupportLibraries_jll]] [[deps.CompilerSupportLibraries_jll]]
deps = ["Artifacts", "Libdl"] deps = ["Artifacts", "Libdl"]
uuid = "e66e0078-7015-5450-92f7-15fbd957f2ae" uuid = "e66e0078-7015-5450-92f7-15fbd957f2ae"
version = "0.5.2+0" version = "1.1.1+0"
[[deps.Dates]] [[deps.Dates]]
deps = ["Printf"] deps = ["Printf"]
uuid = "ade2ca70-3891-5945-98fb-dc099432e06a" uuid = "ade2ca70-3891-5945-98fb-dc099432e06a"
version = "1.11.0"
[[deps.Downloads]] [[deps.Downloads]]
deps = ["ArgTools", "FileWatching", "LibCURL", "NetworkOptions"] deps = ["ArgTools", "FileWatching", "LibCURL", "NetworkOptions"]
@ -47,6 +50,7 @@ version = "1.6.0"
[[deps.FileWatching]] [[deps.FileWatching]]
uuid = "7b1f6079-737a-58dc-b8bc-7a2ca5c1b5ee" uuid = "7b1f6079-737a-58dc-b8bc-7a2ca5c1b5ee"
version = "1.11.0"
[[deps.HYPRE_jll]] [[deps.HYPRE_jll]]
deps = ["Artifacts", "JLLWrappers", "LAPACK_jll", "LazyArtifacts", "Libdl", "MPICH_jll", "MPIPreferences", "MPItrampoline_jll", "MicrosoftMPI_jll", "OpenBLAS_jll", "OpenMPI_jll", "Pkg", "TOML"] deps = ["Artifacts", "JLLWrappers", "LAPACK_jll", "LazyArtifacts", "Libdl", "MPICH_jll", "MPIPreferences", "MPItrampoline_jll", "MicrosoftMPI_jll", "OpenBLAS_jll", "OpenMPI_jll", "Pkg", "TOML"]
@ -54,87 +58,99 @@ git-tree-sha1 = "b77d3eca75f8442e034ccf415c87405a49e77985"
uuid = "0a602bbd-b08b-5d75-8d32-0de6eef44785" uuid = "0a602bbd-b08b-5d75-8d32-0de6eef44785"
version = "2.23.1+1" version = "2.23.1+1"
[[deps.InteractiveUtils]] [[deps.Hwloc_jll]]
deps = ["Markdown"] deps = ["Artifacts", "JLLWrappers", "Libdl"]
uuid = "b77e0a4c-d291-57a0-90e8-8db25a27a240" git-tree-sha1 = "50aedf345a709ab75872f80a2779568dc0bb461b"
uuid = "e33a78d0-f292-5ffc-b300-72abe9b543c8"
version = "2.11.2+3"
[[deps.JLLWrappers]] [[deps.JLLWrappers]]
deps = ["Preferences"] deps = ["Artifacts", "Preferences"]
git-tree-sha1 = "abc9885a7ca2052a736a600f7fa66209f96506e1" git-tree-sha1 = "a007feb38b422fbdab534406aeca1b86823cb4d6"
uuid = "692b3bcd-3c85-4b1f-b108-f13ce0eb3210" uuid = "692b3bcd-3c85-4b1f-b108-f13ce0eb3210"
version = "1.4.1" version = "1.7.0"
[[deps.LAPACK_jll]] [[deps.LAPACK_jll]]
deps = ["Artifacts", "CompilerSupportLibraries_jll", "JLLWrappers", "Libdl", "Pkg", "libblastrampoline_jll"] deps = ["Artifacts", "CompilerSupportLibraries_jll", "JLLWrappers", "Libdl", "libblastrampoline_jll"]
git-tree-sha1 = "a539affa8228208f5a3396037165b04bff9a2ba6" git-tree-sha1 = "47a6ccfc4b78494669cd7c502ba112ee2b24eb45"
uuid = "51474c39-65e3-53ba-86ba-03b1b862ec14" uuid = "51474c39-65e3-53ba-86ba-03b1b862ec14"
version = "3.10.0+1" version = "3.12.0+3"
[[deps.LazyArtifacts]] [[deps.LazyArtifacts]]
deps = ["Artifacts", "Pkg"] deps = ["Artifacts", "Pkg"]
uuid = "4af54fe1-eca0-43a8-85a7-787d91b784e3" uuid = "4af54fe1-eca0-43a8-85a7-787d91b784e3"
version = "1.11.0"
[[deps.LibCURL]] [[deps.LibCURL]]
deps = ["LibCURL_jll", "MozillaCACerts_jll"] deps = ["LibCURL_jll", "MozillaCACerts_jll"]
uuid = "b27032c2-a3e7-50c8-80cd-2d36dbcbfd21" uuid = "b27032c2-a3e7-50c8-80cd-2d36dbcbfd21"
version = "0.6.3" version = "0.6.4"
[[deps.LibCURL_jll]] [[deps.LibCURL_jll]]
deps = ["Artifacts", "LibSSH2_jll", "Libdl", "MbedTLS_jll", "Zlib_jll", "nghttp2_jll"] deps = ["Artifacts", "LibSSH2_jll", "Libdl", "MbedTLS_jll", "Zlib_jll", "nghttp2_jll"]
uuid = "deac9b47-8bc7-5906-a0fe-35ac56dc84c0" uuid = "deac9b47-8bc7-5906-a0fe-35ac56dc84c0"
version = "7.84.0+0" version = "8.6.0+0"
[[deps.LibGit2]] [[deps.LibGit2]]
deps = ["Base64", "NetworkOptions", "Printf", "SHA"] deps = ["Base64", "LibGit2_jll", "NetworkOptions", "Printf", "SHA"]
uuid = "76f85450-5226-5b5a-8eaa-529ad045b433" uuid = "76f85450-5226-5b5a-8eaa-529ad045b433"
version = "1.11.0"
[[deps.LibGit2_jll]]
deps = ["Artifacts", "LibSSH2_jll", "Libdl", "MbedTLS_jll"]
uuid = "e37daf67-58a4-590a-8e99-b0245dd2ffc5"
version = "1.7.2+0"
[[deps.LibSSH2_jll]] [[deps.LibSSH2_jll]]
deps = ["Artifacts", "Libdl", "MbedTLS_jll"] deps = ["Artifacts", "Libdl", "MbedTLS_jll"]
uuid = "29816b5a-b9ab-546f-933c-edad1886dfa8" uuid = "29816b5a-b9ab-546f-933c-edad1886dfa8"
version = "1.10.2+0" version = "1.11.0+1"
[[deps.Libdl]] [[deps.Libdl]]
uuid = "8f399da3-3557-5675-b5ff-fb832c97cbdb" uuid = "8f399da3-3557-5675-b5ff-fb832c97cbdb"
version = "1.11.0"
[[deps.Logging]] [[deps.Logging]]
uuid = "56ddb016-857b-54e1-b83d-db4d58db5568" uuid = "56ddb016-857b-54e1-b83d-db4d58db5568"
version = "1.11.0"
[[deps.MPICH_jll]] [[deps.MPICH_jll]]
deps = ["Artifacts", "CompilerSupportLibraries_jll", "JLLWrappers", "LazyArtifacts", "Libdl", "MPIPreferences", "Pkg", "TOML"] deps = ["Artifacts", "CompilerSupportLibraries_jll", "Hwloc_jll", "JLLWrappers", "LazyArtifacts", "Libdl", "MPIPreferences", "TOML"]
git-tree-sha1 = "6d4fa43afab4611d090b11617ecea1a144b21d35" git-tree-sha1 = "7715e65c47ba3941c502bffb7f266a41a7f54423"
uuid = "7cb0a576-ebde-5e09-9194-50597f1243b4" uuid = "7cb0a576-ebde-5e09-9194-50597f1243b4"
version = "4.0.2+5" version = "4.2.3+0"
[[deps.MPIPreferences]] [[deps.MPIPreferences]]
deps = ["Libdl", "Preferences"] deps = ["Libdl", "Preferences"]
git-tree-sha1 = "9959c42b41220206eeda9004f695d913e2245658" git-tree-sha1 = "c105fe467859e7f6e9a852cb15cb4301126fac07"
uuid = "3da0fdf6-3ccc-4f1b-acd9-58baa6c99267" uuid = "3da0fdf6-3ccc-4f1b-acd9-58baa6c99267"
version = "0.1.5" version = "0.1.11"
[[deps.MPItrampoline_jll]] [[deps.MPItrampoline_jll]]
deps = ["Artifacts", "CompilerSupportLibraries_jll", "JLLWrappers", "LazyArtifacts", "Libdl", "MPIPreferences", "Pkg", "TOML"] deps = ["Artifacts", "CompilerSupportLibraries_jll", "JLLWrappers", "LazyArtifacts", "Libdl", "MPIPreferences", "TOML"]
git-tree-sha1 = "b3f9e42685b4ad614eca0b44bd863cd41b1c86ea" git-tree-sha1 = "70e830dab5d0775183c99fc75e4c24c614ed7142"
uuid = "f1f71cc9-e9ae-5b93-9b94-4fe0e1ad3748" uuid = "f1f71cc9-e9ae-5b93-9b94-4fe0e1ad3748"
version = "5.0.2+1" version = "5.5.1+2"
[[deps.Markdown]] [[deps.Markdown]]
deps = ["Base64"] deps = ["Base64"]
uuid = "d6f4376e-aef5-505a-96c1-9c027394607a" uuid = "d6f4376e-aef5-505a-96c1-9c027394607a"
version = "1.11.0"
[[deps.MbedTLS_jll]] [[deps.MbedTLS_jll]]
deps = ["Artifacts", "Libdl"] deps = ["Artifacts", "Libdl"]
uuid = "c8ffd9c3-330d-5841-b78e-0817d7145fa1" uuid = "c8ffd9c3-330d-5841-b78e-0817d7145fa1"
version = "2.28.0+0" version = "2.28.6+0"
[[deps.MicrosoftMPI_jll]] [[deps.MicrosoftMPI_jll]]
deps = ["Artifacts", "JLLWrappers", "Libdl", "Pkg"] deps = ["Artifacts", "JLLWrappers", "Libdl", "Pkg"]
git-tree-sha1 = "a16aa086d335ed7e0170c5265247db29172af2f9" git-tree-sha1 = "bc95bf4149bf535c09602e3acdf950d9b4376227"
uuid = "9237b28f-5490-5468-be7b-bb81f5f5e6cf" uuid = "9237b28f-5490-5468-be7b-bb81f5f5e6cf"
version = "10.1.3+2" version = "10.1.4+3"
[[deps.MozillaCACerts_jll]] [[deps.MozillaCACerts_jll]]
uuid = "14a3606d-f60d-562e-9121-12d972cd8159" uuid = "14a3606d-f60d-562e-9121-12d972cd8159"
version = "2022.2.1" version = "2023.12.12"
[[deps.NetworkOptions]] [[deps.NetworkOptions]]
uuid = "ca575930-c2e3-43a9-ace4-1e988b2c1908" uuid = "ca575930-c2e3-43a9-ace4-1e988b2c1908"
@ -143,85 +159,85 @@ version = "1.2.0"
[[deps.OpenBLAS_jll]] [[deps.OpenBLAS_jll]]
deps = ["Artifacts", "CompilerSupportLibraries_jll", "Libdl"] deps = ["Artifacts", "CompilerSupportLibraries_jll", "Libdl"]
uuid = "4536629a-c528-5b80-bd46-f80d51c5b363" uuid = "4536629a-c528-5b80-bd46-f80d51c5b363"
version = "0.3.20+0" version = "0.3.27+1"
[[deps.OpenMPI_jll]] [[deps.OpenMPI_jll]]
deps = ["Artifacts", "CompilerSupportLibraries_jll", "JLLWrappers", "LazyArtifacts", "Libdl", "MPIPreferences", "Pkg", "TOML"] deps = ["Artifacts", "CompilerSupportLibraries_jll", "Hwloc_jll", "JLLWrappers", "LazyArtifacts", "Libdl", "MPIPreferences", "TOML", "Zlib_jll"]
git-tree-sha1 = "346d6b357a480300ed7854dbc70e746ac52e10fd" git-tree-sha1 = "2dace87e14256edb1dd0724ab7ba831c779b96bd"
uuid = "fe0851c0-eecd-5654-98d4-656369965a5c" uuid = "fe0851c0-eecd-5654-98d4-656369965a5c"
version = "4.1.3+3" version = "5.0.6+0"
[[deps.Pkg]] [[deps.Pkg]]
deps = ["Artifacts", "Dates", "Downloads", "LibGit2", "Libdl", "Logging", "Markdown", "Printf", "REPL", "Random", "SHA", "Serialization", "TOML", "Tar", "UUIDs", "p7zip_jll"] deps = ["Artifacts", "Dates", "Downloads", "FileWatching", "LibGit2", "Libdl", "Logging", "Markdown", "Printf", "Random", "SHA", "TOML", "Tar", "UUIDs", "p7zip_jll"]
uuid = "44cfe95a-1eb2-52ea-b672-e2afdf69b78f" uuid = "44cfe95a-1eb2-52ea-b672-e2afdf69b78f"
version = "1.8.0" version = "1.11.0"
[deps.Pkg.extensions]
REPLExt = "REPL"
[deps.Pkg.weakdeps]
REPL = "3fa0cd96-eef1-5676-8a61-b3b8758bbffb"
[[deps.Preferences]] [[deps.Preferences]]
deps = ["TOML"] deps = ["TOML"]
git-tree-sha1 = "47e5f437cc0e7ef2ce8406ce1e7e24d44915f88d" git-tree-sha1 = "9306f6085165d270f7e3db02af26a400d580f5c6"
uuid = "21216c6a-2e73-6563-6e65-726566657250" uuid = "21216c6a-2e73-6563-6e65-726566657250"
version = "1.3.0" version = "1.4.3"
[[deps.Printf]] [[deps.Printf]]
deps = ["Unicode"] deps = ["Unicode"]
uuid = "de0858da-6303-5e67-8744-51eddeeeb8d7" uuid = "de0858da-6303-5e67-8744-51eddeeeb8d7"
version = "1.11.0"
[[deps.REPL]]
deps = ["InteractiveUtils", "Markdown", "Sockets", "Unicode"]
uuid = "3fa0cd96-eef1-5676-8a61-b3b8758bbffb"
[[deps.Random]] [[deps.Random]]
deps = ["SHA", "Serialization"] deps = ["SHA"]
uuid = "9a3f8284-a2c9-5f02-9a11-845980a1fd5c" uuid = "9a3f8284-a2c9-5f02-9a11-845980a1fd5c"
version = "1.11.0"
[[deps.SHA]] [[deps.SHA]]
uuid = "ea8e919c-243c-51af-8825-aaa63cd721ce" uuid = "ea8e919c-243c-51af-8825-aaa63cd721ce"
version = "0.7.0" version = "0.7.0"
[[deps.Serialization]]
uuid = "9e88b42a-f829-5b0c-bbe9-9e923198166b"
[[deps.Sockets]]
uuid = "6462fe0b-24de-5631-8697-dd941f90decc"
[[deps.TOML]] [[deps.TOML]]
deps = ["Dates"] deps = ["Dates"]
uuid = "fa267f1f-6049-4f14-aa54-33bafae1ed76" uuid = "fa267f1f-6049-4f14-aa54-33bafae1ed76"
version = "1.0.0" version = "1.0.3"
[[deps.Tar]] [[deps.Tar]]
deps = ["ArgTools", "SHA"] deps = ["ArgTools", "SHA"]
uuid = "a4e569a6-e804-4fa4-b0f3-eef7a1d5b13e" uuid = "a4e569a6-e804-4fa4-b0f3-eef7a1d5b13e"
version = "1.10.1" version = "1.10.0"
[[deps.UUIDs]] [[deps.UUIDs]]
deps = ["Random", "SHA"] deps = ["Random", "SHA"]
uuid = "cf7118a7-6976-5b1a-9a39-7adc72f591a4" uuid = "cf7118a7-6976-5b1a-9a39-7adc72f591a4"
version = "1.11.0"
[[deps.Unicode]] [[deps.Unicode]]
uuid = "4ec0a83e-493e-50e2-b9ac-8f72acf5a8f5" uuid = "4ec0a83e-493e-50e2-b9ac-8f72acf5a8f5"
version = "1.11.0"
[[deps.Zlib_jll]] [[deps.Zlib_jll]]
deps = ["Libdl"] deps = ["Libdl"]
uuid = "83775a58-1f1d-513f-b197-d71354ab007a" uuid = "83775a58-1f1d-513f-b197-d71354ab007a"
version = "1.2.12+3" version = "1.2.13+1"
[[deps.libLLVM_jll]] [[deps.libLLVM_jll]]
deps = ["Artifacts", "Libdl"] deps = ["Artifacts", "Libdl"]
uuid = "8f36deef-c2a5-5394-99ed-8e07531fb29a" uuid = "8f36deef-c2a5-5394-99ed-8e07531fb29a"
version = "13.0.1+3" version = "16.0.6+4"
[[deps.libblastrampoline_jll]] [[deps.libblastrampoline_jll]]
deps = ["Artifacts", "Libdl", "OpenBLAS_jll"] deps = ["Artifacts", "Libdl"]
uuid = "8e850b90-86db-534c-a0d3-1478176c7d93" uuid = "8e850b90-86db-534c-a0d3-1478176c7d93"
version = "5.1.1+0" version = "5.11.0+0"
[[deps.nghttp2_jll]] [[deps.nghttp2_jll]]
deps = ["Artifacts", "Libdl"] deps = ["Artifacts", "Libdl"]
uuid = "8e850ede-7688-5339-a07c-302acd2aaf8d" uuid = "8e850ede-7688-5339-a07c-302acd2aaf8d"
version = "1.48.0+0" version = "1.59.0+0"
[[deps.p7zip_jll]] [[deps.p7zip_jll]]
deps = ["Artifacts", "Libdl"] deps = ["Artifacts", "Libdl"]
uuid = "3f19e933-33d8-53b3-aaab-bd5110c3b7a0" uuid = "3f19e933-33d8-53b3-aaab-bd5110c3b7a0"
version = "17.4.0+0" version = "17.4.0+2"

15
gen/generator.jl

@ -21,12 +21,15 @@ push!(args, "-DHYPRE_ENABLE_CUDA_STREAMS=OFF")
push!(args, "-DHYPRE_ENABLE_CUSPARSE=OFF") push!(args, "-DHYPRE_ENABLE_CUSPARSE=OFF")
push!(args, "-DHYPRE_ENABLE_CURAND=OFF") push!(args, "-DHYPRE_ENABLE_CURAND=OFF")
headers = joinpath.(hypre_include_dir, [ headers = joinpath.(
"HYPRE.h", hypre_include_dir,
"HYPRE_IJ_mv.h", [
"HYPRE_parcsr_mv.h", "HYPRE.h",
"HYPRE_parcsr_ls.h", "HYPRE_IJ_mv.h",
]) "HYPRE_parcsr_mv.h",
"HYPRE_parcsr_ls.h",
]
)
ctx = create_context(headers, args, options) ctx = create_context(headers, args, options)

7
gen/generator.toml

@ -13,5 +13,10 @@ output_ignorelist = [
# Bogus expression: const HYPRE_VERSION = ((("HYPRE_RELEASE_NAME Date Compiled: ")(__DATE__))(" "))(__TIME__) # Bogus expression: const HYPRE_VERSION = ((("HYPRE_RELEASE_NAME Date Compiled: ")(__DATE__))(" "))(__TIME__)
"HYPRE_VERSION", "HYPRE_VERSION",
# Filter out MPI stuff # Filter out MPI stuff
"^[PQ]?MPI" "^[PQ]?MPI",
# Included in prologue.jl
"MPI_Comm",
] ]
[codegen]
use_ccall_macro = true

15
gen/prologue.jl

@ -1,6 +1,17 @@
###########################
## Start gen/prologue.jl ##
###########################
using MPI: MPI, MPI_Comm using MPI: MPI, MPI_Comm
if isdefined(MPI, :API) # MPI >= 0.20.0
if isdefined(MPI, :API)
# MPI >= 0.20.0
using MPI.API: MPI_INT, MPI_DOUBLE using MPI.API: MPI_INT, MPI_DOUBLE
else # MPI < 0.20.0 else
# MPI < 0.20.0
using MPI: MPI_INT, MPI_DOUBLE using MPI: MPI_INT, MPI_DOUBLE
end end
#########################
## End gen/prologue.jl ##
#########################

8
gen/solver_options.jl

@ -2,11 +2,10 @@ using HYPRE.LibHYPRE
function generate_options(io, structname, prefixes...) function generate_options(io, structname, prefixes...)
println(io, "") println(io, "")
println(io, "function Internals.set_options(s::$(structname), kwargs)") println(io, "function Internals.set_options(solver::$(structname), kwargs)")
println(io, " solver = s.solver")
println(io, " for (k, v) in kwargs") println(io, " for (k, v) in kwargs")
ns = Tuple{Symbol,String}[] ns = Tuple{Symbol, String}[]
for prefix in prefixes, n in names(LibHYPRE) for prefix in prefixes, n in names(LibHYPRE)
r = Regex("^" * prefix * "([A-Z].*)\$") r = Regex("^" * prefix * "([A-Z].*)\$")
if (m = match(r, string(n)); m !== nothing) if (m = match(r, string(n)); m !== nothing)
@ -29,7 +28,7 @@ function generate_options(io, structname, prefixes...)
println(io) println(io)
if k == "Precond" if k == "Precond"
println(io, " Internals.set_precond_defaults(v)") println(io, " Internals.set_precond_defaults(v)")
println(io, " Internals.set_precond(s, v)") println(io, " Internals.set_precond(solver, v)")
elseif nargs == 1 elseif nargs == 1
println(io, " @check ", n, "(solver)") println(io, " @check ", n, "(solver)")
elseif nargs == 2 elseif nargs == 2
@ -44,6 +43,7 @@ function generate_options(io, structname, prefixes...)
println(io, " end") println(io, " end")
println(io, " end") println(io, " end")
println(io, "end") println(io, "end")
return
end end
open(joinpath(@__DIR__, "..", "src", "solver_options.jl"), "w") do io open(joinpath(@__DIR__, "..", "src", "solver_options.jl"), "w") do io

1768
lib/LibHYPRE.jl

File diff suppressed because it is too large Load Diff

537
src/HYPRE.jl

@ -3,14 +3,9 @@
module HYPRE module HYPRE
using MPI: MPI using MPI: MPI
using PartitionedArrays: IndexRange, MPIData, PSparseMatrix, PVector, PartitionedArrays,
SequentialData, map_parts
using SparseArrays: SparseArrays, AbstractSparseMatrixCSC, SparseMatrixCSC, nnz, nonzeros, nzrange, rowvals
using SparseMatricesCSR: SparseMatrixCSR, colvals, getrowptr
export HYPREMatrix, HYPREVector export HYPREMatrix, HYPREVector
# Clang.jl auto-generated bindings and some manual methods # Clang.jl auto-generated bindings and some manual methods
include("LibHYPRE.jl") include("LibHYPRE.jl")
using .LibHYPRE using .LibHYPRE
@ -29,7 +24,7 @@ initialized.
**Note**: This function *must* be called before using HYPRE functions. **Note**: This function *must* be called before using HYPRE functions.
""" """
function Init(; finalize_atexit=true) function Init(; finalize_atexit = true)
if !(MPI.Initialized()) if !(MPI.Initialized())
MPI.Init() MPI.Init()
end end
@ -38,7 +33,12 @@ function Init(; finalize_atexit=true)
if finalize_atexit if finalize_atexit
# TODO: MPI only calls the finalizer if not exiting due to a Julia exeption. Does # TODO: MPI only calls the finalizer if not exiting due to a Julia exeption. Does
# the same reasoning apply here? # the same reasoning apply here?
atexit(HYPRE_Finalize) atexit() do
# Finalize any HYPRE objects that are still alive
foreach(finalize, keys(Internals.HYPRE_OBJECTS))
# Finalize the library
HYPRE_Finalize()
end
end end
return nothing return nothing
end end
@ -49,28 +49,43 @@ end
############### ###############
mutable struct HYPREMatrix # <: AbstractMatrix{HYPRE_Complex} mutable struct HYPREMatrix # <: AbstractMatrix{HYPRE_Complex}
#= const =# comm::MPI.Comm const comm::MPI.Comm
#= const =# ilower::HYPRE_BigInt const ilower::HYPRE_BigInt
#= const =# iupper::HYPRE_BigInt const iupper::HYPRE_BigInt
#= const =# jlower::HYPRE_BigInt const jlower::HYPRE_BigInt
#= const =# jupper::HYPRE_BigInt const jupper::HYPRE_BigInt
ijmatrix::HYPRE_IJMatrix ijmatrix::HYPRE_IJMatrix
parmatrix::HYPRE_ParCSRMatrix parmatrix::HYPRE_ParCSRMatrix
end end
function HYPREMatrix(comm::MPI.Comm, ilower::Integer, iupper::Integer, # Defining unsafe_convert enables ccall to automatically convert A::HYPREMatrix to
jlower::Integer=ilower, jupper::Integer=iupper) # HYPRE_IJMatrix and HYPRE_ParCSRMatrix while also making sure A won't be GC'd and
# finalized.
Base.unsafe_convert(::Type{HYPRE_IJMatrix}, A::HYPREMatrix) = A.ijmatrix
Base.unsafe_convert(::Type{HYPRE_ParCSRMatrix}, A::HYPREMatrix) = A.parmatrix
function HYPREMatrix(
comm::MPI.Comm,
ilower::Integer, iupper::Integer,
jlower::Integer = ilower, jupper::Integer = iupper
)
# Create the IJ matrix # Create the IJ matrix
A = HYPREMatrix(comm, ilower, iupper, jlower, jupper, C_NULL, C_NULL) A = HYPREMatrix(comm, ilower, iupper, jlower, jupper, C_NULL, C_NULL)
ijmatrix_ref = Ref{HYPRE_IJMatrix}(C_NULL) ijmatrix_ref = Ref{HYPRE_IJMatrix}(C_NULL)
@check HYPRE_IJMatrixCreate(comm, ilower, iupper, ilower, iupper, ijmatrix_ref) @check HYPRE_IJMatrixCreate(comm, ilower, iupper, ilower, iupper, ijmatrix_ref)
A.ijmatrix = ijmatrix_ref[] A.ijmatrix = ijmatrix_ref[]
# Attach a finalizer # Attach a finalizer
finalizer(x -> HYPRE_IJMatrixDestroy(x.ijmatrix), A) finalizer(A) do x
if x.ijmatrix != C_NULL
HYPRE_IJMatrixDestroy(x)
x.ijmatrix = x.parmatrix = C_NULL
end
end
push!(Internals.HYPRE_OBJECTS, A => nothing)
# Set storage type # Set storage type
@check HYPRE_IJMatrixSetObjectType(A.ijmatrix, HYPRE_PARCSR) @check HYPRE_IJMatrixSetObjectType(A, HYPRE_PARCSR)
# Initialize to make ready for setting values # Initialize to make ready for setting values
@check HYPRE_IJMatrixInitialize(A.ijmatrix) @check HYPRE_IJMatrixInitialize(A)
return A return A
end end
@ -78,10 +93,10 @@ end
# This should be called after setting all the values # This should be called after setting all the values
function Internals.assemble_matrix(A::HYPREMatrix) function Internals.assemble_matrix(A::HYPREMatrix)
# Finalize after setting all values # Finalize after setting all values
@check HYPRE_IJMatrixAssemble(A.ijmatrix) @check HYPRE_IJMatrixAssemble(A)
# Fetch the assembled CSR matrix # Fetch the assembled CSR matrix
parmatrix_ref = Ref{Ptr{Cvoid}}(C_NULL) parmatrix_ref = Ref{Ptr{Cvoid}}(C_NULL)
@check HYPRE_IJMatrixGetObject(A.ijmatrix, parmatrix_ref) @check HYPRE_IJMatrixGetObject(A, parmatrix_ref)
A.parmatrix = convert(Ptr{HYPRE_ParCSRMatrix}, parmatrix_ref[]) A.parmatrix = convert(Ptr{HYPRE_ParCSRMatrix}, parmatrix_ref[])
return A return A
end end
@ -91,13 +106,18 @@ end
############### ###############
mutable struct HYPREVector # <: AbstractVector{HYPRE_Complex} mutable struct HYPREVector # <: AbstractVector{HYPRE_Complex}
#= const =# comm::MPI.Comm const comm::MPI.Comm
#= const =# ilower::HYPRE_BigInt const ilower::HYPRE_BigInt
#= const =# iupper::HYPRE_BigInt const iupper::HYPRE_BigInt
ijvector::HYPRE_IJVector ijvector::HYPRE_IJVector
parvector::HYPRE_ParVector parvector::HYPRE_ParVector
end end
# Defining unsafe_convert enables ccall to automatically convert b::HYPREVector to
# HYPRE_IJVector and HYPRE_ParVector while also making sure b won't be GC'd and finalized.
Base.unsafe_convert(::Type{HYPRE_IJVector}, b::HYPREVector) = b.ijvector
Base.unsafe_convert(::Type{HYPRE_ParVector}, b::HYPREVector) = b.parvector
function HYPREVector(comm::MPI.Comm, ilower::Integer, iupper::Integer) function HYPREVector(comm::MPI.Comm, ilower::Integer, iupper::Integer)
# Create the IJ vector # Create the IJ vector
b = HYPREVector(comm, ilower, iupper, C_NULL, C_NULL) b = HYPREVector(comm, ilower, iupper, C_NULL, C_NULL)
@ -105,20 +125,26 @@ function HYPREVector(comm::MPI.Comm, ilower::Integer, iupper::Integer)
@check HYPRE_IJVectorCreate(comm, ilower, iupper, ijvector_ref) @check HYPRE_IJVectorCreate(comm, ilower, iupper, ijvector_ref)
b.ijvector = ijvector_ref[] b.ijvector = ijvector_ref[]
# Attach a finalizer # Attach a finalizer
finalizer(x -> HYPRE_IJVectorDestroy(x.ijvector), b) finalizer(b) do x
if x.ijvector != C_NULL
HYPRE_IJVectorDestroy(x)
x.ijvector = x.parvector = C_NULL
end
end
push!(Internals.HYPRE_OBJECTS, b => nothing)
# Set storage type # Set storage type
@check HYPRE_IJVectorSetObjectType(b.ijvector, HYPRE_PARCSR) @check HYPRE_IJVectorSetObjectType(b, HYPRE_PARCSR)
# Initialize to make ready for setting values # Initialize to make ready for setting values
@check HYPRE_IJVectorInitialize(b.ijvector) @check HYPRE_IJVectorInitialize(b)
return b return b
end end
function Internals.assemble_vector(b::HYPREVector) function Internals.assemble_vector(b::HYPREVector)
# Finalize after setting all values # Finalize after setting all values
@check HYPRE_IJVectorAssemble(b.ijvector) @check HYPRE_IJVectorAssemble(b)
# Fetch the assembled vector # Fetch the assembled vector
parvector_ref = Ref{Ptr{Cvoid}}(C_NULL) parvector_ref = Ref{Ptr{Cvoid}}(C_NULL)
@check HYPRE_IJVectorGetObject(b.ijvector, parvector_ref) @check HYPRE_IJVectorGetObject(b, parvector_ref)
b.parvector = convert(Ptr{HYPRE_ParVector}, parvector_ref[]) b.parvector = convert(Ptr{HYPRE_ParVector}, parvector_ref[])
return b return b
end end
@ -126,7 +152,7 @@ end
function Internals.get_proc_rows(b::HYPREVector) function Internals.get_proc_rows(b::HYPREVector)
# ilower_ref = Ref{HYPRE_BigInt}() # ilower_ref = Ref{HYPRE_BigInt}()
# iupper_ref = Ref{HYPRE_BigInt}() # iupper_ref = Ref{HYPRE_BigInt}()
# @check HYPRE_IJVectorGetLocalRange(b.ijvector, ilower_ref, iupper_ref) # @check HYPRE_IJVectorGetLocalRange(b, ilower_ref, iupper_ref)
# ilower = ilower_ref[] # ilower = ilower_ref[]
# iupper = iupper_ref[] # iupper = iupper_ref[]
# return ilower, iupper # return ilower, iupper
@ -152,105 +178,24 @@ function Base.zero(b::HYPREVector)
nvalues = jupper - jlower + 1 nvalues = jupper - jlower + 1
indices = collect(HYPRE_BigInt, jlower:jupper) indices = collect(HYPRE_BigInt, jlower:jupper)
values = zeros(HYPRE_Complex, nvalues) values = zeros(HYPRE_Complex, nvalues)
@check HYPRE_IJVectorSetValues(x.ijvector, nvalues, indices, values) @check HYPRE_IJVectorSetValues(x, nvalues, indices, values)
# Finalize and return # Finalize and return
Internals.assemble_vector(x) Internals.assemble_vector(x)
return x return x
end end
######################################
# SparseMatrixCS(C|R) -> HYPREMatrix # #########################
###################################### # Vector -> HYPREVector #
#########################
function Internals.check_n_rows(A, ilower, iupper) function Internals.check_n_rows(A, ilower, iupper)
if size(A, 1) != (iupper - ilower + 1) if size(A, 1) != (iupper - ilower + 1)
throw(ArgumentError("number of rows in matrix does not match global start/end rows ilower and iupper")) throw(ArgumentError("number of rows in matrix does not match global start/end rows ilower and iupper"))
end end
return
end end
function Internals.to_hypre_data(A::SparseMatrixCSC, ilower, iupper)
Internals.check_n_rows(A, ilower, iupper)
nnz = SparseArrays.nnz(A)
A_rows = rowvals(A)
A_vals = nonzeros(A)
# Initialize the data buffers HYPRE wants
nrows = HYPRE_Int(iupper - ilower + 1) # Total number of rows
ncols = zeros(HYPRE_Int, nrows) # Number of colums for each row
rows = collect(HYPRE_BigInt, ilower:iupper) # The row indices
cols = Vector{HYPRE_BigInt}(undef, nnz) # The column indices
values = Vector{HYPRE_Complex}(undef, nnz) # The values
# First pass to count nnz per row
@inbounds for j in 1:size(A, 2)
for i in nzrange(A, j)
row = A_rows[i]
ncols[row] += 1
end
end
# Keep track of the last index used for every row
lastinds = zeros(Int, nrows)
cumsum!((@view lastinds[2:end]), (@view ncols[1:end-1]))
# Second pass to populate the output
@inbounds for j in 1:size(A, 2)
for i in nzrange(A, j)
row = A_rows[i]
k = lastinds[row] += 1
val = A_vals[i]
cols[k] = j
values[k] = val
end
end
return nrows, ncols, rows, cols, values
end
function Internals.to_hypre_data(A::SparseMatrixCSR, ilower, iupper)
Internals.check_n_rows(A, ilower, iupper)
nnz = SparseArrays.nnz(A)
A_cols = colvals(A)
A_vals = nonzeros(A)
# Initialize the data buffers HYPRE wants
nrows = HYPRE_Int(iupper - ilower + 1) # Total number of rows
ncols = Vector{HYPRE_Int}(undef, nrows) # Number of colums for each row
rows = collect(HYPRE_BigInt, ilower:iupper) # The row indices
cols = Vector{HYPRE_BigInt}(undef, nnz) # The column indices
values = Vector{HYPRE_Complex}(undef, nnz) # The values
# Loop over the rows and collect all values
k = 0
@inbounds for i in 1:size(A, 1)
nzr = nzrange(A, i)
ncols[i] = length(nzr)
for j in nzr
k += 1
col = A_cols[j]
val = A_vals[j]
cols[k] = col
values[k] = val
end
end
@assert nnz == k
return nrows, ncols, rows, cols, values
end
"""
    HYPREMatrix(comm::MPI.Comm, B::Union{SparseMatrixCSC,SparseMatrixCSR}, ilower, iupper)

Create a `HYPREMatrix` on the communicator `comm` owning global rows
`ilower:iupper`, copy the entries of `B` into it, and assemble it. The returned
matrix is ready for use with the solver interface.
"""
function HYPREMatrix(comm::MPI.Comm, B::Union{SparseMatrixCSC,SparseMatrixCSR}, ilower, iupper)
    # Create the (empty, initialized) IJ matrix for the owned row range
    A = HYPREMatrix(comm, ilower, iupper)
    # Convert B to HYPRE's row-oriented buffer layout and set all values at once
    nrows, ncols, rows, cols, values = Internals.to_hypre_data(B, ilower, iupper)
    @check HYPRE_IJMatrixSetValues(A.ijmatrix, nrows, ncols, rows, cols, values)
    # Finalize: synchronizes data and constructs the underlying ParCSR matrix
    Internals.assemble_matrix(A)
    return A
end
# Convenience constructor for single-process use: wraps the whole matrix on
# MPI.COMM_SELF, owning all rows by default.
function HYPREMatrix(B::Union{SparseMatrixCSC, SparseMatrixCSR}, ilower = 1, iupper = size(B, 1))
    return HYPREMatrix(MPI.COMM_SELF, B, ilower, iupper)
end
#########################
# Vector -> HYPREVector #
#########################
function Internals.to_hypre_data(x::Vector, ilower, iupper) function Internals.to_hypre_data(x::Vector, ilower, iupper)
Internals.check_n_rows(x, ilower, iupper) Internals.check_n_rows(x, ilower, iupper)
indices = collect(HYPRE_BigInt, ilower:iupper) indices = collect(HYPRE_BigInt, ilower:iupper)
@ -262,13 +207,14 @@ end
function HYPREVector(comm::MPI.Comm, x::Vector, ilower, iupper) function HYPREVector(comm::MPI.Comm, x::Vector, ilower, iupper)
b = HYPREVector(comm, ilower, iupper) b = HYPREVector(comm, ilower, iupper)
nvalues, indices, values = Internals.to_hypre_data(x, ilower, iupper) nvalues, indices, values = Internals.to_hypre_data(x, ilower, iupper)
@check HYPRE_IJVectorSetValues(b.ijvector, nvalues, indices, values) @check HYPRE_IJVectorSetValues(b, nvalues, indices, values)
Internals.assemble_vector(b) Internals.assemble_vector(b)
return b return b
end end
HYPREVector(x::Vector, ilower=1, iupper=length(x)) = function HYPREVector(x::Vector, ilower = 1, iupper = length(x))
HYPREVector(MPI.COMM_SELF, x, ilower, iupper) return HYPREVector(MPI.COMM_SELF, x, ilower, iupper)
end
# TODO: Other eltypes could be support by using a intermediate buffer # TODO: Other eltypes could be support by using a intermediate buffer
function Base.copy!(dst::Vector{HYPRE_Complex}, src::HYPREVector) function Base.copy!(dst::Vector{HYPRE_Complex}, src::HYPREVector)
@ -278,7 +224,7 @@ function Base.copy!(dst::Vector{HYPRE_Complex}, src::HYPREVector)
throw(ArgumentError("length of dst and src does not match")) throw(ArgumentError("length of dst and src does not match"))
end end
indices = collect(HYPRE_BigInt, ilower:iupper) indices = collect(HYPRE_BigInt, ilower:iupper)
@check HYPRE_IJVectorGetValues(src.ijvector, nvalues, indices, dst) @check HYPRE_IJVectorGetValues(src, nvalues, indices, dst)
return dst return dst
end end
@ -289,242 +235,187 @@ function Base.copy!(dst::HYPREVector, src::Vector{HYPRE_Complex})
throw(ArgumentError("length of dst and src does not match")) throw(ArgumentError("length of dst and src does not match"))
end end
# Re-initialize the vector # Re-initialize the vector
@check HYPRE_IJVectorInitialize(dst.ijvector) @check HYPRE_IJVectorInitialize(dst)
# Set all the values # Set all the values
indices = collect(HYPRE_BigInt, ilower:iupper) indices = collect(HYPRE_BigInt, ilower:iupper)
@check HYPRE_IJVectorSetValues(dst.ijvector, nvalues, indices, src) @check HYPRE_IJVectorSetValues(dst, nvalues, indices, src)
# TODO: It shouldn't be necessary to assemble here since we only set owned rows (?) # TODO: It shouldn't be necessary to assemble here since we only set owned rows (?)
# @check HYPRE_IJVectorAssemble(dst.ijvector) # @check HYPRE_IJVectorAssemble(dst)
# TODO: Necessary to recreate the ParVector? Running some examples it seems like it is # TODO: Necessary to recreate the ParVector? Running some examples it seems like it is
# not needed. # not needed.
return dst return dst
end end
##################################################
# PartitionedArrays.PSparseMatrix -> HYPREMatrix #
##################################################
# TODO: This has some duplicated code with to_hypre_data(::SparseMatrixCSC, ilower, iupper)
function Internals.to_hypre_data(A::SparseMatrixCSC, r::IndexRange, c::IndexRange)
@assert r.oid_to_lid isa UnitRange && r.oid_to_lid.start == 1
ilower = r.lid_to_gid[r.oid_to_lid.start]
iupper = r.lid_to_gid[r.oid_to_lid.stop]
a_rows = rowvals(A)
a_vals = nonzeros(A)
# Initialize the data buffers HYPRE wants
nrows = HYPRE_Int(iupper - ilower + 1) # Total number of rows
ncols = zeros(HYPRE_Int, nrows) # Number of colums for each row
rows = collect(HYPRE_BigInt, ilower:iupper) # The row indices
# cols = Vector{HYPRE_BigInt}(undef, nnz) # The column indices
# values = Vector{HYPRE_Complex}(undef, nnz) # The values
# First pass to count nnz per row (note that the fact that columns are permuted
# doesn't matter for this pass)
a_rows = rowvals(A)
a_vals = nonzeros(A)
@inbounds for j in 1:size(A, 2)
for i in nzrange(A, j)
row = a_rows[i]
row > r.oid_to_lid.stop && continue # Skip ghost rows
# grow = r.lid_to_gid[lrow]
ncols[row] += 1
end
end
# Initialize remaining buffers now that nnz is known ####################
nnz = sum(ncols) ## HYPREAssembler ##
cols = Vector{HYPRE_BigInt}(undef, nnz) ####################
values = Vector{HYPRE_Complex}(undef, nnz)
# Keep track of the last index used for every row
lastinds = zeros(Int, nrows)
cumsum!((@view lastinds[2:end]), (@view ncols[1:end-1]))
# Second pass to populate the output -- here we need to take care of the permutation
# of columns. TODO: Problem that they are not sorted?
@inbounds for j in 1:size(A, 2)
for i in nzrange(A, j)
row = a_rows[i]
row > r.oid_to_lid.stop && continue # Skip ghost rows
k = lastinds[row] += 1
val = a_vals[i]
cols[k] = c.lid_to_gid[j]
values[k] = val
end
end
return nrows, ncols, rows, cols, values
end
# TODO: Possibly this can be optimized if it is possible to pass overlong vectors to HYPRE. struct HYPREMatrixAssembler
# At least values should be possible to directly share, but cols needs to translated A::HYPREMatrix
# to global ids. ncols::Vector{HYPRE_Int}
function Internals.to_hypre_data(A::SparseMatrixCSR, r::IndexRange, c::IndexRange) rows::Vector{HYPRE_BigInt}
@assert r.oid_to_lid isa UnitRange && r.oid_to_lid.start == 1 cols::Vector{HYPRE_BigInt}
values::Vector{HYPRE_Complex}
ilower = r.lid_to_gid[r.oid_to_lid.start]
iupper = r.lid_to_gid[r.oid_to_lid.stop]
a_cols = colvals(A)
a_vals = nonzeros(A)
nnz = getrowptr(A)[r.oid_to_lid.stop + 1] - 1
# Initialize the data buffers HYPRE wants
nrows = HYPRE_Int(iupper - ilower + 1) # Total number of rows
ncols = zeros(HYPRE_Int, nrows) # Number of colums for each row
rows = collect(HYPRE_BigInt, ilower:iupper) # The row indices
cols = Vector{HYPRE_BigInt}(undef, nnz) # The column indices
values = Vector{HYPRE_Complex}(undef, nnz) # The values
# Loop over the (owned) rows and collect all values
k = 0
@inbounds for i in r.oid_to_lid
nzr = nzrange(A, i)
ncols[i] = length(nzr)
for j in nzr
k += 1
col = a_cols[j]
val = a_vals[j]
cols[k] = c.lid_to_gid[col]
values[k] = val
end
end
@assert nnz == k
return nrows, ncols, rows, cols, values
end end
function Internals.get_comm(A::Union{PSparseMatrix{<:Any,<:M}, PVector{<:Any,<:M}}) where M <: MPIData struct HYPREVectorAssembler
return A.rows.partition.comm b::HYPREVector
indices::Vector{HYPRE_BigInt}
values::Vector{HYPRE_Complex}
end end
Internals.get_comm(_::Union{PSparseMatrix,PVector}) = MPI.COMM_SELF
function Internals.get_proc_rows(A::Union{PSparseMatrix{<:Any,<:M}, PVector{<:Any,<:M}}) where M <: MPIData struct HYPREAssembler
r = A.rows.partition.part A::HYPREMatrixAssembler
ilower::HYPRE_BigInt = r.lid_to_gid[r.oid_to_lid[1]] b::HYPREVectorAssembler
iupper::HYPRE_BigInt = r.lid_to_gid[r.oid_to_lid[end]]
return ilower, iupper
end end
function Internals.get_proc_rows(A::Union{PSparseMatrix{<:Any,<:S}, PVector{<:Any,<:S}}) where S <: SequentialData
ilower::HYPRE_BigInt = typemax(HYPRE_BigInt) """
iupper::HYPRE_BigInt = typemin(HYPRE_BigInt) HYPRE.start_assemble!(A::HYPREMatrix) -> HYPREMatrixAssembler
for r in A.rows.partition.parts HYPRE.start_assemble!(b::HYPREVector) -> HYPREVectorAssembler
ilower = min(r.lid_to_gid[r.oid_to_lid[1]], ilower) HYPRE.start_assemble!(A::HYPREMatrix, b::HYPREVector) -> HYPREAssembler
iupper = max(r.lid_to_gid[r.oid_to_lid[end]], iupper)
Initialize a new assembly for matrix `A`, vector `b`, or for both. This zeroes out any
previous data in the arrays. Return a `HYPREAssembler` with allocated data buffers needed to
perform the assembly efficiently.
See also: [`HYPRE.assemble!`](@ref), [`HYPRE.finish_assemble!`](@ref).
"""
start_assemble!
function start_assemble!(A::HYPREMatrix)
if A.parmatrix != C_NULL
# This matrix have been assembled before, reset to 0
@check HYPRE_IJMatrixSetConstantValues(A, 0)
end end
return ilower, iupper @check HYPRE_IJMatrixInitialize(A)
return HYPREMatrixAssembler(A, HYPRE_Int[], HYPRE_BigInt[], HYPRE_BigInt[], HYPRE_Complex[])
end end
function HYPREMatrix(B::PSparseMatrix) function start_assemble!(b::HYPREVector)
# Use the same communicator as the matrix if b.parvector != C_NULL
comm = Internals.get_comm(B) # This vector have been assembled before, reset to 0
# Fetch rows owned by this process # See https://github.com/hypre-space/hypre/pull/689
ilower, iupper = Internals.get_proc_rows(B) # @check HYPRE_IJVectorSetConstantValues(b, 0)
# Create the IJ matrix end
A = HYPREMatrix(comm, ilower, iupper) @check HYPRE_IJVectorInitialize(b)
# Set all the values if b.parvector != C_NULL
map_parts(B.values, B.rows.partition, B.cols.partition) do Bv, Br, Bc nvalues = HYPRE_Int(b.iupper - b.ilower + 1)
nrows, ncols, rows, cols, values = Internals.to_hypre_data(Bv, Br, Bc) indices = collect(HYPRE_BigInt, b.ilower:b.iupper)
@check HYPRE_IJMatrixSetValues(A.ijmatrix, nrows, ncols, rows, cols, values) values = zeros(HYPRE_Complex, nvalues)
return nothing @check HYPRE_IJVectorSetValues(b, nvalues, indices, values)
# TODO: Do I need to assemble here?
end end
# Finalize return HYPREVectorAssembler(b, HYPRE_BigInt[], HYPRE_Complex[])
Internals.assemble_matrix(A) end
function start_assemble!(A::HYPREMatrix, b::HYPREVector)
return HYPREAssembler(start_assemble!(A), start_assemble!(b))
end
"""
HYPRE.assemble!(A::HYPREMatrixAssembler, i, j, a::Matrix)
HYPRE.assemble!(A::HYPREVectorAssembler, i, b::Vector)
HYPRE.assemble!(A::HYPREAssembler, ij, a::Matrix, b::Vector)
Assemble (by adding) matrix contribution `a`, vector contribution `b`, into the underlying
array(s) of the assembler at global row indices `i` and column indices `j`.
This is roughly equivalent to:
```julia
# A.A::HYPREMatrix
A.A[i, j] += a
# A.b::HYPREVector
A.b[i] += b
```
See also: [`HYPRE.start_assemble!`](@ref), [`HYPRE.finish_assemble!`](@ref).
"""
assemble!
function assemble!(A::HYPREMatrixAssembler, i::Vector, j::Vector, a::Matrix)
nrows, ncols, rows, cols, values = Internals.to_hypre_data(A, a, i, j)
@check HYPRE_IJMatrixAddToValues(A.A, nrows, ncols, rows, cols, values)
return A return A
end end
@deprecate assemble!(A::HYPREMatrixAssembler, ij::Vector, a::Matrix) assemble!(A, ij, ij, a) false
############################################ function assemble!(A::HYPREVectorAssembler, ij::Vector, a::Vector)
# PartitionedArrays.PVector -> HYPREVector # nvalues, indices, values = Internals.to_hypre_data(A, a, ij)
############################################ @check HYPRE_IJVectorAddToValues(A.b, nvalues, indices, values)
return A
end
function HYPREVector(v::PVector) function assemble!(A::HYPREAssembler, ij::Vector, a::Matrix, b::Vector)
# Use the same communicator as the matrix assemble!(A.A, ij, ij, a)
comm = Internals.get_comm(v) assemble!(A.b, ij, b)
# Fetch rows owned by this process return A
ilower, iupper = Internals.get_proc_rows(v)
# Create the IJ vector
b = HYPREVector(comm, ilower, iupper)
# Set all the values
map_parts(v.values, v.owned_values, v.rows.partition) do _, vo, vr
ilower_part = vr.lid_to_gid[vr.oid_to_lid.start]
iupper_part = vr.lid_to_gid[vr.oid_to_lid.stop]
# Option 1: Set all values
nvalues = HYPRE_Int(iupper_part - ilower_part + 1)
indices = collect(HYPRE_BigInt, ilower_part:iupper_part)
# TODO: Could probably just pass the full vector even if it is too long
# values = convert(Vector{HYPRE_Complex}, vv)
values = collect(HYPRE_Complex, vo)
# # Option 2: Set only non-zeros
# indices = HYPRE_BigInt[]
# values = HYPRE_Complex[]
# for (i, vi) in zip(ilower_part:iupper_part, vo)
# if !iszero(vi)
# push!(indices, i)
# push!(values, vi)
# end
# end
# nvalues = length(indices)
@check HYPRE_IJVectorSetValues(b.ijvector, nvalues, indices, values)
return nothing
end
# Finalize
Internals.assemble_vector(b)
return b
end end
function Internals.copy_check(dst::HYPREVector, src::PVector) function Internals.to_hypre_data(A::HYPREMatrixAssembler, a::Matrix, I::Vector, J::Vector)
il_dst, iu_dst = Internals.get_proc_rows(dst) size(a, 1) == length(I) || error("mismatching number of rows")
il_src, iu_src = Internals.get_proc_rows(src) size(a, 2) == length(J) || error("mismatching number of cols")
if il_dst != il_src && iu_dst != iu_src nrows = HYPRE_Int(length(I))
# TODO: Why require this? # Resize cache vectors
throw(ArgumentError( ncols = resize!(A.ncols, nrows)
"row owner mismatch between dst ($(il_dst:iu_dst)) and src ($(il_dst:iu_dst))" rows = resize!(A.rows, nrows)
)) cols = resize!(A.cols, length(a))
values = resize!(A.values, length(a))
# Fill vectors
ncols = fill!(ncols, HYPRE_Int(length(J)))
copyto!(rows, I)
idx = 0
for i in 1:length(I), j in 1:length(J)
idx += 1
cols[idx] = J[j]
values[idx] = a[i, j]
end end
@assert idx == length(a)
@assert nrows == length(ncols) == length(rows)
return nrows, ncols, rows, cols, values
end end
# TODO: Other eltypes could be support by using a intermediate buffer function Internals.to_hypre_data(A::HYPREVectorAssembler, b::Vector, I::Vector)
function Base.copy!(dst::PVector{HYPRE_Complex}, src::HYPREVector) length(b) == length(I) || error("mismatching number of entries")
Internals.copy_check(src, dst) nvalues = HYPRE_Int(length(I))
map_parts(dst.values, dst.owned_values, dst.rows.partition) do vv, _, vr # Resize cache vectors
il_src_part = vr.lid_to_gid[vr.oid_to_lid.start] indices = resize!(A.indices, nvalues)
iu_src_part = vr.lid_to_gid[vr.oid_to_lid.stop] values = resize!(A.values, nvalues)
nvalues = HYPRE_Int(iu_src_part - il_src_part + 1) # Fill vectors
indices = collect(HYPRE_BigInt, il_src_part:iu_src_part) copyto!(indices, I)
copyto!(values, b)
# Assumption: the dst vector is assembled, and should thus have 0s on the ghost return nvalues, indices, values
# entries (??). If this is not true, we must call fill!(vv, 0) here. This should be
# fairly cheap anyway, so might as well do it...
fill!(vv, 0)
# TODO: Safe to use vv here? Owned values are always first?
@check HYPRE_IJVectorGetValues(src.ijvector, nvalues, indices, vv)
end
return dst
end end
function Base.copy!(dst::HYPREVector, src::PVector{HYPRE_Complex}) """
Internals.copy_check(dst, src) HYPRE.finish_assemble!(A::HYPREMatrixAssembler)
# Re-initialize the vector HYPRE.finish_assemble!(A::HYPREVectorAssembler)
@check HYPRE_IJVectorInitialize(dst.ijvector) HYPRE.finish_assemble!(A::HYPREAssembler)
map_parts(src.values, src.owned_values, src.rows.partition) do vv, _, vr
ilower_src_part = vr.lid_to_gid[vr.oid_to_lid.start] Finish the assembly. This synchronizes the data between processors.
iupper_src_part = vr.lid_to_gid[vr.oid_to_lid.stop] """
nvalues = HYPRE_Int(iupper_src_part - ilower_src_part + 1) finish_assemble!
indices = collect(HYPRE_BigInt, ilower_src_part:iupper_src_part)
# TODO: Safe to use vv here? Owned values are always first? function finish_assemble!(A::HYPREMatrixAssembler)
@check HYPRE_IJVectorSetValues(dst.ijvector, nvalues, indices, vv) Internals.assemble_matrix(A.A)
end return A.A
# TODO: It shouldn't be necessary to assemble here since we only set owned rows (?)
# @check HYPRE_IJVectorAssemble(dst.ijvector)
# TODO: Necessary to recreate the ParVector? Running some examples it seems like it is
# not needed.
return dst
end end
# Solver interface function finish_assemble!(A::HYPREVectorAssembler)
Internals.assemble_vector(A.b)
return A.b
end
function finish_assemble!(A::HYPREAssembler)
return finish_assemble!(A.A), finish_assemble!(A.b)
end
######################
## Solver interface ##
######################
include("solvers.jl") include("solvers.jl")
include("solver_options.jl") include("solver_options.jl")

2
src/Internals.jl

@ -16,4 +16,6 @@ function setup_func end
function solve_func end function solve_func end
function to_hypre_data end function to_hypre_data end
const HYPRE_OBJECTS = WeakKeyDict{Any, Nothing}()
end # module Internals end # module Internals

3
src/LibHYPRE.jl

@ -80,7 +80,7 @@ macro check(arg)
end end
# Export everything with HYPRE_ prefix # Export everything with HYPRE_ prefix
for name in names(@__MODULE__; all=true) for name in names(@__MODULE__; all = true)
if startswith(string(name), "HYPRE_") if startswith(string(name), "HYPRE_")
@eval export $name @eval export $name
end end
@ -92,6 +92,7 @@ function __init__()
patch_ref = Ref{HYPRE_Int}(-1) patch_ref = Ref{HYPRE_Int}(-1)
@check HYPRE_VersionNumber(major_ref, minor_ref, patch_ref, C_NULL) @check HYPRE_VersionNumber(major_ref, minor_ref, patch_ref, C_NULL)
global VERSION = VersionNumber(major_ref[], minor_ref[], patch_ref[]) global VERSION = VersionNumber(major_ref[], minor_ref[], patch_ref[])
return
end end
end end

2
src/precs.jl

@ -28,7 +28,7 @@ function construct_boomeramg_prec_builder(settings_fun!; kwargs...)
return BoomerAMGPrecBuilder(settings_fun!, kwargs) return BoomerAMGPrecBuilder(settings_fun!, kwargs)
end end
function (b::BoomerAMGPrecBuilder)(A::AbstractSparseMatrixCSC, p) function (b::BoomerAMGPrecBuilder)(A, p)
amg = HYPRE.BoomerAMG(; b.kwargs) amg = HYPRE.BoomerAMG(; b.kwargs)
settings_fun!(amg, A, p) settings_fun!(amg, A, p)
return (BoomerAMGPrecWrapper(amg, A), I) return (BoomerAMGPrecWrapper(amg, A), I)

42
src/solver_options.jl

@ -4,8 +4,7 @@
Internals.set_options(::HYPRESolver, kwargs) = nothing Internals.set_options(::HYPRESolver, kwargs) = nothing
function Internals.set_options(s::BiCGSTAB, kwargs) function Internals.set_options(solver::BiCGSTAB, kwargs)
solver = s.solver
for (k, v) in kwargs for (k, v) in kwargs
if k === :ConvergenceFactorTol if k === :ConvergenceFactorTol
@check HYPRE_BiCGSTABSetConvergenceFactorTol(solver, v) @check HYPRE_BiCGSTABSetConvergenceFactorTol(solver, v)
@ -19,7 +18,7 @@ function Internals.set_options(s::BiCGSTAB, kwargs)
@check HYPRE_ParCSRBiCGSTABSetMinIter(solver, v) @check HYPRE_ParCSRBiCGSTABSetMinIter(solver, v)
elseif k === :Precond elseif k === :Precond
Internals.set_precond_defaults(v) Internals.set_precond_defaults(v)
Internals.set_precond(s, v) Internals.set_precond(solver, v)
elseif k === :PrintLevel elseif k === :PrintLevel
@check HYPRE_ParCSRBiCGSTABSetPrintLevel(solver, v) @check HYPRE_ParCSRBiCGSTABSetPrintLevel(solver, v)
elseif k === :StopCrit elseif k === :StopCrit
@ -30,10 +29,10 @@ function Internals.set_options(s::BiCGSTAB, kwargs)
throw(ArgumentError("unknown option $k for HYPRE.BiCGSTAB")) throw(ArgumentError("unknown option $k for HYPRE.BiCGSTAB"))
end end
end end
return
end end
function Internals.set_options(s::BoomerAMG, kwargs) function Internals.set_options(solver::BoomerAMG, kwargs)
solver = s.solver
for (k, v) in kwargs for (k, v) in kwargs
if k === :ADropTol if k === :ADropTol
@check HYPRE_BoomerAMGSetADropTol(solver, v) @check HYPRE_BoomerAMGSetADropTol(solver, v)
@ -287,10 +286,10 @@ function Internals.set_options(s::BoomerAMG, kwargs)
throw(ArgumentError("unknown option $k for HYPRE.BoomerAMG")) throw(ArgumentError("unknown option $k for HYPRE.BoomerAMG"))
end end
end end
return
end end
function Internals.set_options(s::FlexGMRES, kwargs) function Internals.set_options(solver::FlexGMRES, kwargs)
solver = s.solver
for (k, v) in kwargs for (k, v) in kwargs
if k === :ConvergenceFactorTol if k === :ConvergenceFactorTol
@check HYPRE_FlexGMRESSetConvergenceFactorTol(solver, v) @check HYPRE_FlexGMRESSetConvergenceFactorTol(solver, v)
@ -308,7 +307,7 @@ function Internals.set_options(s::FlexGMRES, kwargs)
@check HYPRE_ParCSRFlexGMRESSetModifyPC(solver, v) @check HYPRE_ParCSRFlexGMRESSetModifyPC(solver, v)
elseif k === :Precond elseif k === :Precond
Internals.set_precond_defaults(v) Internals.set_precond_defaults(v)
Internals.set_precond(s, v) Internals.set_precond(solver, v)
elseif k === :PrintLevel elseif k === :PrintLevel
@check HYPRE_ParCSRFlexGMRESSetPrintLevel(solver, v) @check HYPRE_ParCSRFlexGMRESSetPrintLevel(solver, v)
elseif k === :Tol elseif k === :Tol
@ -317,10 +316,10 @@ function Internals.set_options(s::FlexGMRES, kwargs)
throw(ArgumentError("unknown option $k for HYPRE.FlexGMRES")) throw(ArgumentError("unknown option $k for HYPRE.FlexGMRES"))
end end
end end
return
end end
function Internals.set_options(s::GMRES, kwargs) function Internals.set_options(solver::GMRES, kwargs)
solver = s.solver
for (k, v) in kwargs for (k, v) in kwargs
if k === :ConvergenceFactorTol if k === :ConvergenceFactorTol
@check HYPRE_GMRESSetConvergenceFactorTol(solver, v) @check HYPRE_GMRESSetConvergenceFactorTol(solver, v)
@ -340,7 +339,7 @@ function Internals.set_options(s::GMRES, kwargs)
@check HYPRE_ParCSRGMRESSetMinIter(solver, v) @check HYPRE_ParCSRGMRESSetMinIter(solver, v)
elseif k === :Precond elseif k === :Precond
Internals.set_precond_defaults(v) Internals.set_precond_defaults(v)
Internals.set_precond(s, v) Internals.set_precond(solver, v)
elseif k === :PrintLevel elseif k === :PrintLevel
@check HYPRE_ParCSRGMRESSetPrintLevel(solver, v) @check HYPRE_ParCSRGMRESSetPrintLevel(solver, v)
elseif k === :StopCrit elseif k === :StopCrit
@ -351,10 +350,10 @@ function Internals.set_options(s::GMRES, kwargs)
throw(ArgumentError("unknown option $k for HYPRE.GMRES")) throw(ArgumentError("unknown option $k for HYPRE.GMRES"))
end end
end end
return
end end
function Internals.set_options(s::Hybrid, kwargs) function Internals.set_options(solver::Hybrid, kwargs)
solver = s.solver
for (k, v) in kwargs for (k, v) in kwargs
if k === :AbsoluteTol if k === :AbsoluteTol
@check HYPRE_ParCSRHybridSetAbsoluteTol(solver, v) @check HYPRE_ParCSRHybridSetAbsoluteTol(solver, v)
@ -424,7 +423,7 @@ function Internals.set_options(s::Hybrid, kwargs)
@check HYPRE_ParCSRHybridSetPMaxElmts(solver, v) @check HYPRE_ParCSRHybridSetPMaxElmts(solver, v)
elseif k === :Precond elseif k === :Precond
Internals.set_precond_defaults(v) Internals.set_precond_defaults(v)
Internals.set_precond(s, v) Internals.set_precond(solver, v)
elseif k === :PrintLevel elseif k === :PrintLevel
@check HYPRE_ParCSRHybridSetPrintLevel(solver, v) @check HYPRE_ParCSRHybridSetPrintLevel(solver, v)
elseif k === :RecomputeResidual elseif k === :RecomputeResidual
@ -461,10 +460,10 @@ function Internals.set_options(s::Hybrid, kwargs)
throw(ArgumentError("unknown option $k for HYPRE.Hybrid")) throw(ArgumentError("unknown option $k for HYPRE.Hybrid"))
end end
end end
return
end end
function Internals.set_options(s::ILU, kwargs) function Internals.set_options(solver::ILU, kwargs)
solver = s.solver
for (k, v) in kwargs for (k, v) in kwargs
if k === :DropThreshold if k === :DropThreshold
@check HYPRE_ILUSetDropThreshold(solver, v) @check HYPRE_ILUSetDropThreshold(solver, v)
@ -496,10 +495,10 @@ function Internals.set_options(s::ILU, kwargs)
throw(ArgumentError("unknown option $k for HYPRE.ILU")) throw(ArgumentError("unknown option $k for HYPRE.ILU"))
end end
end end
return
end end
function Internals.set_options(s::ParaSails, kwargs) function Internals.set_options(solver::ParaSails, kwargs)
solver = s.solver
for (k, v) in kwargs for (k, v) in kwargs
if k === :Filter if k === :Filter
@check HYPRE_ParCSRParaSailsSetFilter(solver, v) @check HYPRE_ParCSRParaSailsSetFilter(solver, v)
@ -517,10 +516,10 @@ function Internals.set_options(s::ParaSails, kwargs)
throw(ArgumentError("unknown option $k for HYPRE.ParaSails")) throw(ArgumentError("unknown option $k for HYPRE.ParaSails"))
end end
end end
return
end end
function Internals.set_options(s::PCG, kwargs) function Internals.set_options(solver::PCG, kwargs)
solver = s.solver
for (k, v) in kwargs for (k, v) in kwargs
if k === :AbsoluteTolFactor if k === :AbsoluteTolFactor
@check HYPRE_PCGSetAbsoluteTolFactor(solver, v) @check HYPRE_PCGSetAbsoluteTolFactor(solver, v)
@ -540,7 +539,7 @@ function Internals.set_options(s::PCG, kwargs)
@check HYPRE_ParCSRPCGSetMaxIter(solver, v) @check HYPRE_ParCSRPCGSetMaxIter(solver, v)
elseif k === :Precond elseif k === :Precond
Internals.set_precond_defaults(v) Internals.set_precond_defaults(v)
Internals.set_precond(s, v) Internals.set_precond(solver, v)
elseif k === :PrintLevel elseif k === :PrintLevel
@check HYPRE_ParCSRPCGSetPrintLevel(solver, v) @check HYPRE_ParCSRPCGSetPrintLevel(solver, v)
elseif k === :RelChange elseif k === :RelChange
@ -555,4 +554,5 @@ function Internals.set_options(s::PCG, kwargs)
throw(ArgumentError("unknown option $k for HYPRE.PCG")) throw(ArgumentError("unknown option $k for HYPRE.PCG"))
end end
end end
return
end end

209
src/solvers.jl

@ -7,16 +7,23 @@ Abstract super type of all the wrapped HYPRE solvers.
""" """
abstract type HYPRESolver end abstract type HYPRESolver end
function Internals.safe_finalizer(Destroy) function Internals.safe_finalizer(Destroy, solver)
# Only calls the Destroy if pointer not C_NULL # Add the solver to object tracker for possible atexit finalizing
return function(solver) push!(Internals.HYPRE_OBJECTS, solver => nothing)
if solver.solver != C_NULL # Add a finalizer that only calls Destroy if pointer not C_NULL
Destroy(solver.solver) finalizer(solver) do s
solver.solver = C_NULL if s.solver != C_NULL
Destroy(s)
s.solver = C_NULL
end end
end end
return
end end
# Defining unsafe_convert enables ccall to automatically convert solver::HYPRESolver to
# HYPRE_Solver while also making sure solver won't be GC'd and finalized.
Base.unsafe_convert(::Type{HYPRE_Solver}, solver::HYPRESolver) = solver.solver
# Fallback for the solvers that doesn't have required defaults # Fallback for the solvers that doesn't have required defaults
Internals.set_precond_defaults(::HYPRESolver) = nothing Internals.set_precond_defaults(::HYPRESolver) = nothing
@ -45,43 +52,6 @@ See also [`solve`](@ref).
solve!(pcg::HYPRESolver, x::HYPREVector, A::HYPREMatrix, ::HYPREVector) solve!(pcg::HYPRESolver, x::HYPREVector, A::HYPREMatrix, ::HYPREVector)
######################################
# PartitionedArrays solver interface #
######################################
# TODO: Would it be useful with a method that copied the solution to b instead?
function solve(solver::HYPRESolver, A::PSparseMatrix, b::PVector)
hypre_x = solve(solver, HYPREMatrix(A), HYPREVector(b))
x = copy!(similar(b, HYPRE_Complex), hypre_x)
return x
end
function solve!(solver::HYPRESolver, x::PVector, A::PSparseMatrix, b::PVector)
hypre_x = HYPREVector(x)
solve!(solver, hypre_x, HYPREMatrix(A), HYPREVector(b))
copy!(x, hypre_x)
return x
end
########################################
# SparseMatrixCS(C|R) solver interface #
########################################
# TODO: This could use the HYPRE compile flag for sequential mode to avoid MPI overhead
function solve(solver::HYPRESolver, A::Union{SparseMatrixCSC,SparseMatrixCSR}, b::Vector)
hypre_x = solve(solver, HYPREMatrix(A), HYPREVector(b))
x = copy!(similar(b, HYPRE_Complex), hypre_x)
return x
end
function solve!(solver::HYPRESolver, x::Vector, A::Union{SparseMatrixCSC,SparseMatrixCSR}, b::Vector)
hypre_x = HYPREVector(x)
solve!(solver, hypre_x, HYPREMatrix(A), HYPREVector(b))
copy!(x, hypre_x)
return x
end
##################################### #####################################
## Concrete solver implementations ## ## Concrete solver implementations ##
##################################### #####################################
@ -102,14 +72,15 @@ Create a `BiCGSTAB` solver. See HYPRE API reference for details and supported se
mutable struct BiCGSTAB <: HYPRESolver mutable struct BiCGSTAB <: HYPRESolver
comm::MPI.Comm comm::MPI.Comm
solver::HYPRE_Solver solver::HYPRE_Solver
function BiCGSTAB(comm::MPI.Comm=MPI.COMM_NULL; kwargs...) precond::Union{HYPRESolver, Nothing}
function BiCGSTAB(comm::MPI.Comm = MPI.COMM_NULL; kwargs...)
# comm defaults to COMM_NULL since it is unused in HYPRE_ParCSRBiCGSTABCreate # comm defaults to COMM_NULL since it is unused in HYPRE_ParCSRBiCGSTABCreate
solver = new(comm, C_NULL) solver = new(comm, C_NULL, nothing)
solver_ref = Ref{HYPRE_Solver}(C_NULL) solver_ref = Ref{HYPRE_Solver}(C_NULL)
@check HYPRE_ParCSRBiCGSTABCreate(comm, solver_ref) @check HYPRE_ParCSRBiCGSTABCreate(comm, solver_ref)
solver.solver = solver_ref[] solver.solver = solver_ref[]
# Attach a finalizer # Attach a finalizer
finalizer(Internals.safe_finalizer(HYPRE_ParCSRBiCGSTABDestroy), solver) Internals.safe_finalizer(HYPRE_ParCSRBiCGSTABDestroy, solver)
# Set the options # Set the options
Internals.set_options(solver, kwargs) Internals.set_options(solver, kwargs)
return solver return solver
@ -119,8 +90,8 @@ end
const ParCSRBiCGSTAB = BiCGSTAB const ParCSRBiCGSTAB = BiCGSTAB
function solve!(bicg::BiCGSTAB, x::HYPREVector, A::HYPREMatrix, b::HYPREVector) function solve!(bicg::BiCGSTAB, x::HYPREVector, A::HYPREMatrix, b::HYPREVector)
@check HYPRE_ParCSRBiCGSTABSetup(bicg.solver, A.parmatrix, b.parvector, x.parvector) @check HYPRE_ParCSRBiCGSTABSetup(bicg, A, b, x)
@check HYPRE_ParCSRBiCGSTABSolve(bicg.solver, A.parmatrix, b.parvector, x.parvector) @check HYPRE_ParCSRBiCGSTABSolve(bicg, A, b, x)
return x return x
end end
@ -128,9 +99,10 @@ Internals.setup_func(::BiCGSTAB) = HYPRE_ParCSRBiCGSTABSetup
Internals.solve_func(::BiCGSTAB) = HYPRE_ParCSRBiCGSTABSolve Internals.solve_func(::BiCGSTAB) = HYPRE_ParCSRBiCGSTABSolve
function Internals.set_precond(bicg::BiCGSTAB, p::HYPRESolver) function Internals.set_precond(bicg::BiCGSTAB, p::HYPRESolver)
bicg.precond = p
solve_f = Internals.solve_func(p) solve_f = Internals.solve_func(p)
setup_f = Internals.setup_func(p) setup_f = Internals.setup_func(p)
@check HYPRE_ParCSRBiCGSTABSetPrecond(bicg.solver, solve_f, setup_f, p.solver) @check HYPRE_ParCSRBiCGSTABSetPrecond(bicg, solve_f, setup_f, p)
return nothing return nothing
end end
@ -157,7 +129,7 @@ mutable struct BoomerAMG <: HYPRESolver
@check HYPRE_BoomerAMGCreate(solver_ref) @check HYPRE_BoomerAMGCreate(solver_ref)
solver.solver = solver_ref[] solver.solver = solver_ref[]
# Attach a finalizer # Attach a finalizer
finalizer(Internals.safe_finalizer(HYPRE_BoomerAMGDestroy), solver) Internals.safe_finalizer(HYPRE_BoomerAMGDestroy, solver)
# Set the options # Set the options
Internals.set_options(solver, kwargs) Internals.set_options(solver, kwargs)
return solver return solver
@ -165,8 +137,8 @@ mutable struct BoomerAMG <: HYPRESolver
end end
function solve!(amg::BoomerAMG, x::HYPREVector, A::HYPREMatrix, b::HYPREVector) function solve!(amg::BoomerAMG, x::HYPREVector, A::HYPREMatrix, b::HYPREVector)
@check HYPRE_BoomerAMGSetup(amg.solver, A.parmatrix, b.parvector, x.parvector) @check HYPRE_BoomerAMGSetup(amg, A, b, x)
@check HYPRE_BoomerAMGSolve(amg.solver, A.parmatrix, b.parvector, x.parvector) @check HYPRE_BoomerAMGSolve(amg, A, b, x)
return x return x
end end
@ -195,14 +167,15 @@ Create a `FlexGMRES` solver. See HYPRE API reference for details and supported s
mutable struct FlexGMRES <: HYPRESolver mutable struct FlexGMRES <: HYPRESolver
comm::MPI.Comm comm::MPI.Comm
solver::HYPRE_Solver solver::HYPRE_Solver
function FlexGMRES(comm::MPI.Comm=MPI.COMM_NULL; kwargs...) precond::Union{HYPRESolver, Nothing}
function FlexGMRES(comm::MPI.Comm = MPI.COMM_NULL; kwargs...)
# comm defaults to COMM_NULL since it is unused in HYPRE_ParCSRFlexGMRESCreate # comm defaults to COMM_NULL since it is unused in HYPRE_ParCSRFlexGMRESCreate
solver = new(comm, C_NULL) solver = new(comm, C_NULL, nothing)
solver_ref = Ref{HYPRE_Solver}(C_NULL) solver_ref = Ref{HYPRE_Solver}(C_NULL)
@check HYPRE_ParCSRFlexGMRESCreate(comm, solver_ref) @check HYPRE_ParCSRFlexGMRESCreate(comm, solver_ref)
solver.solver = solver_ref[] solver.solver = solver_ref[]
# Attach a finalizer # Attach a finalizer
finalizer(Internals.safe_finalizer(HYPRE_ParCSRFlexGMRESDestroy), solver) Internals.safe_finalizer(HYPRE_ParCSRFlexGMRESDestroy, solver)
# Set the options # Set the options
Internals.set_options(solver, kwargs) Internals.set_options(solver, kwargs)
return solver return solver
@ -210,8 +183,8 @@ mutable struct FlexGMRES <: HYPRESolver
end end
function solve!(flex::FlexGMRES, x::HYPREVector, A::HYPREMatrix, b::HYPREVector) function solve!(flex::FlexGMRES, x::HYPREVector, A::HYPREMatrix, b::HYPREVector)
@check HYPRE_ParCSRFlexGMRESSetup(flex.solver, A.parmatrix, b.parvector, x.parvector) @check HYPRE_ParCSRFlexGMRESSetup(flex, A, b, x)
@check HYPRE_ParCSRFlexGMRESSolve(flex.solver, A.parmatrix, b.parvector, x.parvector) @check HYPRE_ParCSRFlexGMRESSolve(flex, A, b, x)
return x return x
end end
@ -219,9 +192,10 @@ Internals.setup_func(::FlexGMRES) = HYPRE_ParCSRFlexGMRESSetup
Internals.solve_func(::FlexGMRES) = HYPRE_ParCSRFlexGMRESSolve Internals.solve_func(::FlexGMRES) = HYPRE_ParCSRFlexGMRESSolve
function Internals.set_precond(flex::FlexGMRES, p::HYPRESolver) function Internals.set_precond(flex::FlexGMRES, p::HYPRESolver)
flex.precond = p
solve_f = Internals.solve_func(p) solve_f = Internals.solve_func(p)
setup_f = Internals.setup_func(p) setup_f = Internals.setup_func(p)
@check HYPRE_ParCSRFlexGMRESSetPrecond(flex.solver, solve_f, setup_f, p.solver) @check HYPRE_ParCSRFlexGMRESSetPrecond(flex, solve_f, setup_f, p)
return nothing return nothing
end end
@ -248,8 +222,8 @@ end
#end #end
#function solve!(fsai::FSAI, x::HYPREVector, A::HYPREMatrix, b::HYPREVector) #function solve!(fsai::FSAI, x::HYPREVector, A::HYPREMatrix, b::HYPREVector)
# @check HYPRE_FSAISetup(fsai.solver, A.parmatrix, b.parvector, x.parvector) # @check HYPRE_FSAISetup(fsai, A, b, x)
# @check HYPRE_FSAISolve(fsai.solver, A.parmatrix, b.parvector, x.parvector) # @check HYPRE_FSAISolve(fsai, A, b, x)
# return x # return x
#end #end
@ -278,14 +252,15 @@ Create a `GMRES` solver. See HYPRE API reference for details and supported setti
mutable struct GMRES <: HYPRESolver mutable struct GMRES <: HYPRESolver
comm::MPI.Comm comm::MPI.Comm
solver::HYPRE_Solver solver::HYPRE_Solver
function GMRES(comm::MPI.Comm=MPI.COMM_NULL; kwargs...) precond::Union{HYPRESolver, Nothing}
function GMRES(comm::MPI.Comm = MPI.COMM_NULL; kwargs...)
# comm defaults to COMM_NULL since it is unused in HYPRE_ParCSRGMRESCreate # comm defaults to COMM_NULL since it is unused in HYPRE_ParCSRGMRESCreate
solver = new(comm, C_NULL) solver = new(comm, C_NULL, nothing)
solver_ref = Ref{HYPRE_Solver}(C_NULL) solver_ref = Ref{HYPRE_Solver}(C_NULL)
@check HYPRE_ParCSRGMRESCreate(comm, solver_ref) @check HYPRE_ParCSRGMRESCreate(comm, solver_ref)
solver.solver = solver_ref[] solver.solver = solver_ref[]
# Attach a finalizer # Attach a finalizer
finalizer(Internals.safe_finalizer(HYPRE_ParCSRGMRESDestroy), solver) Internals.safe_finalizer(HYPRE_ParCSRGMRESDestroy, solver)
# Set the options # Set the options
Internals.set_options(solver, kwargs) Internals.set_options(solver, kwargs)
return solver return solver
@ -293,8 +268,8 @@ mutable struct GMRES <: HYPRESolver
end end
function solve!(gmres::GMRES, x::HYPREVector, A::HYPREMatrix, b::HYPREVector) function solve!(gmres::GMRES, x::HYPREVector, A::HYPREMatrix, b::HYPREVector)
@check HYPRE_ParCSRGMRESSetup(gmres.solver, A.parmatrix, b.parvector, x.parvector) @check HYPRE_ParCSRGMRESSetup(gmres, A, b, x)
@check HYPRE_ParCSRGMRESSolve(gmres.solver, A.parmatrix, b.parvector, x.parvector) @check HYPRE_ParCSRGMRESSolve(gmres, A, b, x)
return x return x
end end
@ -302,9 +277,10 @@ Internals.setup_func(::GMRES) = HYPRE_ParCSRGMRESSetup
Internals.solve_func(::GMRES) = HYPRE_ParCSRGMRESSolve Internals.solve_func(::GMRES) = HYPRE_ParCSRGMRESSolve
function Internals.set_precond(gmres::GMRES, p::HYPRESolver) function Internals.set_precond(gmres::GMRES, p::HYPRESolver)
gmres.precond = p
solve_f = Internals.solve_func(p) solve_f = Internals.solve_func(p)
setup_f = Internals.setup_func(p) setup_f = Internals.setup_func(p)
@check HYPRE_ParCSRGMRESSetPrecond(gmres.solver, solve_f, setup_f, p.solver) @check HYPRE_ParCSRGMRESSetPrecond(gmres, solve_f, setup_f, p)
return nothing return nothing
end end
@ -324,13 +300,14 @@ Create a `Hybrid` solver. See HYPRE API reference for details and supported sett
""" """
mutable struct Hybrid <: HYPRESolver mutable struct Hybrid <: HYPRESolver
solver::HYPRE_Solver solver::HYPRE_Solver
precond::Union{HYPRESolver, Nothing}
function Hybrid(; kwargs...) function Hybrid(; kwargs...)
solver = new(C_NULL) solver = new(C_NULL, nothing)
solver_ref = Ref{HYPRE_Solver}(C_NULL) solver_ref = Ref{HYPRE_Solver}(C_NULL)
@check HYPRE_ParCSRHybridCreate(solver_ref) @check HYPRE_ParCSRHybridCreate(solver_ref)
solver.solver = solver_ref[] solver.solver = solver_ref[]
# Attach a finalizer # Attach a finalizer
finalizer(Internals.safe_finalizer(HYPRE_ParCSRHybridDestroy), solver) Internals.safe_finalizer(HYPRE_ParCSRHybridDestroy, solver)
# Set the options # Set the options
Internals.set_options(solver, kwargs) Internals.set_options(solver, kwargs)
return solver return solver
@ -338,8 +315,8 @@ mutable struct Hybrid <: HYPRESolver
end end
function solve!(hybrid::Hybrid, x::HYPREVector, A::HYPREMatrix, b::HYPREVector) function solve!(hybrid::Hybrid, x::HYPREVector, A::HYPREMatrix, b::HYPREVector)
@check HYPRE_ParCSRHybridSetup(hybrid.solver, A.parmatrix, b.parvector, x.parvector) @check HYPRE_ParCSRHybridSetup(hybrid, A, b, x)
@check HYPRE_ParCSRHybridSolve(hybrid.solver, A.parmatrix, b.parvector, x.parvector) @check HYPRE_ParCSRHybridSolve(hybrid, A, b, x)
return x return x
end end
@ -347,12 +324,13 @@ Internals.setup_func(::Hybrid) = HYPRE_ParCSRHybridSetup
Internals.solve_func(::Hybrid) = HYPRE_ParCSRHybridSolve Internals.solve_func(::Hybrid) = HYPRE_ParCSRHybridSolve
function Internals.set_precond(hybrid::Hybrid, p::HYPRESolver) function Internals.set_precond(hybrid::Hybrid, p::HYPRESolver)
hybrid.precond = p
solve_f = Internals.solve_func(p) solve_f = Internals.solve_func(p)
setup_f = Internals.setup_func(p) setup_f = Internals.setup_func(p)
# Deactivate the finalizer of p since the HYBRIDDestroy function does this, # Deactivate the finalizer of p since the HYBRIDDestroy function does this,
# see https://github.com/hypre-space/hypre/issues/699 # see https://github.com/hypre-space/hypre/issues/699
finalizer(x -> (x.solver = C_NULL), p) finalizer(x -> (x.solver = C_NULL), p)
@check HYPRE_ParCSRHybridSetPrecond(hybrid.solver, solve_f, setup_f, p.solver) @check HYPRE_ParCSRHybridSetPrecond(hybrid, solve_f, setup_f, p)
return nothing return nothing
end end
@ -379,7 +357,7 @@ mutable struct ILU <: HYPRESolver
@check HYPRE_ILUCreate(solver_ref) @check HYPRE_ILUCreate(solver_ref)
solver.solver = solver_ref[] solver.solver = solver_ref[]
# Attach a finalizer # Attach a finalizer
finalizer(Internals.safe_finalizer(HYPRE_ILUDestroy), solver) Internals.safe_finalizer(HYPRE_ILUDestroy, solver)
# Set the options # Set the options
Internals.set_options(solver, kwargs) Internals.set_options(solver, kwargs)
return solver return solver
@ -387,8 +365,8 @@ mutable struct ILU <: HYPRESolver
end end
function solve!(ilu::ILU, x::HYPREVector, A::HYPREMatrix, b::HYPREVector) function solve!(ilu::ILU, x::HYPREVector, A::HYPREMatrix, b::HYPREVector)
@check HYPRE_ILUSetup(ilu.solver, A.parmatrix, b.parvector, x.parvector) @check HYPRE_ILUSetup(ilu, A, b, x)
@check HYPRE_ILUSolve(ilu.solver, A.parmatrix, b.parvector, x.parvector) @check HYPRE_ILUSolve(ilu, A, b, x)
return x return x
end end
@ -419,14 +397,14 @@ settings.
mutable struct ParaSails <: HYPRESolver mutable struct ParaSails <: HYPRESolver
comm::MPI.Comm comm::MPI.Comm
solver::HYPRE_Solver solver::HYPRE_Solver
function ParaSails(comm::MPI.Comm=MPI.COMM_WORLD; kwargs...) function ParaSails(comm::MPI.Comm = MPI.COMM_WORLD; kwargs...)
# Note: comm is used in this solver so default to COMM_WORLD # Note: comm is used in this solver so default to COMM_WORLD
solver = new(comm, C_NULL) solver = new(comm, C_NULL)
solver_ref = Ref{HYPRE_Solver}(C_NULL) solver_ref = Ref{HYPRE_Solver}(C_NULL)
@check HYPRE_ParCSRParaSailsCreate(comm, solver_ref) @check HYPRE_ParCSRParaSailsCreate(comm, solver_ref)
solver.solver = solver_ref[] solver.solver = solver_ref[]
# Attach a finalizer # Attach a finalizer
finalizer(Internals.safe_finalizer(HYPRE_ParCSRParaSailsDestroy), solver) Internals.safe_finalizer(HYPRE_ParCSRParaSailsDestroy, solver)
# Set the options # Set the options
Internals.set_options(solver, kwargs) Internals.set_options(solver, kwargs)
return solver return solver
@ -454,14 +432,15 @@ Create a `PCG` solver. See HYPRE API reference for details and supported setting
mutable struct PCG <: HYPRESolver mutable struct PCG <: HYPRESolver
comm::MPI.Comm comm::MPI.Comm
solver::HYPRE_Solver solver::HYPRE_Solver
function PCG(comm::MPI.Comm=MPI.COMM_NULL; kwargs...) precond::Union{HYPRESolver, Nothing}
function PCG(comm::MPI.Comm = MPI.COMM_NULL; kwargs...)
# comm defaults to COMM_NULL since it is unused in HYPRE_ParCSRPCGCreate # comm defaults to COMM_NULL since it is unused in HYPRE_ParCSRPCGCreate
solver = new(comm, C_NULL) solver = new(comm, C_NULL, nothing)
solver_ref = Ref{HYPRE_Solver}(C_NULL) solver_ref = Ref{HYPRE_Solver}(C_NULL)
@check HYPRE_ParCSRPCGCreate(comm, solver_ref) @check HYPRE_ParCSRPCGCreate(comm, solver_ref)
solver.solver = solver_ref[] solver.solver = solver_ref[]
# Attach a finalizer # Attach a finalizer
finalizer(Internals.safe_finalizer(HYPRE_ParCSRPCGDestroy), solver) Internals.safe_finalizer(HYPRE_ParCSRPCGDestroy, solver)
# Set the options # Set the options
Internals.set_options(solver, kwargs) Internals.set_options(solver, kwargs)
return solver return solver
@ -471,8 +450,8 @@ end
const ParCSRPCG = PCG const ParCSRPCG = PCG
function solve!(pcg::PCG, x::HYPREVector, A::HYPREMatrix, b::HYPREVector) function solve!(pcg::PCG, x::HYPREVector, A::HYPREMatrix, b::HYPREVector)
@check HYPRE_ParCSRPCGSetup(pcg.solver, A.parmatrix, b.parvector, x.parvector) @check HYPRE_ParCSRPCGSetup(pcg, A, b, x)
@check HYPRE_ParCSRPCGSolve(pcg.solver, A.parmatrix, b.parvector, x.parvector) @check HYPRE_ParCSRPCGSolve(pcg, A, b, x)
return x return x
end end
@ -480,8 +459,74 @@ Internals.setup_func(::PCG) = HYPRE_ParCSRPCGSetup
Internals.solve_func(::PCG) = HYPRE_ParCSRPCGSolve Internals.solve_func(::PCG) = HYPRE_ParCSRPCGSolve
function Internals.set_precond(pcg::PCG, p::HYPRESolver) function Internals.set_precond(pcg::PCG, p::HYPRESolver)
pcg.precond = p
solve_f = Internals.solve_func(p) solve_f = Internals.solve_func(p)
setup_f = Internals.setup_func(p) setup_f = Internals.setup_func(p)
@check HYPRE_ParCSRPCGSetPrecond(pcg.solver, solve_f, setup_f, p.solver) @check HYPRE_ParCSRPCGSetPrecond(pcg, solve_f, setup_f, p)
return nothing return nothing
end end
##########################################################
# Extracting information about the solution from solvers #
##########################################################
"""
HYPRE.GetFinalRelativeResidualNorm(s::HYPRESolver)
Return the final relative residual norm from the last solve with solver `s`.
This function dispatches on the solver to the corresponding C API wrapper
`LibHYPRE.HYPRE_\$(Solver)GetFinalRelativeResidualNorm`.
"""
function GetFinalRelativeResidualNorm(s::HYPRESolver)
r = Ref{HYPRE_Real}()
if s isa BiCGSTAB
@check HYPRE_ParCSRBiCGSTABGetFinalRelativeResidualNorm(s, r)
elseif s isa BoomerAMG
@check HYPRE_BoomerAMGGetFinalRelativeResidualNorm(s, r)
elseif s isa FlexGMRES
@check HYPRE_ParCSRFlexGMRESGetFinalRelativeResidualNorm(s, r)
elseif s isa GMRES
@check HYPRE_ParCSRGMRESGetFinalRelativeResidualNorm(s, r)
elseif s isa Hybrid
@check HYPRE_ParCSRHybridGetFinalRelativeResidualNorm(s, r)
elseif s isa ILU
@check HYPRE_ILUGetFinalRelativeResidualNorm(s, r)
elseif s isa PCG
@check HYPRE_ParCSRPCGGetFinalRelativeResidualNorm(s, r)
else
throw(ArgumentError("cannot get residual norm for $(typeof(s))"))
end
return r[]
end
"""
HYPRE.GetNumIterations(s::HYPRESolver)
Return number of iterations during the last solve with solver `s`.
This function dispatches on the solver to the corresponding C API wrapper
`LibHYPRE.HYPRE_\$(Solver)GetNumIterations`.
"""
function GetNumIterations(s::HYPRESolver)
r = Ref{HYPRE_Int}()
if s isa BiCGSTAB
@check HYPRE_ParCSRBiCGSTABGetNumIterations(s, r)
elseif s isa BoomerAMG
@check HYPRE_BoomerAMGGetNumIterations(s, r)
elseif s isa FlexGMRES
@check HYPRE_ParCSRFlexGMRESGetNumIterations(s, r)
elseif s isa GMRES
@check HYPRE_ParCSRGMRESGetNumIterations(s, r)
elseif s isa Hybrid
@check HYPRE_ParCSRHybridGetNumIterations(s, r)
elseif s isa ILU
@check HYPRE_ILUGetNumIterations(s, r)
elseif s isa PCG
@check HYPRE_ParCSRPCGGetNumIterations(s, r)
else
throw(ArgumentError("cannot get number of iterations for $(typeof(s))"))
end
return r[]
end

501
test/runtests.jl

@ -11,6 +11,8 @@ using SparseMatricesCSR
using Test using Test
using LinearSolve using LinearSolve
include("test_utils.jl")
# Init HYPRE and MPI # Init HYPRE and MPI
HYPRE.Init() HYPRE.Init()
@ -52,11 +54,10 @@ end
@testset "HYPREMatrix(::SparseMatrixCS(C|R))" begin @testset "HYPREMatrix(::SparseMatrixCS(C|R))" begin
ilower, iupper = 4, 6 ilower, iupper = 4, 6
CSC = convert(SparseMatrixCSC{HYPRE_Complex, HYPRE_Int}, sparse([ CSC = convert(
1 2 0 0 3 SparseMatrixCSC{HYPRE_Complex, HYPRE_Int},
0 4 0 5 0 sparse([1 2 0 0 3; 0 4 0 5 0; 0 6 7 0 8])
0 6 7 0 8 )
]))
CSR = sparsecsr(findnz(CSC)..., size(CSC)...) CSR = sparsecsr(findnz(CSC)..., size(CSC)...)
@test CSC == CSR @test CSC == CSR
csc = Internals.to_hypre_data(CSC, ilower, iupper) csc = Internals.to_hypre_data(CSC, ilower, iupper)
@ -73,8 +74,8 @@ end
@test csr[5] == CSR.nzval @test csr[5] == CSR.nzval
@test_broken csr[5]::Vector{HYPRE_Complex} === CSR.nzval @test_broken csr[5]::Vector{HYPRE_Complex} === CSR.nzval
@test_throws ArgumentError Internals.to_hypre_data(CSC, ilower, iupper-1) @test_throws ArgumentError Internals.to_hypre_data(CSC, ilower, iupper - 1)
@test_throws ArgumentError Internals.to_hypre_data(CSR, ilower, iupper+1) @test_throws ArgumentError Internals.to_hypre_data(CSR, ilower, iupper + 1)
ilower, iupper = 6, 10 ilower, iupper = 6, 10
CSC = sprand(5, 10, 0.3) CSC = sprand(5, 10, 0.3)
@ -91,7 +92,7 @@ end
H = HYPREMatrix(CSR, ilower, iupper) H = HYPREMatrix(CSR, ilower, iupper)
@test H.ijmatrix != HYPRE_IJMatrix(C_NULL) @test H.ijmatrix != HYPRE_IJMatrix(C_NULL)
@test H.parmatrix != HYPRE_ParCSRMatrix(C_NULL) @test H.parmatrix != HYPRE_ParCSRMatrix(C_NULL)
H = HYPREMatrix(MPI.COMM_WORLD, CSR, ilower, iupper) H = HYPREMatrix(MPI.COMM_WORLD, CSR, ilower, iupper)
@test H.ijmatrix != HYPRE_IJMatrix(C_NULL) @test H.ijmatrix != HYPRE_IJMatrix(C_NULL)
@test H.parmatrix != HYPRE_ParCSRMatrix(C_NULL) @test H.parmatrix != HYPRE_ParCSRMatrix(C_NULL)
@ -112,105 +113,98 @@ end
@test H.iupper == H.jupper == 10 @test H.iupper == H.jupper == 10
end end
function tomain(x) function distribute_as_parray(parts, backend)
g = gather(copy(x)) if backend == :debug
be = get_backend(g.values) parts = DebugArray(parts)
if be isa SequentialBackend elseif backend == :mpi
return g.values.parts[1] parts = distribute_with_mpi(parts)
else # if be isa MPIBackend else
return g.values.part @assert backend == :native
parts = collect(parts)
end end
return parts
end end
@testset "HYPREMatrix(::PSparseMatrix)" begin @testset "HYPREMatrix(::PSparseMatrix)" begin
# Sequential backend function diag_data(parts)
function diag_data(backend, parts) rows = uniform_partition(parts, 10)
is_seq = backend isa SequentialBackend cols = uniform_partition(parts, 10)
rows = PRange(parts, 10) np = length(parts)
cols = PRange(parts, 10) IJV = map(parts) do p
I, J, V = map_parts(parts) do p
i = Int[] i = Int[]
j = Int[] j = Int[]
v = Float64[] v = Float64[]
if (is_seq && p == 1) || !is_seq if np == 1
# MPI case is special, we only have one MPI process.
@assert p == 1
append!(i, [1, 2, 3, 4, 5, 6, 7, 8, 9, 10])
append!(j, [1, 2, 3, 4, 5, 6, 7, 8, 9, 10])
append!(v, [1, 2, 3, 4, 5, 6, 7, 8, 9, 10])
elseif p == 1
@assert np == 2
append!(i, [1, 2, 3, 4, 5, 6]) append!(i, [1, 2, 3, 4, 5, 6])
append!(j, [1, 2, 3, 4, 5, 6]) append!(j, [1, 2, 3, 4, 5, 6])
append!(v, [1, 2, 3, 4, 5, 6]) append!(v, [1, 2, 3, 4, 5, 6])
end else
if (is_seq && p == 2) || !is_seq @assert np == 2
@assert p == 2
append!(i, [4, 5, 6, 7, 8, 9, 10]) append!(i, [4, 5, 6, 7, 8, 9, 10])
append!(j, [4, 5, 6, 7, 8, 9, 10]) append!(j, [4, 5, 6, 7, 8, 9, 10])
append!(v, [4, 5, 6, 7, 8, 9, 10]) append!(v, [4, 5, 6, 7, 8, 9, 10])
end end
return i, j, v return i, j, v
end end
add_gids!(rows, I) I, J, V = tuple_of_arrays(IJV)
assemble!(I, J, V, rows)
add_gids!(cols, J)
return I, J, V, rows, cols return I, J, V, rows, cols
end end
backend = SequentialBackend() for backend in [:native, :debug, :mpi]
parts = get_part_ids(backend, 2) @testset "Backend=$backend" begin
CSC = PSparseMatrix(diag_data(backend, parts)...; ids=:global) if backend == :mpi
CSR = PSparseMatrix(sparsecsr, diag_data(backend, parts)...; ids=:global) parts = 1:1
else
@test tomain(CSC) == tomain(CSR) == parts = 1:2
Diagonal([1, 2, 3, 8, 10, 12, 7, 8, 9, 10]) end
parts = distribute_as_parray(parts, backend)
map_parts(CSC.values, CSC.rows.partition, CSC.cols.partition, CSC = psparse(diag_data(parts)...) |> fetch
CSR.values, CSR.rows.partition, CSR.cols.partition, parts) do args... CSR = psparse(sparsecsr, diag_data(parts)...) |> fetch
cscvalues, cscrows, csccols, csrvalues, csrrows, csrcols, p = args
csc = Internals.to_hypre_data(cscvalues, cscrows, csccols) for A in [CSC, CSR]
csr = Internals.to_hypre_data(csrvalues, csrrows, csrcols) map(local_values(A), A.row_partition, A.col_partition, parts) do values, rows, cols, p
if p == 1 hypre_data = Internals.to_hypre_data(values, rows, cols)
nrows = 5 if backend == :mpi
ncols = [1, 1, 1, 1, 1] @assert p == 1
rows = [1, 2, 3, 4, 5] nrows = 10
cols = [1, 2, 3, 4, 5] ncols = [1, 1, 1, 1, 1, 1, 1, 1, 1, 1]
values = [1, 2, 3, 8, 10] rows = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
else # if p == 1 cols = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
nrows = 5 values = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
ncols = [1, 1, 1, 1, 1] elseif p == 1
rows = [6, 7, 8, 9, 10] nrows = 5
cols = [6, 7, 8, 9, 10] ncols = [1, 1, 1, 1, 1]
values = [12, 7, 8, 9, 10] rows = [1, 2, 3, 4, 5]
cols = [1, 2, 3, 4, 5]
values = [1, 2, 3, 8, 10]
else
@assert p == 2
nrows = 5
ncols = [1, 1, 1, 1, 1]
rows = [6, 7, 8, 9, 10]
cols = [6, 7, 8, 9, 10]
values = [12, 7, 8, 9, 10]
end
@test hypre_data[1]::HYPRE_Int == nrows
@test hypre_data[2]::Vector{HYPRE_Int} == ncols
@test hypre_data[3]::Vector{HYPRE_BigInt} == rows
@test hypre_data[4]::Vector{HYPRE_BigInt} == cols
@test hypre_data[5]::Vector{HYPRE_Complex} == values
end
end
end end
@test csc[1]::HYPRE_Int == csr[1]::HYPRE_Int == nrows
@test csc[2]::Vector{HYPRE_Int} == csr[2]::Vector{HYPRE_Int} == ncols
@test csc[3]::Vector{HYPRE_BigInt} == csr[3]::Vector{HYPRE_BigInt} == rows
@test csc[4]::Vector{HYPRE_BigInt} == csr[4]::Vector{HYPRE_BigInt} == cols
@test csc[5]::Vector{HYPRE_Complex} == csr[5]::Vector{HYPRE_Complex} == values
end end
# MPI backend
backend = MPIBackend()
parts = MPIData(1, MPI.COMM_WORLD, (1,)) # get_part_ids duplicates the comm
CSC = PSparseMatrix(diag_data(backend, parts)...; ids=:global)
CSR = PSparseMatrix(sparsecsr, diag_data(backend, parts)...; ids=:global)
@test tomain(CSC) == tomain(CSR) ==
Diagonal([1, 2, 3, 8, 10, 12, 7, 8, 9, 10])
map_parts(CSC.values, CSC.rows.partition, CSC.cols.partition,
CSR.values, CSR.rows.partition, CSR.cols.partition, parts) do args...
cscvalues, cscrows, csccols, csrvalues, csrrows, csrcols, p = args
csc = Internals.to_hypre_data(cscvalues, cscrows, csccols)
csr = Internals.to_hypre_data(csrvalues, csrrows, csrcols)
nrows = 10
ncols = fill(1, 10)
rows = collect(1:10)
cols = collect(1:10)
values = [1, 2, 3, 8, 10, 12, 7, 8, 9, 10]
@test csc[1]::HYPRE_Int == csr[1]::HYPRE_Int == nrows
@test csc[2]::Vector{HYPRE_Int} == csr[2]::Vector{HYPRE_Int} == ncols
@test csc[3]::Vector{HYPRE_BigInt} == csr[3]::Vector{HYPRE_BigInt} == rows
@test csc[4]::Vector{HYPRE_BigInt} == csr[4]::Vector{HYPRE_BigInt} == cols
@test csc[5]::Vector{HYPRE_Complex} == csr[5]::Vector{HYPRE_Complex} == values
end
end end
@testset "HYPREVector" begin @testset "HYPREVector" begin
h = HYPREVector(MPI.COMM_WORLD, 1, 5) h = HYPREVector(MPI.COMM_WORLD, 1, 5)
@test h.ijvector != HYPRE_IJVector(C_NULL) @test h.ijvector != HYPRE_IJVector(C_NULL)
@ -271,52 +265,106 @@ end
end end
@testset "HYPREVector(::PVector)" begin @testset "HYPREVector(::PVector)" begin
# Sequential backend for backend in [:native, :debug, :mpi]
backend = SequentialBackend() if backend == :mpi
parts = get_part_ids(backend, 2) parts = distribute_as_parray(1:1, backend)
rows = PRange(parts, 10) else
b = rand(10) parts = distribute_as_parray(1:2, backend)
I, V = map_parts(parts) do p
if p == 1
return collect(1:6), b[1:6]
else # p == 2
return collect(4:10), b[4:10]
end end
rows = uniform_partition(parts, 10)
b = rand(10)
IV = map(parts, rows) do p, owned
if backend == :mpi
row_indices = 1:10
elseif p == 1
row_indices = 1:6
else # p == 2
row_indices = 4:10
end
values = zeros(length(row_indices))
for (i, row) in enumerate(row_indices)
if row in owned
values[i] = b[row]
end
end
return collect(row_indices), values
end
I, V = tuple_of_arrays(IV)
pb = pvector(I, V, rows) |> fetch
H = HYPREVector(pb)
# Check for valid vector
@test H.ijvector != HYPRE_IJVector(C_NULL)
@test H.parvector != HYPRE_ParVector(C_NULL)
# Copy back, check if identical
b_copy = copy!(similar(b), H)
@test b_copy == b
# Test copy to and from HYPREVector
pb2 = 2 * pb
H′ = copy!(H, pb2)
@test H === H′
pbc = similar(pb)
copy!(pbc, H)
@test pbc == 2 * pb
end
end
@testset "HYPRE(Matrix|Vector)?Assembler" begin
comm = MPI.COMM_WORLD
# Assembly HYPREMatrix from ::Matrix
A = HYPREMatrix(comm, 1, 3)
AM = zeros(3, 3)
for i in 1:2
assembler = HYPRE.start_assemble!(A)
fill!(AM, 0)
for idx in ([1, 2], [3, 1])
a = rand(2, 2)
HYPRE.assemble!(assembler, idx, idx, a)
AM[idx, idx] += a
ar = rand(1, 2)
HYPRE.assemble!(assembler, [2], idx, ar)
AM[[2], idx] += ar
end
f = HYPRE.finish_assemble!(assembler)
@test f === A
@test getindex_debug(A, 1:3, 1:3) == AM
end end
add_gids!(rows, I) # Assembly HYPREVector from ::Vector
pb = PVector(I, V, rows; ids=:global) b = HYPREVector(comm, 1, 3)
assemble!(pb) bv = zeros(3)
@test tomain(pb) == [i in 4:6 ? 2x : x for (i, x) in zip(eachindex(b), b)] for i in 1:2
H = HYPREVector(pb) assembler = HYPRE.start_assemble!(b)
@test H.ijvector != HYPRE_IJVector(C_NULL) fill!(bv, 0)
@test H.parvector != HYPRE_ParVector(C_NULL) for idx in ([1, 2], [3, 1])
pbc = fill!(copy(pb), 0) c = rand(2)
copy!(pbc, H) HYPRE.assemble!(assembler, idx, c)
@test tomain(pbc) == tomain(pb) bv[idx] += c
end
pb2 = 2 * pb f = HYPRE.finish_assemble!(assembler)
H′ = copy!(H, pb2) @test f === b
@test H === H′ @test getindex_debug(b, 1:3) == bv
copy!(pbc, H) end
@test tomain(pbc) == 2 * tomain(pb) # Assembly HYPREMatrix/HYPREVector from ::Array
A = HYPREMatrix(comm, 1, 3)
# MPI backend AM = zeros(3, 3)
backend = MPIBackend() b = HYPREVector(comm, 1, 3)
parts = get_part_ids(backend, 1) bv = zeros(3)
rows = PRange(parts, 10) for i in 1:2
I, V = map_parts(parts) do p assembler = HYPRE.start_assemble!(A, b)
return collect(1:10), b fill!(AM, 0)
fill!(bv, 0)
for idx in ([1, 2], [3, 1])
a = rand(2, 2)
c = rand(2)
HYPRE.assemble!(assembler, idx, a, c)
AM[idx, idx] += a
bv[idx] += c
end
F, f = HYPRE.finish_assemble!(assembler)
@test F === A
@test f === b
@test getindex_debug(A, 1:3, 1:3) == AM
@test getindex_debug(b, 1:3) == bv
end end
add_gids!(rows, I)
pb = PVector(I, V, rows; ids=:global)
assemble!(pb)
@test tomain(pb) == b
H = HYPREVector(pb)
@test H.ijvector != HYPRE_IJVector(C_NULL)
@test H.parvector != HYPRE_ParVector(C_NULL)
pbc = fill!(copy(pb), 0)
copy!(pbc, H)
@test tomain(pbc) == tomain(pb)
end end
@testset "BiCGSTAB" begin @testset "BiCGSTAB" begin
@ -333,16 +381,19 @@ end
b_h = HYPREVector(b) b_h = HYPREVector(b)
x_h = HYPREVector(x) x_h = HYPREVector(x)
# Solve # Solve
tol = 1e-9 tol = 1.0e-9
bicg = HYPRE.BiCGSTAB(; Tol = tol) bicg = HYPRE.BiCGSTAB(; Tol = tol)
HYPRE.solve!(bicg, x_h, A_h, b_h) HYPRE.solve!(bicg, x_h, A_h, b_h)
copy!(x, x_h) copy!(x, x_h)
# Test result with direct solver # Test result with direct solver
@test x A \ b atol=tol @test x A \ b atol = tol
# Test without passing initial guess # Test without passing initial guess
x_h = HYPRE.solve(bicg, A_h, b_h) x_h = HYPRE.solve(bicg, A_h, b_h)
copy!(x, x_h) copy!(x, x_h)
@test x A \ b atol=tol @test x A \ b atol = tol
# Test solver queries
@test HYPRE.GetFinalRelativeResidualNorm(bicg) < tol
@test HYPRE.GetNumIterations(bicg) > 0
# Solve with preconditioner # Solve with preconditioner
precond = HYPRE.BoomerAMG(; MaxIter = 1, Tol = 0.0) precond = HYPRE.BoomerAMG(; MaxIter = 1, Tol = 0.0)
@ -351,11 +402,11 @@ end
HYPRE.solve!(bicg, x_h, A_h, b_h) HYPRE.solve!(bicg, x_h, A_h, b_h)
copy!(x, x_h) copy!(x, x_h)
# Test result with direct solver # Test result with direct solver
@test x A \ b atol=tol @test x A \ b atol = tol
# Test without passing initial guess # Test without passing initial guess
x_h = HYPRE.solve(bicg, A_h, b_h) x_h = HYPRE.solve(bicg, A_h, b_h)
copy!(x, x_h) copy!(x, x_h)
@test x A \ b atol=tol @test x A \ b atol = tol
# Tests Internals.set_precond_defaults for BoomerAMG # Tests Internals.set_precond_defaults for BoomerAMG
precond = HYPRE.BoomerAMG() precond = HYPRE.BoomerAMG()
bicg = HYPRE.BiCGSTAB(; Tol = tol, Precond = precond) bicg = HYPRE.BiCGSTAB(; Tol = tol, Precond = precond)
@ -363,7 +414,7 @@ end
HYPRE.solve!(bicg, x_h, A_h, b_h) HYPRE.solve!(bicg, x_h, A_h, b_h)
copy!(x, x_h) copy!(x, x_h)
# Test result with direct solver # Test result with direct solver
@test x A \ b atol=tol @test x A \ b atol = tol
end end
@testset "BoomerAMG" begin @testset "BoomerAMG" begin
@ -377,11 +428,11 @@ end
for i in 1:99 for i in 1:99
k = (1 + rand()) * [1.0 -1.0; -1.0 1.0] k = (1 + rand()) * [1.0 -1.0; -1.0 1.0]
append!(V, k) append!(V, k)
append!(I, [i, i+1, i, i+1]) # rows append!(I, [i, i + 1, i, i + 1]) # rows
append!(J, [i, i, i+1, i+1]) # cols append!(J, [i, i, i + 1, i + 1]) # cols
end end
A = sparse(I, J, V) A = sparse(I, J, V)
A[:, 1] .= 0; A[1, :] .= 0; A[:, end] .= 0; A[end, :] .= 0; A[:, 1] .= 0; A[1, :] .= 0; A[:, end] .= 0; A[end, :] .= 0
A[1, 1] = 2; A[end, end] = 2 A[1, 1] = 2; A[end, end] = 2
@test isposdef(A) @test isposdef(A)
b = rand(100) b = rand(100)
@ -391,7 +442,7 @@ end
b_h = HYPREVector(b, ilower, iupper) b_h = HYPREVector(b, ilower, iupper)
x_h = HYPREVector(b, ilower, iupper) x_h = HYPREVector(b, ilower, iupper)
# Solve # Solve
tol = 1e-9 tol = 1.0e-9
amg = HYPRE.BoomerAMG(; Tol = tol) amg = HYPRE.BoomerAMG(; Tol = tol)
HYPRE.solve!(amg, x_h, A_h, b_h) HYPRE.solve!(amg, x_h, A_h, b_h)
copy!(x, x_h) copy!(x, x_h)
@ -402,6 +453,9 @@ end
x_h = HYPRE.solve(amg, A_h, b_h) x_h = HYPRE.solve(amg, A_h, b_h)
copy!(x, x_h) copy!(x, x_h)
@test x A \ b atol = tol * norm(b) @test x A \ b atol = tol * norm(b)
# Test solver queries
@test HYPRE.GetFinalRelativeResidualNorm(amg) < tol
@test HYPRE.GetNumIterations(amg) > 0
end end
@testset "FlexGMRES" begin @testset "FlexGMRES" begin
@ -418,16 +472,19 @@ end
b_h = HYPREVector(b) b_h = HYPREVector(b)
x_h = HYPREVector(x) x_h = HYPREVector(x)
# Solve # Solve
tol = 1e-9 tol = 1.0e-9
gmres = HYPRE.FlexGMRES(; Tol = tol) gmres = HYPRE.FlexGMRES(; Tol = tol)
HYPRE.solve!(gmres, x_h, A_h, b_h) HYPRE.solve!(gmres, x_h, A_h, b_h)
copy!(x, x_h) copy!(x, x_h)
# Test result with direct solver # Test result with direct solver
@test x A \ b atol=tol @test x A \ b atol = tol
# Test without passing initial guess # Test without passing initial guess
x_h = HYPRE.solve(gmres, A_h, b_h) x_h = HYPRE.solve(gmres, A_h, b_h)
copy!(x, x_h) copy!(x, x_h)
@test x A \ b atol=tol @test x A \ b atol = tol
# Test solver queries
@test HYPRE.GetFinalRelativeResidualNorm(gmres) < tol
@test HYPRE.GetNumIterations(gmres) > 0
# Solve with preconditioner # Solve with preconditioner
precond = HYPRE.BoomerAMG() precond = HYPRE.BoomerAMG()
@ -436,11 +493,11 @@ end
HYPRE.solve!(gmres, x_h, A_h, b_h) HYPRE.solve!(gmres, x_h, A_h, b_h)
copy!(x, x_h) copy!(x, x_h)
# Test result with direct solver # Test result with direct solver
@test x A \ b atol=tol @test x A \ b atol = tol
# Test without passing initial guess # Test without passing initial guess
x_h = HYPRE.solve(gmres, A_h, b_h) x_h = HYPRE.solve(gmres, A_h, b_h)
copy!(x, x_h) copy!(x, x_h)
@test x A \ b atol=tol @test x A \ b atol = tol
end end
@ -458,16 +515,19 @@ end
b_h = HYPREVector(b) b_h = HYPREVector(b)
x_h = HYPREVector(x) x_h = HYPREVector(x)
# Solve # Solve
tol = 1e-9 tol = 1.0e-9
gmres = HYPRE.GMRES(; Tol = tol) gmres = HYPRE.GMRES(; Tol = tol)
HYPRE.solve!(gmres, x_h, A_h, b_h) HYPRE.solve!(gmres, x_h, A_h, b_h)
copy!(x, x_h) copy!(x, x_h)
# Test result with direct solver # Test result with direct solver
@test x A \ b atol=tol @test x A \ b atol = tol
# Test without passing initial guess # Test without passing initial guess
x_h = HYPRE.solve(gmres, A_h, b_h) x_h = HYPRE.solve(gmres, A_h, b_h)
copy!(x, x_h) copy!(x, x_h)
@test x A \ b atol=tol @test x A \ b atol = tol
# Test solver queries
@test HYPRE.GetFinalRelativeResidualNorm(gmres) < tol
@test HYPRE.GetNumIterations(gmres) > 0
# Solve with preconditioner # Solve with preconditioner
precond = HYPRE.BoomerAMG(; MaxIter = 1, Tol = 0.0) precond = HYPRE.BoomerAMG(; MaxIter = 1, Tol = 0.0)
@ -476,11 +536,11 @@ end
HYPRE.solve!(gmres, x_h, A_h, b_h) HYPRE.solve!(gmres, x_h, A_h, b_h)
copy!(x, x_h) copy!(x, x_h)
# Test result with direct solver # Test result with direct solver
@test x A \ b atol=tol @test x A \ b atol = tol
# Test without passing initial guess # Test without passing initial guess
x_h = HYPRE.solve(gmres, A_h, b_h) x_h = HYPRE.solve(gmres, A_h, b_h)
copy!(x, x_h) copy!(x, x_h)
@test x A \ b atol=tol @test x A \ b atol = tol
end end
@testset "Hybrid" begin @testset "Hybrid" begin
@ -497,16 +557,19 @@ end
b_h = HYPREVector(b) b_h = HYPREVector(b)
x_h = HYPREVector(x) x_h = HYPREVector(x)
# Solve # Solve
tol = 1e-9 tol = 1.0e-9
hybrid = HYPRE.Hybrid(; Tol = tol) hybrid = HYPRE.Hybrid(; Tol = tol)
HYPRE.solve!(hybrid, x_h, A_h, b_h) HYPRE.solve!(hybrid, x_h, A_h, b_h)
copy!(x, x_h) copy!(x, x_h)
# Test result with direct solver # Test result with direct solver
@test x A \ b atol=tol @test x A \ b atol = tol
# Test without passing initial guess # Test without passing initial guess
x_h = HYPRE.solve(hybrid, A_h, b_h) x_h = HYPRE.solve(hybrid, A_h, b_h)
copy!(x, x_h) copy!(x, x_h)
@test x A \ b atol=tol @test x A \ b atol = tol
# Test solver queries
@test HYPRE.GetFinalRelativeResidualNorm(hybrid) < tol
@test HYPRE.GetNumIterations(hybrid) > 0
# Solve with given preconditioner # Solve with given preconditioner
precond = HYPRE.BoomerAMG() precond = HYPRE.BoomerAMG()
@ -515,11 +578,11 @@ end
HYPRE.solve!(hybrid, x_h, A_h, b_h) HYPRE.solve!(hybrid, x_h, A_h, b_h)
copy!(x, x_h) copy!(x, x_h)
# Test result with direct solver # Test result with direct solver
@test x A \ b atol=tol @test x A \ b atol = tol
# Test without passing initial guess # Test without passing initial guess
x_h = HYPRE.solve(hybrid, A_h, b_h) x_h = HYPRE.solve(hybrid, A_h, b_h)
copy!(x, x_h) copy!(x, x_h)
@test x A \ b atol=tol @test x A \ b atol = tol
end end
@ -537,16 +600,19 @@ end
b_h = HYPREVector(b) b_h = HYPREVector(b)
x_h = HYPREVector(x) x_h = HYPREVector(x)
# Solve # Solve
tol = 1e-9 tol = 1.0e-9
ilu = HYPRE.ILU(; Tol = tol) ilu = HYPRE.ILU(; Tol = tol)
HYPRE.solve!(ilu, x_h, A_h, b_h) HYPRE.solve!(ilu, x_h, A_h, b_h)
copy!(x, x_h) copy!(x, x_h)
# Test result with direct solver # Test result with direct solver
@test x A \ b atol=tol @test x A \ b atol = tol
# Test without passing initial guess # Test without passing initial guess
x_h = HYPRE.solve(ilu, A_h, b_h) x_h = HYPRE.solve(ilu, A_h, b_h)
copy!(x, x_h) copy!(x, x_h)
@test x A \ b atol=tol @test x A \ b atol = tol
# Test solver queries
@test HYPRE.GetFinalRelativeResidualNorm(ilu) < tol
@test HYPRE.GetNumIterations(ilu) > 0
# Use as preconditioner to PCG # Use as preconditioner to PCG
precond = HYPRE.ILU() precond = HYPRE.ILU()
@ -555,11 +621,11 @@ end
HYPRE.solve!(pcg, x_h, A_h, b_h) HYPRE.solve!(pcg, x_h, A_h, b_h)
copy!(x, x_h) copy!(x, x_h)
# Test result with direct solver # Test result with direct solver
@test x A \ b atol=tol @test x A \ b atol = tol
# Test without passing initial guess # Test without passing initial guess
x_h = HYPRE.solve(pcg, A_h, b_h) x_h = HYPRE.solve(pcg, A_h, b_h)
copy!(x, x_h) copy!(x, x_h)
@test x A \ b atol=tol @test x A \ b atol = tol
end end
@ -578,13 +644,16 @@ end
b_h = HYPREVector(b, ilower, iupper) b_h = HYPREVector(b, ilower, iupper)
x_h = HYPREVector(b, ilower, iupper) x_h = HYPREVector(b, ilower, iupper)
# Solve with ParaSails as preconditioner # Solve with ParaSails as preconditioner
tol = 1e-9 tol = 1.0e-9
parasails = HYPRE.ParaSails() parasails = HYPRE.ParaSails()
pcg = HYPRE.PCG(; Tol = tol, Precond = parasails) pcg = HYPRE.PCG(; Tol = tol, Precond = parasails)
HYPRE.solve!(pcg, x_h, A_h, b_h) HYPRE.solve!(pcg, x_h, A_h, b_h)
copy!(x, x_h) copy!(x, x_h)
# Test result with direct solver # Test result with direct solver
@test x A \ b atol=tol @test x A \ b atol = tol
# Test solver queries (should error)
@test_throws ArgumentError("cannot get residual norm for HYPRE.ParaSails") HYPRE.GetFinalRelativeResidualNorm(parasails)
@test_throws ArgumentError("cannot get number of iterations for HYPRE.ParaSails") HYPRE.GetNumIterations(parasails)
end end
@testset "(ParCSR)PCG" begin @testset "(ParCSR)PCG" begin
@ -602,16 +671,20 @@ end
b_h = HYPREVector(b, ilower, iupper) b_h = HYPREVector(b, ilower, iupper)
x_h = HYPREVector(b, ilower, iupper) x_h = HYPREVector(b, ilower, iupper)
# Solve # Solve
tol = 1e-9 tol = 1.0e-9
pcg = HYPRE.PCG(; Tol = tol) pcg = HYPRE.PCG(; Tol = tol)
HYPRE.solve!(pcg, x_h, A_h, b_h) HYPRE.solve!(pcg, x_h, A_h, b_h)
copy!(x, x_h) copy!(x, x_h)
# Test result with direct solver # Test result with direct solver
@test x A \ b atol=tol @test x A \ b atol = tol
# Test without passing initial guess # Test without passing initial guess
x_h = HYPRE.solve(pcg, A_h, b_h) x_h = HYPRE.solve(pcg, A_h, b_h)
copy!(x, x_h) copy!(x, x_h)
@test x A \ b atol=tol @test x A \ b atol = tol
# Test solver queries
@test HYPRE.GetFinalRelativeResidualNorm(pcg) < tol
@test HYPRE.GetNumIterations(pcg) > 0
# Solve with AMG preconditioner # Solve with AMG preconditioner
precond = HYPRE.BoomerAMG(; MaxIter = 1, Tol = 0.0) precond = HYPRE.BoomerAMG(; MaxIter = 1, Tol = 0.0)
pcg = HYPRE.PCG(; Tol = tol, Precond = precond) pcg = HYPRE.PCG(; Tol = tol, Precond = precond)
@ -619,62 +692,96 @@ end
HYPRE.solve!(pcg, x_h, A_h, b_h) HYPRE.solve!(pcg, x_h, A_h, b_h)
copy!(x, x_h) copy!(x, x_h)
# Test result with direct solver # Test result with direct solver
@test x A \ b atol=tol @test x A \ b atol = tol
# Test without passing initial guess # Test without passing initial guess
x_h = HYPRE.solve(pcg, A_h, b_h) x_h = HYPRE.solve(pcg, A_h, b_h)
copy!(x, x_h) copy!(x, x_h)
@test x A \ b atol=tol @test x A \ b atol = tol
end end
function topartitioned(x::Vector, A::SparseMatrixCSC, b::Vector) function topartitioned(x::Vector, A::SparseMatrixCSC, b::Vector, backend)
parts = get_part_ids(SequentialBackend(), 1) parts = distribute_as_parray(1:1, backend)
rows = PRange(parts, size(A, 1)) n = size(A, 1)
cols = PRange(parts, size(A, 2)) rows = uniform_partition(parts, n)
II, JJ, VV, bb, xx = map_parts(parts) do _ cols = uniform_partition(parts, n)
tmp = map(parts) do _
return findnz(A)..., b, x return findnz(A)..., b, x
end end
add_gids!(rows, II) II, JJ, VV, bb, xx = tuple_of_arrays(tmp)
assemble!(II, JJ, VV, rows) A_p = psparse(II, JJ, VV, rows, cols) |> fetch
add_gids!(cols, JJ)
A_p = PSparseMatrix(II, JJ, VV, rows, cols; ids = :global)
b_p = PVector(bb, rows) b_p = PVector(bb, rows)
x_p = PVector(xx, cols) x_p = PVector(xx, cols)
return x_p, A_p, b_p return x_p, A_p, b_p
end end
@testset "solve with PartitionedArrays" begin @testset "solve with PartitionedArrays" begin
# Setup for backend in [:native, :debug, :mpi]
A = sprand(100, 100, 0.05); A = A'A + 5I # Setup
b = rand(100) A = sprand(100, 100, 0.05); A = A'A + 5I
x = zeros(100) b = rand(100)
x_p, A_p, b_p = topartitioned(x, A, b) x = zeros(100)
@test A == tomain(A_p) x_p, A_p, b_p = topartitioned(x, A, b, :native)
@test b == tomain(b_p) # Data is distributed over a single process. We can then check the following
@test x == tomain(x_p) # as local_values is the entire matrix/vector.
# Solve map(local_values(x_p)) do x_l
tol = 1e-9 @test x_l == x
pcg = HYPRE.PCG(; Tol = tol) end
## solve! map(local_values(b_p)) do b_l
HYPRE.solve!(pcg, x_p, A_p, b_p) @test b_l == b
@test tomain(x_p) A \ b atol=tol end
## solve map(local_values(A_p)) do A_l
x_p = HYPRE.solve(pcg, A_p, b_p) @test A_l == A
@test tomain(x_p) A \ b atol=tol end
# Solve
tol = 1.0e-9
pcg = HYPRE.PCG(; Tol = tol)
## solve!
HYPRE.solve!(pcg, x_p, A_p, b_p)
ref = A \ b
map(local_values(x_p)) do x
@test x ref atol = tol
end
## solve
x_p = HYPRE.solve(pcg, A_p, b_p)
map(local_values(x_p)) do x
@test x ref atol = tol
end
end
end end
@testset "solve with SparseMatrixCS(C|R)" begin @testset "solve with SparseMatrixCS(C|R)" begin
# Setup # Setup
A = sprand(100, 100, 0.05); A = A'A + 5I CSC = sprand(100, 100, 0.05); CSC = CSC'CSC + 5I
CSR = sparsecsr(findnz(CSC)..., size(CSC)...)
b = rand(100) b = rand(100)
x = zeros(100) xcsc = zeros(100)
xcsr = zeros(100)
# Solve # Solve
tol = 1e-9 tol = 1.0e-9
pcg = HYPRE.PCG(; Tol = tol) pcg = HYPRE.PCG(; Tol = tol)
## solve! ## solve!
HYPRE.solve!(pcg, x, A, b) HYPRE.solve!(pcg, xcsc, CSC, b)
@test x A \ b atol=tol @test xcsc CSC \ b atol = tol
HYPRE.solve!(pcg, xcsr, CSR, b)
@test xcsr CSC \ b atol = tol # TODO: CSR \ b fails
## solve ## solve
x = HYPRE.solve(pcg, A, b) xcsc = HYPRE.solve(pcg, CSC, b)
@test x A \ b atol=tol @test xcsc CSC \ b atol = tol
xcsr = HYPRE.solve(pcg, CSR, b)
@test xcsr CSC \ b atol = tol # TODO: CSR \ b fails
end
# Run distributed test scripts as separate MPI jobs: each file is launched in a
# fresh Julia process under `mpiexec -n 2`, since the assembler tests require
# exactly two ranks (see test_assembler.jl).
@testset "MPI execution" begin
    testfiles = joinpath.(
        @__DIR__,
        [
            "test_assembler.jl",
        ]
    )
    for file in testfiles
        # ignorestatus: a failing run is reported through the exit-code @test
        # below instead of throwing and aborting the whole outer test suite.
        r = run(ignorestatus(`$(mpiexec()) -n 2 $(Base.julia_cmd()) $(file)`))
        @test r.exitcode == 0
    end
end

117
test/test_assembler.jl

@ -0,0 +1,117 @@
# SPDX-License-Identifier: MIT

# Standalone MPI test script for the HYPRE assembler interface.
# Launched from runtests.jl via `mpiexec -n 2 julia test_assembler.jl`;
# it must NOT be `include`d into a serial test session.

using HYPRE
using MPI
using Test

MPI.Init()
HYPRE.Init()

include("test_utils.jl")

comm = MPI.COMM_WORLD
comm_rank = MPI.Comm_rank(comm)
comm_size = MPI.Comm_size(comm)

if comm_size != 2
    error("Must run with 2 ranks.")
end

# Partition the 20 global rows between the two ranks: rank 0 owns rows 1:10,
# rank 1 owns rows 11:20. N is each rank's set of "element" indices; element n
# touches rows (n-1):(n+1) (see values_and_indices), so contributions overlap
# the ownership boundary and must be summed across ranks by the assembler.
if comm_rank == 0
    ilower = 1
    iupper = 10
    N = 2:10
else
    ilower = 11
    iupper = 20
    N = 11:19
end
"""
    values_and_indices(n)

Return a tuple `(idx, a, b)` describing a mock "element" contribution centered
at global index `n`: the three touched indices `[n - 1, n, n + 1]`, a symmetric
3×3 coefficient matrix, and a length-3 right-hand-side vector.
"""
function values_and_indices(n)
    indices = collect((n - 1):(n + 1))
    coefficients = Float64[n -2n -n; -2n n -2n; -n -2n n]
    rhs = Float64[n, n / 2, n / 3]
    return indices, coefficients, rhs
end
##########################
## HYPREMatrixAssembler ##
##########################

# Dense local matrix

A = HYPREMatrix(comm, ilower, iupper)
AM = zeros(20, 20)  # dense reference matrix, reduced over all ranks below
# Assemble twice to check that an assembler can be restarted on a matrix that
# has already been finalized once.
for i in 1:2
    assembler = HYPRE.start_assemble!(A)
    fill!(AM, 0)
    for n in N
        idx, a, _ = values_and_indices(n)
        HYPRE.assemble!(assembler, idx, idx, a)
        AM[idx, idx] += a
    end
    f = HYPRE.finish_assemble!(assembler)
    # finish_assemble! should hand back the very matrix it was started with
    @test f === A
    # Sum the per-rank reference contributions so every rank holds the full AM
    MPI.Allreduce!(AM, +, comm)
    @test getindex_debug(A, ilower:iupper, 1:20) == AM[ilower:iupper, 1:20]
    MPI.Barrier(comm)
end
##########################
## HYPREVectorAssembler ##
##########################

# Dense local vector

b = HYPREVector(comm, ilower, iupper)
bv = zeros(20)  # dense reference vector, reduced over all ranks below
# Assemble twice to check that an assembler can be restarted on a vector that
# has already been finalized once.
for i in 1:2
    assembler = HYPRE.start_assemble!(b)
    fill!(bv, 0)
    for n in N
        idx, _, a = values_and_indices(n)
        HYPRE.assemble!(assembler, idx, a)
        bv[idx] += a
    end
    f = HYPRE.finish_assemble!(assembler)
    # finish_assemble! should hand back the very vector it was started with
    @test f === b
    # Sum the per-rank reference contributions so every rank holds the full bv
    MPI.Allreduce!(bv, +, comm)
    @test getindex_debug(b, ilower:iupper) == bv[ilower:iupper]
    MPI.Barrier(comm)
end
####################
## HYPREAssembler ##
####################

# Dense local arrays

# Combined matrix + vector assembly through a single HYPREAssembler.
A = HYPREMatrix(comm, ilower, iupper)
AM = zeros(20, 20)  # dense reference matrix
b = HYPREVector(comm, ilower, iupper)
bv = zeros(20)      # dense reference vector
# Assemble twice to check the combined assembler can also be restarted.
for i in 1:2
    assembler = HYPRE.start_assemble!(A, b)
    fill!(AM, 0)
    fill!(bv, 0)
    for n in N
        idx, a, c = values_and_indices(n)
        # One call assembles the matrix and vector contributions together
        HYPRE.assemble!(assembler, idx, a, c)
        AM[idx, idx] += a
        bv[idx] += c
    end
    F, f = HYPRE.finish_assemble!(assembler)
    # finish_assemble! should hand back the same matrix/vector pair
    @test F === A
    @test f === b
    # Sum per-rank reference contributions so every rank holds full AM and bv
    MPI.Allreduce!(AM, +, comm)
    MPI.Allreduce!(bv, +, comm)
    @test getindex_debug(A, ilower:iupper, 1:20) == AM[ilower:iupper, 1:20]
    @test getindex_debug(b, ilower:iupper) == bv[ilower:iupper]
    MPI.Barrier(comm)
end

23
test/test_utils.jl

@ -0,0 +1,23 @@
# SPDX-License-Identifier: MIT
using HYPRE
using HYPRE.LibHYPRE
using HYPRE.LibHYPRE: @check
# Fetch the dense submatrix A[i, j] from a HYPREMatrix through the IJ
# interface. Debug/test helper only: every requested entry is pulled with
# HYPRE_IJMatrixGetValues and the row-major result is reshaped into a Julia
# (column-major) matrix.
function getindex_debug(A::HYPREMatrix, i::AbstractVector, j::AbstractVector)
    ni = length(i)
    nj = length(j)
    # One query row per entry of i, each requesting all nj columns
    ncols_per_row = fill(HYPRE_Int(nj), ni)
    row_ids = convert(Vector{HYPRE_BigInt}, i)
    col_ids = convert(Vector{HYPRE_BigInt}, repeat(j, ni))
    buf = Vector{HYPRE_Complex}(undef, ni * nj)
    @check HYPRE_IJMatrixGetValues(A.ijmatrix, HYPRE_Int(ni), ncols_per_row, row_ids, col_ids, buf)
    # buf is filled row-by-row: reshape as nj×ni column-major, then transpose
    return permutedims(reshape(buf, (nj, ni)))
end
# Fetch the entries b[i] from a HYPREVector through the IJ interface.
# Debug/test helper only.
function getindex_debug(b::HYPREVector, i::AbstractVector)
    n = length(i)
    idx = convert(Vector{HYPRE_BigInt}, i)
    out = Vector{HYPRE_Complex}(undef, n)
    @check HYPRE_IJVectorGetValues(b.ijvector, HYPRE_Int(n), idx, out)
    return out
end
Loading…
Cancel
Save