
Upgrade PartitionedArrays.jl from version 0.2.x to version 0.3.x. (#16)

Olav Møyner committed 3 years ago via GitHub · commit e05bead0c7
Changed files:
  1. Project.toml (2 lines changed)
  2. src/HYPRE.jl (141 lines changed)
  3. test/runtests.jl (241 lines changed)
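
Every hunk in this commit follows the same recurring pattern: PartitionedArrays 0.2.x backends and index objects (SequentialBackend/SequentialData, MPIBackend/MPIData, IndexRange, and raw field access such as r.lid_to_gid[r.oid_to_lid]) are replaced by their 0.3.x counterparts (plain arrays, DebugArray, MPIArray, AbstractLocalIndices, and accessor functions), and map_parts becomes a plain map over the partition objects. A minimal sketch of the pattern, using only names that appear in the hunks below:

    using PartitionedArrays

    # 0.2.x: parts = get_part_ids(SequentialBackend(), 2); rows = PRange(parts, 10)
    # 0.3.x: ranks are a plain array (DebugArray / MPIArray for the other backends)
    ranks = collect(1:2)
    rows = uniform_partition(ranks, 10)   # replaces PRange(parts, 10)

    # 0.2.x: map_parts(parts) do p ... end
    # 0.3.x: plain map; index fields are replaced by accessor functions
    map(rows) do indices
        own_to_global(indices)            # replaces indices.lid_to_gid[indices.oid_to_lid]
    end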

Project.toml (2 lines changed)

@@ -14,7 +14,7 @@ SparseMatricesCSR = "a0a7dd2c-ebf4-11e9-1f05-cf50bc540ca1"
 [compat]
 CEnum = "0.4"
 MPI = "0.19, 0.20"
-PartitionedArrays = "0.2"
+PartitionedArrays = "0.3"
 SparseMatricesCSR = "0.6"
 julia = "1.6"
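
The compat entry "0.3" admits any 0.3.x release. For an environment that already has HYPRE.jl, the dependency can be brought up to the new bound with something like the following (a sketch; the exact steps depend on the environment):

    using Pkg
    Pkg.add(name = "PartitionedArrays", version = "0.3")
    # or, once a HYPRE.jl release with this change is registered:
    Pkg.update()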

src/HYPRE.jl (141 lines changed)

@@ -3,8 +3,10 @@
 module HYPRE

 using MPI: MPI
-using PartitionedArrays: IndexRange, MPIData, PSparseMatrix, PVector, PartitionedArrays,
-    SequentialData, map_parts
+using PartitionedArrays: own_length, tuple_of_arrays, own_to_global, global_length,
+    own_to_local, local_to_global, global_to_own, global_to_local,
+    MPIArray, PSparseMatrix, PVector, PartitionedArrays, AbstractLocalIndices,
+    local_values, own_values, partition
 using SparseArrays: SparseArrays, SparseMatrixCSC, nnz, nonzeros, nzrange, rowvals
 using SparseMatricesCSR: SparseMatrixCSR, colvals, getrowptr
@@ -334,12 +336,24 @@ end
 # PartitionedArrays.PSparseMatrix -> HYPREMatrix #
 ##################################################

-# TODO: This has some duplicated code with to_hypre_data(::SparseMatrixCSC, ilower, iupper)
-function Internals.to_hypre_data(A::SparseMatrixCSC, r::IndexRange, c::IndexRange)
-    @assert r.oid_to_lid isa UnitRange && r.oid_to_lid.start == 1
-    ilower = r.lid_to_gid[r.oid_to_lid.start]
-    iupper = r.lid_to_gid[r.oid_to_lid.stop]
+function subarray_unsafe_supported()
+    # Wrapping of SubArrays as raw pointers may or may not be supported
+    # depending on the Julia version. If this is not supported, we have to fall
+    # back to allocation of an intermediate buffer. This logic can be removed if
+    # HYPRE.jl drops support for Julia < 1.9.
+    return VERSION >= v"1.9.0"
+end
+
+# TODO: This has some duplicated code with to_hypre_data(::SparseMatrixCSC, ilower, iupper)
+function Internals.to_hypre_data(A::SparseMatrixCSC, r::AbstractLocalIndices, c::AbstractLocalIndices)
+    g_to_l_rows = global_to_local(r) # Not sure about this assert
+    l_to_g_rows = local_to_global(r)
+    @assert g_to_l_rows.own_to_local isa UnitRange && g_to_l_rows.own_to_local.start == 1
+    n_local_rows = own_length(r)
+    n_local_cols = own_length(c)
+    ilower = l_to_g_rows[1]
+    iupper = l_to_g_rows[own_length(r)]
     a_rows = rowvals(A)
     a_vals = nonzeros(A)
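
The new subarray_unsafe_supported helper gates a buffer fallback that the copy! methods further down in this file rely on. A standalone sketch of that pattern, with ov standing in for one block of own_values (illustrative, not part of the commit):

    ov = view(collect(1.0:5.0), 2:4)   # stand-in for own_values(v) on one part
    if VERSION >= v"1.9.0"
        values = ov                    # the SubArray can be handed to HYPRE directly
    else
        values = collect(Float64, ov)  # older Julia: copy into a dense buffer first
    end
    # ... pass `values` to the HYPRE C API; when a buffer was used, copy back:
    if VERSION < v"1.9.0"
        ov .= values
    end
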
@@ -357,7 +371,7 @@ function Internals.to_hypre_data(A::SparseMatrixCSC, r::IndexRange, c::IndexRang
     @inbounds for j in 1:size(A, 2)
         for i in nzrange(A, j)
             row = a_rows[i]
-            row > r.oid_to_lid.stop && continue # Skip ghost rows
+            row > n_local_rows && continue # Skip ghost rows
             # grow = r.lid_to_gid[lrow]
             ncols[row] += 1
         end
@@ -374,13 +388,14 @@ function Internals.to_hypre_data(A::SparseMatrixCSC, r::IndexRange, c::IndexRang
     # Second pass to populate the output -- here we need to take care of the permutation
     # of columns. TODO: Problem that they are not sorted?
+    l_to_g_cols = local_to_global(c)
     @inbounds for j in 1:size(A, 2)
         for i in nzrange(A, j)
             row = a_rows[i]
-            row > r.oid_to_lid.stop && continue # Skip ghost rows
+            row > n_local_cols && continue # Skip ghost rows
             k = lastinds[row] += 1
             val = a_vals[i]
-            cols[k] = c.lid_to_gid[j]
+            cols[k] = l_to_g_cols[j]
             values[k] = val
         end
     end
@@ -391,32 +406,37 @@ end
 # TODO: Possibly this can be optimized if it is possible to pass overlong vectors to HYPRE.
 #       At least values should be possible to directly share, but cols needs to translated
 #       to global ids.
-function Internals.to_hypre_data(A::SparseMatrixCSR, r::IndexRange, c::IndexRange)
-    @assert r.oid_to_lid isa UnitRange && r.oid_to_lid.start == 1
-    ilower = r.lid_to_gid[r.oid_to_lid.start]
-    iupper = r.lid_to_gid[r.oid_to_lid.stop]
+function Internals.to_hypre_data(A::SparseMatrixCSR, r::AbstractLocalIndices, c::AbstractLocalIndices)
+    g_to_l_rows = global_to_local(r)
+    l_to_g_rows = local_to_global(r)
+    @assert g_to_l_rows.own_to_local isa UnitRange && g_to_l_rows.own_to_local.start == 1
+    n_local_rows = own_length(r)
+    ilower = l_to_g_rows[1]
+    iupper = l_to_g_rows[n_local_rows]
     a_cols = colvals(A)
     a_vals = nonzeros(A)
-    nnz = getrowptr(A)[r.oid_to_lid.stop + 1] - 1
+    nnz = getrowptr(A)[n_local_rows + 1] - 1
     # Initialize the data buffers HYPRE wants
     nrows = HYPRE_Int(iupper - ilower + 1)      # Total number of rows
-    ncols = zeros(HYPRE_Int, nrows)             # Number of colums for each row
+    ncols = zeros(HYPRE_Int, nrows)             # Number of columns for each row
     rows = collect(HYPRE_BigInt, ilower:iupper) # The row indices
     cols = Vector{HYPRE_BigInt}(undef, nnz)     # The column indices
     values = Vector{HYPRE_Complex}(undef, nnz)  # The values
     # Loop over the (owned) rows and collect all values
+    l_to_g_cols = local_to_global(c)
     k = 0
-    @inbounds for i in r.oid_to_lid
+    @inbounds for i in own_to_local(r)
         nzr = nzrange(A, i)
         ncols[i] = length(nzr)
         for j in nzr
             k += 1
             col = a_cols[j]
             val = a_vals[j]
-            cols[k] = c.lid_to_gid[col]
+            cols[k] = l_to_g_cols[col]
             values[k] = val
         end
     end
@@ -425,23 +445,29 @@ function Internals.to_hypre_data(A::SparseMatrixCSR, r::IndexRange, c::IndexRang
     return nrows, ncols, rows, cols, values
 end

-function Internals.get_comm(A::Union{PSparseMatrix{<:Any,<:M}, PVector{<:Any,<:M}}) where M <: MPIData
-    return A.rows.partition.comm
+function Internals.get_comm(A::Union{PSparseMatrix{<:Any,<:M}, PVector{<:Any,<:M}}) where M <: MPIArray
+    return partition(A).comm
 end
 Internals.get_comm(_::Union{PSparseMatrix,PVector}) = MPI.COMM_SELF

-function Internals.get_proc_rows(A::Union{PSparseMatrix{<:Any,<:M}, PVector{<:Any,<:M}}) where M <: MPIData
-    r = A.rows.partition.part
-    ilower::HYPRE_BigInt = r.lid_to_gid[r.oid_to_lid[1]]
-    iupper::HYPRE_BigInt = r.lid_to_gid[r.oid_to_lid[end]]
-    return ilower, iupper
-end
-function Internals.get_proc_rows(A::Union{PSparseMatrix{<:Any,<:S}, PVector{<:Any,<:S}}) where S <: SequentialData
+function Internals.get_proc_rows(A::Union{PSparseMatrix, PVector})
+    if A isa PVector
+        r = A.index_partition
+    else
+        r = A.row_partition
+    end
     ilower::HYPRE_BigInt = typemax(HYPRE_BigInt)
     iupper::HYPRE_BigInt = typemin(HYPRE_BigInt)
-    for r in A.rows.partition.parts
-        ilower = min(r.lid_to_gid[r.oid_to_lid[1]], ilower)
-        iupper = max(r.lid_to_gid[r.oid_to_lid[end]], iupper)
+    map(r) do a
+        # This is a map over the local process' owned indices. For MPI it will
+        # be a single value but for DebugArray / Array it will have multiple
+        # values.
+        o_to_g = own_to_global(a)
+        ilower_part = o_to_g[1]
+        iupper_part = o_to_g[end]
+        ilower = min(ilower, convert(HYPRE_BigInt, ilower_part))
+        iupper = max(iupper, convert(HYPRE_BigInt, iupper_part))
     end
     return ilower, iupper
 end
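
The rewritten get_proc_rows finds the owned global row range by mapping over the partition, which works uniformly for MPIArray, DebugArray, and plain Array. The same idiom works standalone; a sketch (the function name owned_row_range is illustrative):

    using PartitionedArrays

    function owned_row_range(row_partition)
        ilower, iupper = typemax(Int), typemin(Int)
        map(row_partition) do indices
            o_to_g = own_to_global(indices)   # global ids owned by this part
            ilower = min(ilower, Int(o_to_g[1]))
            iupper = max(iupper, Int(o_to_g[end]))
        end
        return ilower, iupper
    end

    owned_row_range(uniform_partition(collect(1:2), 10))  # == (1, 10)
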
@@ -454,7 +480,7 @@ function HYPREMatrix(B::PSparseMatrix)
     # Create the IJ matrix
     A = HYPREMatrix(comm, ilower, iupper)
     # Set all the values
-    map_parts(B.values, B.rows.partition, B.cols.partition) do Bv, Br, Bc
+    map(local_values(B), B.row_partition, B.col_partition) do Bv, Br, Bc
         nrows, ncols, rows, cols, values = Internals.to_hypre_data(Bv, Br, Bc)
         @check HYPRE_IJMatrixSetValues(A, nrows, ncols, rows, cols, values)
         return nothing
@@ -476,9 +502,11 @@ function HYPREVector(v::PVector)
     # Create the IJ vector
     b = HYPREVector(comm, ilower, iupper)
     # Set all the values
-    map_parts(v.values, v.owned_values, v.rows.partition) do _, vo, vr
-        ilower_part = vr.lid_to_gid[vr.oid_to_lid.start]
-        iupper_part = vr.lid_to_gid[vr.oid_to_lid.stop]
+    map(own_values(v), v.index_partition) do vo, vr
+        o_to_g = own_to_global(vr)
+        ilower_part = o_to_g[1]
+        iupper_part = o_to_g[end]
         # Option 1: Set all values
         nvalues = HYPRE_Int(iupper_part - ilower_part + 1)
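
Taking o_to_g[1] and o_to_g[end] as the part's bounds assumes every part owns a contiguous block of global ids, which holds for the uniform partitions used in this package's tests. A quick standalone check (illustrative):

    using PartitionedArrays

    map(uniform_partition(collect(1:2), 10)) do indices
        o_to_g = own_to_global(indices)
        @assert o_to_g == o_to_g[1]:o_to_g[end]  # owned ids form one contiguous range
    end
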
@@ -512,42 +540,49 @@ function Internals.copy_check(dst::HYPREVector, src::PVector)
     if il_dst != il_src && iu_dst != iu_src
         # TODO: Why require this?
         throw(ArgumentError(
-            "row owner mismatch between dst ($(il_dst:iu_dst)) and src ($(il_dst:iu_dst))"
+            "row owner mismatch between dst ($(il_dst:iu_dst)) and src ($(il_src:iu_src))"
         ))
     end
 end

 # TODO: Other eltypes could be support by using a intermediate buffer
-function Base.copy!(dst::PVector{HYPRE_Complex}, src::HYPREVector)
+function Base.copy!(dst::PVector{<:AbstractVector{HYPRE_Complex}}, src::HYPREVector)
     Internals.copy_check(src, dst)
-    map_parts(dst.values, dst.owned_values, dst.rows.partition) do vv, _, vr
-        il_src_part = vr.lid_to_gid[vr.oid_to_lid.start]
-        iu_src_part = vr.lid_to_gid[vr.oid_to_lid.stop]
+    map(own_values(dst), dst.index_partition) do ov, vr
+        o_to_g = own_to_global(vr)
+        il_src_part = o_to_g[1]
+        iu_src_part = o_to_g[end]
         nvalues = HYPRE_Int(iu_src_part - il_src_part + 1)
         indices = collect(HYPRE_BigInt, il_src_part:iu_src_part)
-        # Assumption: the dst vector is assembled, and should thus have 0s on the ghost
-        # entries (??). If this is not true, we must call fill!(vv, 0) here. This should be
-        # fairly cheap anyway, so might as well do it...
-        fill!(vv, 0)
-        # TODO: Safe to use vv here? Owned values are always first?
-        @check HYPRE_IJVectorGetValues(src, nvalues, indices, vv)
+        if subarray_unsafe_supported()
+            values = ov
+        else
+            values = collect(HYPRE_Complex, ov)
+        end
+        @check HYPRE_IJVectorGetValues(src, nvalues, indices, values)
+        if !subarray_unsafe_supported()
+            @. ov = values
+        end
     end
     return dst
 end

-function Base.copy!(dst::HYPREVector, src::PVector{HYPRE_Complex})
+function Base.copy!(dst::HYPREVector, src::PVector{<:AbstractVector{HYPRE_Complex}})
     Internals.copy_check(dst, src)
     # Re-initialize the vector
     @check HYPRE_IJVectorInitialize(dst)
-    map_parts(src.values, src.owned_values, src.rows.partition) do vv, _, vr
-        ilower_src_part = vr.lid_to_gid[vr.oid_to_lid.start]
-        iupper_src_part = vr.lid_to_gid[vr.oid_to_lid.stop]
+    map(own_values(src), src.index_partition) do ov, vr
+        o_to_g = own_to_global(vr)
+        ilower_src_part = o_to_g[1]
+        iupper_src_part = o_to_g[end]
         nvalues = HYPRE_Int(iupper_src_part - ilower_src_part + 1)
         indices = collect(HYPRE_BigInt, ilower_src_part:iupper_src_part)
-        # TODO: Safe to use vv here? Owned values are always first?
-        @check HYPRE_IJVectorSetValues(dst, nvalues, indices, vv)
+        if subarray_unsafe_supported()
+            values = ov
+        else
+            values = collect(HYPRE_Complex, ov)
+        end
+        @check HYPRE_IJVectorSetValues(dst, nvalues, indices, values)
     end
     # TODO: It shouldn't be necessary to assemble here since we only set owned rows (?)
     # @check HYPRE_IJVectorAssemble(dst)
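
A usage sketch for the two copy! methods above, on a single-part partition. It assumes HYPRE.Init() has been called after MPI.Init(), and that pzeros is the PartitionedArrays 0.3.x zero-PVector constructor:

    using HYPRE, PartitionedArrays

    rows = uniform_partition(collect(1:1), 10)
    x = pzeros(rows)      # assumed 0.3.x constructor for a zero PVector
    h = HYPREVector(x)    # HYPRE-side vector with the same row range
    copy!(h, x)           # PVector -> HYPREVector
    copy!(x, h)           # HYPREVector -> PVector (round trip)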

test/runtests.jl (241 lines changed)

@@ -91,105 +91,106 @@ end
     @test H.iupper == H.jupper == 10
 end

-function tomain(x)
-    g = gather(copy(x))
-    be = get_backend(g.values)
-    if be isa SequentialBackend
-        return g.values.parts[1]
-    else # if be isa MPIBackend
-        return g.values.part
-    end
-end
+function default_local_values_csr(I, J, V, row_indices, col_indices)
+    # Adapted from p_sparse_matrix.jl line 487
+    m = local_length(row_indices)
+    n = local_length(col_indices)
+    sparsecsr(I, J, V, m, n)
+end
+
+function distribute_as_parray(parts, backend)
+    if backend == :debug
+        parts = DebugArray(parts)
+    elseif backend == :mpi
+        parts = distribute_with_mpi(parts)
+    else
+        @assert backend == :native
+        parts = collect(parts)
+    end
+    return parts
+end

 @testset "HYPREMatrix(::PSparseMatrix)" begin
-    # Sequential backend
-    function diag_data(backend, parts)
-        is_seq = backend isa SequentialBackend
-        rows = PRange(parts, 10)
-        cols = PRange(parts, 10)
-        I, J, V = map_parts(parts) do p
+    function diag_data(parts)
+        rows = uniform_partition(parts, 10)
+        cols = uniform_partition(parts, 10)
+        np = length(parts)
+        IJV = map(parts) do p
             i = Int[]
             j = Int[]
             v = Float64[]
-            if (is_seq && p == 1) || !is_seq
+            if np == 1
+                # MPI case is special, we only have one MPI process.
+                @assert p == 1
+                append!(i, [1, 2, 3, 4, 5, 6, 7, 8, 9, 10])
+                append!(j, [1, 2, 3, 4, 5, 6, 7, 8, 9, 10])
+                append!(v, [1, 2, 3, 4, 5, 6, 7, 8, 9, 10])
+            elseif p == 1
+                @assert np == 2
                 append!(i, [1, 2, 3, 4, 5, 6])
                 append!(j, [1, 2, 3, 4, 5, 6])
                 append!(v, [1, 2, 3, 4, 5, 6])
-            end
-            if (is_seq && p == 2) || !is_seq
+            else
+                @assert np == 2
+                @assert p == 2
                 append!(i, [4, 5, 6, 7, 8, 9, 10])
                 append!(j, [4, 5, 6, 7, 8, 9, 10])
                 append!(v, [4, 5, 6, 7, 8, 9, 10])
             end
             return i, j, v
         end
-        add_gids!(rows, I)
-        assemble!(I, J, V, rows)
-        add_gids!(cols, J)
+        I, J, V = tuple_of_arrays(IJV)
        return I, J, V, rows, cols
     end
-    backend = SequentialBackend()
-    parts = get_part_ids(backend, 2)
-    CSC = PSparseMatrix(diag_data(backend, parts)...; ids=:global)
-    CSR = PSparseMatrix(sparsecsr, diag_data(backend, parts)...; ids=:global)
-    @test tomain(CSC) == tomain(CSR) ==
-          Diagonal([1, 2, 3, 8, 10, 12, 7, 8, 9, 10])
-    map_parts(CSC.values, CSC.rows.partition, CSC.cols.partition,
-              CSR.values, CSR.rows.partition, CSR.cols.partition, parts) do args...
-        cscvalues, cscrows, csccols, csrvalues, csrrows, csrcols, p = args
-        csc = Internals.to_hypre_data(cscvalues, cscrows, csccols)
-        csr = Internals.to_hypre_data(csrvalues, csrrows, csrcols)
-        if p == 1
+    for backend in [:native, :debug, :mpi]
+        @testset "Backend=$backend" begin
+            if backend == :mpi
+                parts = 1:1
+            else
+                parts = 1:2
+            end
+            parts = distribute_as_parray(parts, backend)
+            CSC = psparse!(diag_data(parts)...) |> fetch
+            CSR = psparse!(default_local_values_csr, diag_data(parts)...) |> fetch
+            for A in [CSC, CSR]
+                map(local_values(A), A.row_partition, A.col_partition, parts) do values, rows, cols, p
+                    hypre_data = Internals.to_hypre_data(values, rows, cols)
+                    if backend == :mpi
+                        @assert p == 1
+                        nrows = 10
+                        ncols = [1, 1, 1, 1, 1, 1, 1, 1, 1, 1]
+                        rows = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
+                        cols = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
+                        values = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
+                    elseif p == 1
                         nrows = 5
                         ncols = [1, 1, 1, 1, 1]
                         rows = [1, 2, 3, 4, 5]
                         cols = [1, 2, 3, 4, 5]
                         values = [1, 2, 3, 8, 10]
-        else # if p == 1
+                    else
+                        @assert p == 2
                         nrows = 5
                         ncols = [1, 1, 1, 1, 1]
                         rows = [6, 7, 8, 9, 10]
                        cols = [6, 7, 8, 9, 10]
                        values = [12, 7, 8, 9, 10]
                    end
-        @test csc[1]::HYPRE_Int == csr[1]::HYPRE_Int == nrows
-        @test csc[2]::Vector{HYPRE_Int} == csr[2]::Vector{HYPRE_Int} == ncols
-        @test csc[3]::Vector{HYPRE_BigInt} == csr[3]::Vector{HYPRE_BigInt} == rows
-        @test csc[4]::Vector{HYPRE_BigInt} == csr[4]::Vector{HYPRE_BigInt} == cols
-        @test csc[5]::Vector{HYPRE_Complex} == csr[5]::Vector{HYPRE_Complex} == values
-    end
-    # MPI backend
-    backend = MPIBackend()
-    parts = MPIData(1, MPI.COMM_WORLD, (1,)) # get_part_ids duplicates the comm
-    CSC = PSparseMatrix(diag_data(backend, parts)...; ids=:global)
-    CSR = PSparseMatrix(sparsecsr, diag_data(backend, parts)...; ids=:global)
-    @test tomain(CSC) == tomain(CSR) ==
-          Diagonal([1, 2, 3, 8, 10, 12, 7, 8, 9, 10])
-    map_parts(CSC.values, CSC.rows.partition, CSC.cols.partition,
-              CSR.values, CSR.rows.partition, CSR.cols.partition, parts) do args...
-        cscvalues, cscrows, csccols, csrvalues, csrrows, csrcols, p = args
-        csc = Internals.to_hypre_data(cscvalues, cscrows, csccols)
-        csr = Internals.to_hypre_data(csrvalues, csrrows, csrcols)
-        nrows = 10
-        ncols = fill(1, 10)
-        rows = collect(1:10)
-        cols = collect(1:10)
-        values = [1, 2, 3, 8, 10, 12, 7, 8, 9, 10]
-        @test csc[1]::HYPRE_Int == csr[1]::HYPRE_Int == nrows
-        @test csc[2]::Vector{HYPRE_Int} == csr[2]::Vector{HYPRE_Int} == ncols
-        @test csc[3]::Vector{HYPRE_BigInt} == csr[3]::Vector{HYPRE_BigInt} == rows
-        @test csc[4]::Vector{HYPRE_BigInt} == csr[4]::Vector{HYPRE_BigInt} == cols
-        @test csc[5]::Vector{HYPRE_Complex} == csr[5]::Vector{HYPRE_Complex} == values
-    end
+                    @test hypre_data[1]::HYPRE_Int == nrows
+                    @test hypre_data[2]::Vector{HYPRE_Int} == ncols
+                    @test hypre_data[3]::Vector{HYPRE_BigInt} == rows
+                    @test hypre_data[4]::Vector{HYPRE_BigInt} == cols
+                    @test hypre_data[5]::Vector{HYPRE_Complex} == values
+                end
+            end
+        end
+    end
 end
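
Why the expected assembled diagonal in the HYPREMatrix test is [1, 2, 3, 8, 10, 12, 7, 8, 9, 10]: with two parts, the contributed index ranges 1:6 and 4:10 overlap in 4:6, and assembly sums the duplicate entries:

    # rows 4:6 receive one contribution from each part:
    @assert 4 + 4 == 8
    @assert 5 + 5 == 10
    @assert 6 + 6 == 12
    # rows 1:3 and 7:10 are contributed once and keep their values
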
 @testset "HYPREVector" begin
     h = HYPREVector(MPI.COMM_WORLD, 1, 5)
     @test h.ijvector != HYPRE_IJVector(C_NULL)
@@ -250,52 +251,47 @@ end
 end

 @testset "HYPREVector(::PVector)" begin
-    # Sequential backend
-    backend = SequentialBackend()
-    parts = get_part_ids(backend, 2)
-    rows = PRange(parts, 10)
+    for backend in [:native, :debug, :mpi]
+        if backend == :mpi
+            parts = distribute_as_parray(1:1, backend)
+        else
+            parts = distribute_as_parray(1:2, backend)
+        end
+        rows = uniform_partition(parts, 10)
         b = rand(10)
-    I, V = map_parts(parts) do p
-        if p == 1
-            return collect(1:6), b[1:6]
-        else # p == 2
-            return collect(4:10), b[4:10]
-        end
-    end
-    add_gids!(rows, I)
-    pb = PVector(I, V, rows; ids=:global)
-    assemble!(pb)
-    @test tomain(pb) == [i in 4:6 ? 2x : x for (i, x) in zip(eachindex(b), b)]
+        IV = map(parts, rows) do p, owned
+            if backend == :mpi
+                row_indices = 1:10
+            elseif p == 1
+                row_indices = 1:6
+            else # p == 2
+                row_indices = 4:10
+            end
+            values = zeros(length(row_indices))
+            for (i, row) in enumerate(row_indices)
+                if row in owned
+                    values[i] = b[row]
+                end
+            end
+            return collect(row_indices), values
+        end
+        I, V = tuple_of_arrays(IV)
+        pb = pvector!(I, V, rows) |> fetch
         H = HYPREVector(pb)
+        # Check for valid vector
         @test H.ijvector != HYPRE_IJVector(C_NULL)
         @test H.parvector != HYPRE_ParVector(C_NULL)
-    pbc = fill!(copy(pb), 0)
-    copy!(pbc, H)
-    @test tomain(pbc) == tomain(pb)
+        # Copy back, check if identical
+        b_copy = copy!(similar(b), H)
+        @test b_copy == b
         # Test copy to and from HYPREVector
         pb2 = 2 * pb
         H′ = copy!(H, pb2)
         @test H === H′
         pbc = similar(pb)
         copy!(pbc, H)
-    @test tomain(pbc) == 2 * tomain(pb)
-    # MPI backend
-    backend = MPIBackend()
-    parts = get_part_ids(backend, 1)
-    rows = PRange(parts, 10)
-    I, V = map_parts(parts) do p
-        return collect(1:10), b
-    end
-    add_gids!(rows, I)
-    pb = PVector(I, V, rows; ids=:global)
-    assemble!(pb)
-    @test tomain(pb) == b
-    H = HYPREVector(pb)
-    @test H.ijvector != HYPRE_IJVector(C_NULL)
-    @test H.parvector != HYPRE_ParVector(C_NULL)
-    pbc = fill!(copy(pb), 0)
-    copy!(pbc, H)
-    @test tomain(pbc) == tomain(pb)
+        @test pbc == 2*pb
+    end
 end
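
The pattern used in this testset -- build per-part (I, V) data, split it with tuple_of_arrays, then assemble with pvector! -- also works standalone. A minimal sketch on the native backend with illustrative values:

    using PartitionedArrays

    ranks = collect(1:2)
    rows = uniform_partition(ranks, 10)
    IV = map(rows) do indices
        gids = collect(own_to_global(indices))  # contribute only the owned rows
        return gids, float.(gids)               # I (global ids) and V (values)
    end
    I, V = tuple_of_arrays(IV)
    pv = pvector!(I, V, rows) |> fetch          # assembled PVector
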
 @testset "HYPRE(Matrix|Vector)?Assembler" begin
@@ -689,40 +685,55 @@ end
     @test x ≈ A \ b atol=tol
 end

-function topartitioned(x::Vector, A::SparseMatrixCSC, b::Vector)
-    parts = get_part_ids(SequentialBackend(), 1)
-    rows = PRange(parts, size(A, 1))
-    cols = PRange(parts, size(A, 2))
-    II, JJ, VV, bb, xx = map_parts(parts) do _
+function topartitioned(x::Vector, A::SparseMatrixCSC, b::Vector, backend)
+    parts = distribute_as_parray(1:1, backend)
+    n = size(A, 1)
+    rows = uniform_partition(parts, n)
+    cols = uniform_partition(parts, n)
+    tmp = map(parts) do _
         return findnz(A)..., b, x
     end
-    add_gids!(rows, II)
-    assemble!(II, JJ, VV, rows)
-    add_gids!(cols, JJ)
-    A_p = PSparseMatrix(II, JJ, VV, rows, cols; ids = :global)
+    II, JJ, VV, bb, xx = tuple_of_arrays(tmp)
+    A_p = psparse!(II, JJ, VV, rows, cols) |> fetch
     b_p = PVector(bb, rows)
     x_p = PVector(xx, cols)
     return x_p, A_p, b_p
 end

 @testset "solve with PartitionedArrays" begin
+    for backend in [:native, :debug, :mpi]
         # Setup
         A = sprand(100, 100, 0.05); A = A'A + 5I
         b = rand(100)
         x = zeros(100)
-    x_p, A_p, b_p = topartitioned(x, A, b)
-    @test A == tomain(A_p)
-    @test b == tomain(b_p)
-    @test x == tomain(x_p)
+        x_p, A_p, b_p = topartitioned(x, A, b, :native)
+        # Data is distributed over a single process. We can then check the following
+        # as local_values is the entire matrix/vector.
+        map(local_values(x_p)) do x_l
+            @test x_l == x
+        end
+        map(local_values(b_p)) do b_l
+            @test b_l == b
+        end
+        map(local_values(A_p)) do A_l
+            @test A_l == A
+        end
         # Solve
         tol = 1e-9
         pcg = HYPRE.PCG(; Tol = tol)
         ## solve!
         HYPRE.solve!(pcg, x_p, A_p, b_p)
-    @test tomain(x_p) ≈ A \ b atol=tol
+        ref = A\b
+        map(local_values(x_p)) do x
+            @test x ≈ ref atol=tol
+        end
         ## solve
         x_p = HYPRE.solve(pcg, A_p, b_p)
-    @test tomain(x_p) ≈ A \ b atol=tol
+        map(local_values(x_p)) do x
+            @test x ≈ ref atol=tol
+        end
+    end
 end
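
End to end, the solver path exercised above boils down to a few lines. This sketch reuses the topartitioned helper from this diff and assumes MPI.Init() and HYPRE.Init() have already run:

    using HYPRE, LinearAlgebra, SparseArrays

    A = sprand(100, 100, 0.05); A = A'A + 5I   # SPD test matrix, as above
    b = rand(100)
    x_p, A_p, b_p = topartitioned(zeros(100), A, b, :native)
    pcg = HYPRE.PCG(; Tol = 1e-9)
    HYPRE.solve!(pcg, x_p, A_p, b_p)           # x_p now holds the solution
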
 @testset "solve with SparseMatrixCS(C|R)" begin
