
Upgrade to PartitionedArrays v0.3

- Catch up to API changes for PartitionedArrays
- Rewrite tests to use all available PArray backends (debug, mpi, native)
Branch: pull/16/head
Author: Olav Møyner, 3 years ago
Commit: e5251bc849

Changed files:
- Project.toml (2 lines changed)
- src/HYPRE.jl (118 lines changed)
- test/runtests.jl (239 lines changed)
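To make the API changes named in the commit message concrete before the diffs: below is a minimal, hypothetical sketch of the PartitionedArrays v0.3 index API that the code migrates to. Only PartitionedArrays 0.3 is assumed; the two-part layout and 10-row partition are made up for illustration.

    using PartitionedArrays

    # Two "ranks" emulated in a plain Julia session (the :native style used in the tests below).
    ranks = collect(LinearIndices((2,)))
    row_partition = uniform_partition(ranks, 10)

    map(row_partition) do r
        # v0.3 accessor functions replace the v0.2 fields r.lid_to_gid / r.oid_to_lid:
        o_to_g = own_to_global(r)  # global ids of the rows this part owns
        println("owns global rows ", o_to_g[1], ":", o_to_g[end], " of ", global_length(r))
    end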

Project.toml (2 lines changed)

@@ -14,7 +14,7 @@ SparseMatricesCSR = "a0a7dd2c-ebf4-11e9-1f05-cf50bc540ca1"
 [compat]
 CEnum = "0.4"
 MPI = "0.19, 0.20"
-PartitionedArrays = "0.2"
+PartitionedArrays = "0.3"
 SparseMatricesCSR = "0.6"
 julia = "1.6"
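Note: since [compat] now excludes PartitionedArrays 0.2, existing environments need an explicit update before this version resolves. A standard Pkg step (nothing repo-specific assumed):

    import Pkg
    Pkg.add(name = "PartitionedArrays", version = "0.3")  # or: Pkg.update("PartitionedArrays")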

src/HYPRE.jl (118 lines changed)

@@ -3,8 +3,10 @@
 module HYPRE

 using MPI: MPI
-using PartitionedArrays: IndexRange, MPIData, PSparseMatrix, PVector, PartitionedArrays,
-    SequentialData, map_parts
+using PartitionedArrays: own_length, tuple_of_arrays, own_to_global, global_length,
+    own_to_local, local_to_global, global_to_own, global_to_local,
+    MPIArray, PSparseMatrix, PVector, PartitionedArrays, AbstractLocalIndices,
+    local_values, own_values, partition
 using SparseArrays: SparseArrays, SparseMatrixCSC, nnz, nonzeros, nzrange, rowvals
 using SparseMatricesCSR: SparseMatrixCSR, colvals, getrowptr
@@ -335,11 +337,15 @@ end
 ##################################################

 # TODO: This has some duplicated code with to_hypre_data(::SparseMatrixCSC, ilower, iupper)
-function Internals.to_hypre_data(A::SparseMatrixCSC, r::IndexRange, c::IndexRange)
-    @assert r.oid_to_lid isa UnitRange && r.oid_to_lid.start == 1
-    ilower = r.lid_to_gid[r.oid_to_lid.start]
-    iupper = r.lid_to_gid[r.oid_to_lid.stop]
+function Internals.to_hypre_data(A::SparseMatrixCSC, r::AbstractLocalIndices, c::AbstractLocalIndices)
+    g_to_l_rows = global_to_local(r) # Not sure about this assert
+    l_to_g_rows = local_to_global(r)
+    @assert g_to_l_rows.own_to_local isa UnitRange && g_to_l_rows.own_to_local.start == 1
+    n_local_rows = own_length(r)
+    n_local_cols = own_length(c)
+    ilower = l_to_g_rows[1]
+    iupper = l_to_g_rows[own_length(r)]

     a_rows = rowvals(A)
     a_vals = nonzeros(A)
@@ -357,7 +363,7 @@ function Internals.to_hypre_data(A::SparseMatrixCSC, r::IndexRange, c::IndexRange)
     @inbounds for j in 1:size(A, 2)
         for i in nzrange(A, j)
             row = a_rows[i]
-            row > r.oid_to_lid.stop && continue # Skip ghost rows
+            row > n_local_rows && continue # Skip ghost rows
             # grow = r.lid_to_gid[lrow]
             ncols[row] += 1
         end
@@ -374,13 +380,14 @@ function Internals.to_hypre_data(A::SparseMatrixCSC, r::IndexRange, c::IndexRange)
     # Second pass to populate the output -- here we need to take care of the permutation
     # of columns. TODO: Problem that they are not sorted?
+    l_to_g_cols = local_to_global(c)
     @inbounds for j in 1:size(A, 2)
         for i in nzrange(A, j)
             row = a_rows[i]
-            row > r.oid_to_lid.stop && continue # Skip ghost rows
+            row > n_local_cols && continue # Skip ghost rows
             k = lastinds[row] += 1
             val = a_vals[i]
-            cols[k] = c.lid_to_gid[j]
+            cols[k] = l_to_g_cols[j]
             values[k] = val
         end
     end
@@ -391,32 +398,38 @@ end
 # TODO: Possibly this can be optimized if it is possible to pass overlong vectors to HYPRE.
 # At least values should be possible to directly share, but cols needs to translated
 # to global ids.
-function Internals.to_hypre_data(A::SparseMatrixCSR, r::IndexRange, c::IndexRange)
-    @assert r.oid_to_lid isa UnitRange && r.oid_to_lid.start == 1
-    ilower = r.lid_to_gid[r.oid_to_lid.start]
-    iupper = r.lid_to_gid[r.oid_to_lid.stop]
+function Internals.to_hypre_data(A::SparseMatrixCSR, r::AbstractLocalIndices, c::AbstractLocalIndices)
+    g_to_l_rows = global_to_local(r)
+    l_to_g_rows = local_to_global(r)
+    @assert g_to_l_rows.own_to_local isa UnitRange && g_to_l_rows.own_to_local.start == 1
+    n_local_rows = own_length(r)
+    n_local_cols = own_length(c)
+    ilower = l_to_g_rows[1]
+    iupper = l_to_g_rows[n_local_rows]
     a_cols = colvals(A)
     a_vals = nonzeros(A)
-    nnz = getrowptr(A)[r.oid_to_lid.stop + 1] - 1
+    nnz = getrowptr(A)[n_local_rows + 1] - 1
     # Initialize the data buffers HYPRE wants
     nrows = HYPRE_Int(iupper - ilower + 1)      # Total number of rows
-    ncols = zeros(HYPRE_Int, nrows)             # Number of colums for each row
+    ncols = zeros(HYPRE_Int, nrows)             # Number of columns for each row
     rows = collect(HYPRE_BigInt, ilower:iupper) # The row indices
     cols = Vector{HYPRE_BigInt}(undef, nnz)     # The column indices
     values = Vector{HYPRE_Complex}(undef, nnz)  # The values
     # Loop over the (owned) rows and collect all values
+    l_to_g_cols = local_to_global(c)
     k = 0
-    @inbounds for i in r.oid_to_lid
+    @inbounds for i in own_to_local(r)
         nzr = nzrange(A, i)
         ncols[i] = length(nzr)
         for j in nzr
             k += 1
             col = a_cols[j]
             val = a_vals[j]
-            cols[k] = c.lid_to_gid[col]
+            cols[k] = l_to_g_cols[col]
             values[k] = val
         end
     end
@@ -425,24 +438,29 @@ function Internals.to_hypre_data(A::SparseMatrixCSR, r::IndexRange, c::IndexRange)
     return nrows, ncols, rows, cols, values
 end

-function Internals.get_comm(A::Union{PSparseMatrix{<:Any,<:M}, PVector{<:Any,<:M}}) where M <: MPIData
-    return A.rows.partition.comm
+function Internals.get_comm(A::Union{PSparseMatrix{<:Any,<:M}, PVector{<:Any,<:M}}) where M <: MPIArray
+    return partition(A).comm
 end

 Internals.get_comm(_::Union{PSparseMatrix,PVector}) = MPI.COMM_SELF

-function Internals.get_proc_rows(A::Union{PSparseMatrix{<:Any,<:M}, PVector{<:Any,<:M}}) where M <: MPIData
-    r = A.rows.partition.part
-    ilower::HYPRE_BigInt = r.lid_to_gid[r.oid_to_lid[1]]
-    iupper::HYPRE_BigInt = r.lid_to_gid[r.oid_to_lid[end]]
-    return ilower, iupper
-end
-
-function Internals.get_proc_rows(A::Union{PSparseMatrix{<:Any,<:S}, PVector{<:Any,<:S}}) where S <: SequentialData
+function Internals.get_proc_rows(A::Union{PSparseMatrix, PVector})
+    if A isa PVector
+        r = A.index_partition
+    else
+        r = A.row_partition
+    end
     ilower::HYPRE_BigInt = typemax(HYPRE_BigInt)
     iupper::HYPRE_BigInt = typemin(HYPRE_BigInt)
-    for r in A.rows.partition.parts
-        ilower = min(r.lid_to_gid[r.oid_to_lid[1]], ilower)
-        iupper = max(r.lid_to_gid[r.oid_to_lid[end]], iupper)
-    end
+    low_high = map(r) do a
+        o_to_g = own_to_global(a)
+        ilower_part = o_to_g[1]
+        iupper_part = o_to_g[end]
+        return ilower_part, iupper_part
+    end
+    low, high = tuple_of_arrays(low_high)
+    ilower = convert(HYPRE_BigInt, reduce(min, low))
+    iupper = convert(HYPRE_BigInt, reduce(max, high))
     return ilower, iupper
 end
@@ -454,7 +472,7 @@ function HYPREMatrix(B::PSparseMatrix)
     # Create the IJ matrix
     A = HYPREMatrix(comm, ilower, iupper)
     # Set all the values
-    map_parts(B.values, B.rows.partition, B.cols.partition) do Bv, Br, Bc
+    map(local_values(B), B.row_partition, B.col_partition) do Bv, Br, Bc
         nrows, ncols, rows, cols, values = Internals.to_hypre_data(Bv, Br, Bc)
         @check HYPRE_IJMatrixSetValues(A, nrows, ncols, rows, cols, values)
         return nothing
@@ -476,9 +494,11 @@ function HYPREVector(v::PVector)
     # Create the IJ vector
     b = HYPREVector(comm, ilower, iupper)
     # Set all the values
-    map_parts(v.values, v.owned_values, v.rows.partition) do _, vo, vr
-        ilower_part = vr.lid_to_gid[vr.oid_to_lid.start]
-        iupper_part = vr.lid_to_gid[vr.oid_to_lid.stop]
+    map(own_values(v), v.index_partition) do vo, vr
+        o_to_g = own_to_global(vr)
+        ilower_part = o_to_g[1]
+        iupper_part = o_to_g[end]
         # Option 1: Set all values
         nvalues = HYPRE_Int(iupper_part - ilower_part + 1)
@@ -518,36 +538,30 @@ function Internals.copy_check(dst::HYPREVector, src::PVector)
 end

 # TODO: Other eltypes could be support by using a intermediate buffer
-function Base.copy!(dst::PVector{HYPRE_Complex}, src::HYPREVector)
+function Base.copy!(dst::PVector, src::HYPREVector)
     Internals.copy_check(src, dst)
-    map_parts(dst.values, dst.owned_values, dst.rows.partition) do vv, _, vr
-        il_src_part = vr.lid_to_gid[vr.oid_to_lid.start]
-        iu_src_part = vr.lid_to_gid[vr.oid_to_lid.stop]
+    map(own_values(dst), dst.index_partition) do ov, vr
+        o_to_g = own_to_global(vr)
+        il_src_part = o_to_g[1]
+        iu_src_part = o_to_g[end]
         nvalues = HYPRE_Int(iu_src_part - il_src_part + 1)
         indices = collect(HYPRE_BigInt, il_src_part:iu_src_part)
-        # Assumption: the dst vector is assembled, and should thus have 0s on the ghost
-        # entries (??). If this is not true, we must call fill!(vv, 0) here. This should be
-        # fairly cheap anyway, so might as well do it...
-        fill!(vv, 0)
-        # TODO: Safe to use vv here? Owned values are always first?
-        @check HYPRE_IJVectorGetValues(src, nvalues, indices, vv)
+        @check HYPRE_IJVectorGetValues(src, nvalues, indices, ov)
     end
     return dst
 end

-function Base.copy!(dst::HYPREVector, src::PVector{HYPRE_Complex})
+function Base.copy!(dst::HYPREVector, src::PVector)
     Internals.copy_check(dst, src)
     # Re-initialize the vector
     @check HYPRE_IJVectorInitialize(dst)
-    map_parts(src.values, src.owned_values, src.rows.partition) do vv, _, vr
-        ilower_src_part = vr.lid_to_gid[vr.oid_to_lid.start]
-        iupper_src_part = vr.lid_to_gid[vr.oid_to_lid.stop]
+    map(own_values(src), src.index_partition) do ov, vr
+        o_to_g = own_to_global(vr)
+        ilower_src_part = o_to_g[1]
+        iupper_src_part = o_to_g[end]
         nvalues = HYPRE_Int(iupper_src_part - ilower_src_part + 1)
         indices = collect(HYPRE_BigInt, ilower_src_part:iupper_src_part)
-        # TODO: Safe to use vv here? Owned values are always first?
-        @check HYPRE_IJVectorSetValues(dst, nvalues, indices, vv)
+        @check HYPRE_IJVectorSetValues(dst, nvalues, indices, ov)
     end
     # TODO: It shouldn't be necessary to assemble here since we only set owned rows (?)
     # @check HYPRE_IJVectorAssemble(dst)
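The new get_proc_rows above computes the (ilower, iupper) ownership bounds with a map over the partition followed by a reduction, instead of indexing the old lid_to_gid/oid_to_lid fields. A standalone sketch of that pattern, assuming only PartitionedArrays 0.3 and its debug backend (the 4-rank, 100-row sizes are made up):

    using PartitionedArrays

    with_debug() do distribute
        ranks = distribute(LinearIndices((4,)))
        row_partition = uniform_partition(ranks, 100)
        # Each part reports the global ids of its first and last owned row...
        low_high = map(row_partition) do indices
            o_to_g = own_to_global(indices)
            return o_to_g[1], o_to_g[end]
        end
        low, high = tuple_of_arrays(low_high)
        # ...and reductions produce the bounds that HYPRE's IJ interface expects.
        ilower = reduce(min, low)
        iupper = reduce(max, high)
        @show ilower iupper
    end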

test/runtests.jl (239 lines changed)

@@ -91,104 +91,105 @@ end
     @test H.iupper == H.jupper == 10
 end

-function tomain(x)
-    g = gather(copy(x))
-    be = get_backend(g.values)
-    if be isa SequentialBackend
-        return g.values.parts[1]
-    else # if be isa MPIBackend
-        return g.values.part
-    end
+function default_local_values_csr(I, J, V, row_indices, col_indices)
+    # Adapted from p_sparse_matrix.jl line 487
+    m = local_length(row_indices)
+    n = local_length(col_indices)
+    sparsecsr(I, J, V, m, n)
+end
+
+function distribute_as_parray(parts, backend)
+    if backend == :debug
+        parts = DebugArray(parts)
+    elseif backend == :mpi
+        parts = distribute_with_mpi(parts)
+    else
+        @assert backend == :native
+        parts = collect(parts)
+    end
+    return parts
 end

 @testset "HYPREMatrix(::PSparseMatrix)" begin
-    # Sequential backend
-    function diag_data(backend, parts)
-        is_seq = backend isa SequentialBackend
-        rows = PRange(parts, 10)
-        cols = PRange(parts, 10)
-        I, J, V = map_parts(parts) do p
+    function diag_data(parts)
+        rows = uniform_partition(parts, 10)
+        cols = uniform_partition(parts, 10)
+        np = length(parts)
+        IJV = map(parts) do p
             i = Int[]
             j = Int[]
             v = Float64[]
-            if (is_seq && p == 1) || !is_seq
+            if np == 1
+                # MPI case is special, we only have one MPI process.
+                @assert p == 1
+                append!(i, [1, 2, 3, 4, 5, 6, 7, 8, 9, 10])
+                append!(j, [1, 2, 3, 4, 5, 6, 7, 8, 9, 10])
+                append!(v, [1, 2, 3, 4, 5, 6, 7, 8, 9, 10])
+            elseif p == 1
+                @assert np == 2
                 append!(i, [1, 2, 3, 4, 5, 6])
                 append!(j, [1, 2, 3, 4, 5, 6])
                 append!(v, [1, 2, 3, 4, 5, 6])
-            end
-            if (is_seq && p == 2) || !is_seq
+            else
+                @assert np == 2
+                @assert p == 2
                 append!(i, [4, 5, 6, 7, 8, 9, 10])
                 append!(j, [4, 5, 6, 7, 8, 9, 10])
                 append!(v, [4, 5, 6, 7, 8, 9, 10])
             end
             return i, j, v
         end
-        add_gids!(rows, I)
-        assemble!(I, J, V, rows)
-        add_gids!(cols, J)
+        I, J, V = tuple_of_arrays(IJV)
         return I, J, V, rows, cols
     end
-    backend = SequentialBackend()
-    parts = get_part_ids(backend, 2)
-    CSC = PSparseMatrix(diag_data(backend, parts)...; ids=:global)
-    CSR = PSparseMatrix(sparsecsr, diag_data(backend, parts)...; ids=:global)
-    @test tomain(CSC) == tomain(CSR) ==
-          Diagonal([1, 2, 3, 8, 10, 12, 7, 8, 9, 10])
-    map_parts(CSC.values, CSC.rows.partition, CSC.cols.partition,
-              CSR.values, CSR.rows.partition, CSR.cols.partition, parts) do args...
-        cscvalues, cscrows, csccols, csrvalues, csrrows, csrcols, p = args
-        csc = Internals.to_hypre_data(cscvalues, cscrows, csccols)
-        csr = Internals.to_hypre_data(csrvalues, csrrows, csrcols)
-        if p == 1
-            nrows = 5
-            ncols = [1, 1, 1, 1, 1]
-            rows = [1, 2, 3, 4, 5]
-            cols = [1, 2, 3, 4, 5]
-            values = [1, 2, 3, 8, 10]
-        else # if p == 1
-            nrows = 5
-            ncols = [1, 1, 1, 1, 1]
-            rows = [6, 7, 8, 9, 10]
-            cols = [6, 7, 8, 9, 10]
-            values = [12, 7, 8, 9, 10]
-        end
-        @test csc[1]::HYPRE_Int == csr[1]::HYPRE_Int == nrows
-        @test csc[2]::Vector{HYPRE_Int} == csr[2]::Vector{HYPRE_Int} == ncols
-        @test csc[3]::Vector{HYPRE_BigInt} == csr[3]::Vector{HYPRE_BigInt} == rows
-        @test csc[4]::Vector{HYPRE_BigInt} == csr[4]::Vector{HYPRE_BigInt} == cols
-        @test csc[5]::Vector{HYPRE_Complex} == csr[5]::Vector{HYPRE_Complex} == values
-    end
-    # MPI backend
-    backend = MPIBackend()
-    parts = MPIData(1, MPI.COMM_WORLD, (1,)) # get_part_ids duplicates the comm
-    CSC = PSparseMatrix(diag_data(backend, parts)...; ids=:global)
-    CSR = PSparseMatrix(sparsecsr, diag_data(backend, parts)...; ids=:global)
-    @test tomain(CSC) == tomain(CSR) ==
-          Diagonal([1, 2, 3, 8, 10, 12, 7, 8, 9, 10])
-    map_parts(CSC.values, CSC.rows.partition, CSC.cols.partition,
-              CSR.values, CSR.rows.partition, CSR.cols.partition, parts) do args...
-        cscvalues, cscrows, csccols, csrvalues, csrrows, csrcols, p = args
-        csc = Internals.to_hypre_data(cscvalues, cscrows, csccols)
-        csr = Internals.to_hypre_data(csrvalues, csrrows, csrcols)
-        nrows = 10
-        ncols = fill(1, 10)
-        rows = collect(1:10)
-        cols = collect(1:10)
-        values = [1, 2, 3, 8, 10, 12, 7, 8, 9, 10]
-        @test csc[1]::HYPRE_Int == csr[1]::HYPRE_Int == nrows
-        @test csc[2]::Vector{HYPRE_Int} == csr[2]::Vector{HYPRE_Int} == ncols
-        @test csc[3]::Vector{HYPRE_BigInt} == csr[3]::Vector{HYPRE_BigInt} == rows
-        @test csc[4]::Vector{HYPRE_BigInt} == csr[4]::Vector{HYPRE_BigInt} == cols
-        @test csc[5]::Vector{HYPRE_Complex} == csr[5]::Vector{HYPRE_Complex} == values
+    for backend in [:native, :debug, :mpi]
+        @testset "Backend=$backend" begin
+            if backend == :mpi
+                parts = 1:1
+            else
+                parts = 1:2
+            end
+            parts = distribute_as_parray(parts, backend)
+            CSC = psparse!(diag_data(parts)...) |> fetch
+            CSR = psparse!(default_local_values_csr, diag_data(parts)...) |> fetch
+            for A in [CSC, CSR]
+                map(local_values(A), A.row_partition, A.col_partition, parts) do values, rows, cols, p
+                    hypre_data = Internals.to_hypre_data(values, rows, cols)
+                    if backend == :mpi
+                        @assert p == 1
+                        nrows = 10
+                        ncols = [1, 1, 1, 1, 1, 1, 1, 1, 1, 1]
+                        rows = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
+                        cols = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
+                        values = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
+                    elseif p == 1
+                        nrows = 5
+                        ncols = [1, 1, 1, 1, 1]
+                        rows = [1, 2, 3, 4, 5]
+                        cols = [1, 2, 3, 4, 5]
+                        values = [1, 2, 3, 8, 10]
+                    else
+                        @assert p == 2
+                        nrows = 5
+                        ncols = [1, 1, 1, 1, 1]
+                        rows = [6, 7, 8, 9, 10]
+                        cols = [6, 7, 8, 9, 10]
+                        values = [12, 7, 8, 9, 10]
+                    end
+                    @test hypre_data[1]::HYPRE_Int == nrows
+                    @test hypre_data[2]::Vector{HYPRE_Int} == ncols
+                    @test hypre_data[3]::Vector{HYPRE_BigInt} == rows
+                    @test hypre_data[4]::Vector{HYPRE_BigInt} == cols
+                    @test hypre_data[5]::Vector{HYPRE_Complex} == values
+                end
+            end
+        end
+    end
 end
@testset "HYPREVector" begin @testset "HYPREVector" begin
h = HYPREVector(MPI.COMM_WORLD, 1, 5) h = HYPREVector(MPI.COMM_WORLD, 1, 5)
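The distribute_as_parray helper introduced above is what lets one test body run against all three PartitionedArrays backends. For reference, a self-contained sketch of the :debug path (assuming PartitionedArrays 0.3; DebugArray runs serially but rejects access patterns that would be illegal on a real MPIArray):

    using PartitionedArrays

    parts = DebugArray(1:2)  # emulate two MPI ranks in a serial session
    rows = uniform_partition(parts, 10)
    map(parts, rows) do p, indices
        println("part ", p, " owns ", own_length(indices), " of ", global_length(indices), " rows")
    end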
@@ -250,52 +251,47 @@ end
 end

 @testset "HYPREVector(::PVector)" begin
-    # Sequential backend
-    backend = SequentialBackend()
-    parts = get_part_ids(backend, 2)
-    rows = PRange(parts, 10)
-    b = rand(10)
-    I, V = map_parts(parts) do p
-        if p == 1
-            return collect(1:6), b[1:6]
-        else # p == 2
-            return collect(4:10), b[4:10]
-        end
-    end
-    add_gids!(rows, I)
-    pb = PVector(I, V, rows; ids=:global)
-    assemble!(pb)
-    @test tomain(pb) == [i in 4:6 ? 2x : x for (i, x) in zip(eachindex(b), b)]
-    H = HYPREVector(pb)
-    @test H.ijvector != HYPRE_IJVector(C_NULL)
-    @test H.parvector != HYPRE_ParVector(C_NULL)
-    pbc = fill!(copy(pb), 0)
-    copy!(pbc, H)
-    @test tomain(pbc) == tomain(pb)
-    pb2 = 2 * pb
-    H′ = copy!(H, pb2)
-    @test H === H′
-    copy!(pbc, H)
-    @test tomain(pbc) == 2 * tomain(pb)
-    # MPI backend
-    backend = MPIBackend()
-    parts = get_part_ids(backend, 1)
-    rows = PRange(parts, 10)
-    I, V = map_parts(parts) do p
-        return collect(1:10), b
-    end
-    add_gids!(rows, I)
-    pb = PVector(I, V, rows; ids=:global)
-    assemble!(pb)
-    @test tomain(pb) == b
-    H = HYPREVector(pb)
-    @test H.ijvector != HYPRE_IJVector(C_NULL)
-    @test H.parvector != HYPRE_ParVector(C_NULL)
-    pbc = fill!(copy(pb), 0)
-    copy!(pbc, H)
-    @test tomain(pbc) == tomain(pb)
+    for backend in [:native, :debug, :mpi]
+        if backend == :mpi
+            parts = distribute_as_parray(1:1, backend)
+        else
+            parts = distribute_as_parray(1:2, backend)
+        end
+        rows = uniform_partition(parts, 10)
+        b = rand(10)
+        IV = map(parts, rows) do p, owned
+            if backend == :mpi
+                row_indices = 1:10
+            elseif p == 1
+                row_indices = 1:6
+            else # p == 2
+                row_indices = 4:10
+            end
+            values = zeros(length(row_indices))
+            for (i, row) in enumerate(row_indices)
+                if row in owned
+                    values[i] = b[row]
+                end
+            end
+            return collect(row_indices), values
+        end
+        I, V = tuple_of_arrays(IV)
+        pb = pvector!(I, V, rows) |> fetch
+        H = HYPREVector(pb)
+        # Check for valid vector
+        @test H.ijvector != HYPRE_IJVector(C_NULL)
+        @test H.parvector != HYPRE_ParVector(C_NULL)
+        # Copy back, check if identical
+        b_copy = copy!(similar(b), H)
+        @test b_copy == b
+        # Test copy to and from HYPREVector
+        pb2 = 2 * pb
+        H′ = copy!(H, pb2)
+        @test H === H′
+        pbc = similar(pb)
+        copy!(pbc, H)
+        @test pbc == 2*pb
+    end
 end
@testset "HYPRE(Matrix|Vector)?Assembler" begin @testset "HYPRE(Matrix|Vector)?Assembler" begin
@ -689,40 +685,55 @@ end
@test x A \ b atol=tol @test x A \ b atol=tol
end end
-function topartitioned(x::Vector, A::SparseMatrixCSC, b::Vector)
-    parts = get_part_ids(SequentialBackend(), 1)
-    rows = PRange(parts, size(A, 1))
-    cols = PRange(parts, size(A, 2))
-    II, JJ, VV, bb, xx = map_parts(parts) do _
+function topartitioned(x::Vector, A::SparseMatrixCSC, b::Vector, backend)
+    parts = distribute_as_parray(1:1, backend)
+    n = size(A, 1)
+    rows = uniform_partition(parts, n)
+    cols = uniform_partition(parts, n)
+    tmp = map(parts) do _
         return findnz(A)..., b, x
     end
-    add_gids!(rows, II)
-    assemble!(II, JJ, VV, rows)
-    add_gids!(cols, JJ)
-    A_p = PSparseMatrix(II, JJ, VV, rows, cols; ids = :global)
+    II, JJ, VV, bb, xx = tuple_of_arrays(tmp)
+    A_p = psparse!(II, JJ, VV, rows, cols) |> fetch
     b_p = PVector(bb, rows)
     x_p = PVector(xx, cols)
     return x_p, A_p, b_p
 end
@testset "solve with PartitionedArrays" begin @testset "solve with PartitionedArrays" begin
for backend in [:native, :debug, :mpi]
# Setup # Setup
A = sprand(100, 100, 0.05); A = A'A + 5I A = sprand(100, 100, 0.05); A = A'A + 5I
b = rand(100) b = rand(100)
x = zeros(100) x = zeros(100)
x_p, A_p, b_p = topartitioned(x, A, b) x_p, A_p, b_p = topartitioned(x, A, b, :native)
@test A == tomain(A_p) # Data is distributed over a single process. We can then check the following
@test b == tomain(b_p) # as local_values is the entire matrix/vector.
@test x == tomain(x_p) map(local_values(x_p)) do x_l
@test x_l == x
end
map(local_values(b_p)) do b_l
@test b_l == b
end
map(local_values(A_p)) do A_l
@test A_l == A
end
# Solve # Solve
tol = 1e-9 tol = 1e-9
pcg = HYPRE.PCG(; Tol = tol) pcg = HYPRE.PCG(; Tol = tol)
## solve! ## solve!
HYPRE.solve!(pcg, x_p, A_p, b_p) HYPRE.solve!(pcg, x_p, A_p, b_p)
@test tomain(x_p) A \ b atol=tol ref = A\b
map(local_values(x_p)) do x
@test x ref atol=tol
end
## solve ## solve
x_p = HYPRE.solve(pcg, A_p, b_p) x_p = HYPRE.solve(pcg, A_p, b_p)
@test tomain(x_p) A \ b atol=tol map(local_values(x_p)) do x
@test x ref atol=tol
end
end
end end
@testset "solve with SparseMatrixCS(C|R)" begin @testset "solve with SparseMatrixCS(C|R)" begin
