# Example translated from C to Julia based on this original source (MIT license):
# https://github.com/hypre-space/hypre/blob/ac9d7d0d7b43cd3d0c7f24ec5d64b58fbf900097/src/examples/ex5.c
#
# Note that this is more or less a line-by-line translation (Julia and C are surprisingly
# similar!), and therefore the code here is not written in a very "Julian" style. Nevertheless, it
# showcases how HYPRE.jl can be used to interact with the C API directly.
# Example 5
#
# Interface: Linear-Algebraic (IJ)
#
# Sample run: mpirun -np 4 julia ex5.jl
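# (If MPI was installed through MPI.jl, the mpiexecjl wrapper shipped with MPI.jl can
# be used in place of mpirun, e.g. mpiexecjl -np 4 julia ex5.jl.)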
#
# Description: This example solves the 2-D Laplacian problem with zero boundary
# conditions on an n x n grid. The number of unknowns is N=n^2.
# The standard 5-point stencil is used, and we solve for the
# interior nodes only.
#
# This example solves the same problem as Example 3. Available
# solvers are AMG, PCG, PCG with AMG or ParaSails
# preconditioners, and FlexGMRES with AMG preconditioner.
using MPI
using HYPRE.LibHYPRE # the LibHYPRE submodule contains the raw C interface generated by Clang.jl
function main(argc, argv)
# Initialize MPI
# MPI_Init(Ref{Cint}(length(ARGS)), ARGS)
MPI.Init()
MPI_COMM_WORLD = MPI.COMM_WORLD
myid = MPI.Comm_rank(MPI_COMM_WORLD)
num_procs = MPI.Comm_size(MPI_COMM_WORLD)
# Initialize HYPRE
HYPRE_Init()
# Default problem parameters
n = 33
solver_id = 0
print_system = false
# Parse command line
arg_index = 1
print_usage = false
let
while arg_index <= argc
if argv[arg_index] == "-n"
n = parse(Int, argv[arg_index += 1])
arg_index += 1
elseif argv[arg_index] == "-solver"
solver_id = parse(Int, argv[arg_index += 1])
arg_index += 1
elseif argv[arg_index] == "-print_system"
print_system = true
arg_index += 1
elseif argv[arg_index] == "-help"
print_usage = true
break
else
arg_index += 1
end
end
if print_usage && myid == 0
println()
println("Usage: julia ex5.jl [<options>]")
println()
println(" -n <n> : problem size in each direction (default: 33)")
println(" -solver <ID> : solver ID")
println(" 0 - AMG (default)")
println(" 1 - AMG-PCG")
println(" 8 - ParaSails-PCG")
println(" 50 - PCG")
println(" 61 - AMG-FlexGMRES")
# println(" -vis : save the solution for GLVis visualization")
println(" -print_system : print the matrix and rhs")
println()
end
if print_usage
MPI.Finalize()
return 0
end
end
# Preliminaries: want at least one processor per row
if n * n < num_procs
n = trunc(Int, sqrt(num_procs)) + 1
end
N = n * n # global number of rows
h = 1.0 / (n + 1) # mesh size
h2 = h * h
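# (n interior nodes per direction on the unit square give n + 1 mesh intervals of width h)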
# Each processor knows only of its own rows - the range is denoted by ilower
# and iupper. Here we partition the rows. We account for the fact that
# N may not divide evenly by the number of processors.
local_size = N ÷ num_procs
extra = N - local_size * num_procs
ilower = local_size * myid
ilower += min(myid, extra)
iupper = local_size * (myid + 1)
iupper += min(myid + 1, extra)
iupper = iupper - 1
# How many rows do I have?
local_size = iupper - ilower + 1
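# For example, with N = 10 rows on 3 processes this partition gives rows 0-3 to rank 0
# and rows 4-6 and 7-9 to ranks 1 and 2 (the first `extra` ranks each get one extra row).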
# Create the matrix.
# Note that this is a square matrix, so we indicate the row partition
# size twice (since number of rows = number of cols)
Aref = Ref{HYPRE_IJMatrix}(C_NULL)
HYPRE_IJMatrixCreate(MPI_COMM_WORLD, ilower, iupper, ilower, iupper, Aref)
A = Aref[]
# Choose a parallel CSR format storage (see the User's Manual)
HYPRE_IJMatrixSetObjectType(A, HYPRE_PARCSR)
# Initialize before setting coefficients
HYPRE_IJMatrixInitialize(A)
# Now go through my local rows and set the matrix entries.
# Each row has at most 5 entries. For example, if n=3:
# A = [M -I 0; -I M -I; 0 -I M]
# M = [4 -1 0; -1 4 -1; 0 -1 4]
# Note that here we are setting one row at a time, though
# one could set all the rows together (see the User's Manual).
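# For example, with n = 3 (N = 9) the interior row i = 4 gets the entries
# (column, value) = (1, -1), (3, -1), (4, 4), (5, -1), (7, -1), assembled in that order below.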
let
values = Vector{Cdouble}(undef, 5)
cols = Vector{Cint}(undef, 5)
tmp = Vector{Cint}(undef, 2)
for i in ilower:iupper
nnz = 1
# The left identity block: position i-n
if (i - n) >= 0
cols[nnz] = i - n
values[nnz] = -1.0
nnz += 1
end
# The left -1: position i-1
if i % n != 0
cols[nnz] = i - 1
values[nnz] = -1.0
nnz += 1
end
# Set the diagonal: position i
cols[nnz] = i
values[nnz] = 4.0
nnz += 1
# The right -1: position i+1
if (i + 1) % n != 0
cols[nnz] = i + 1
values[nnz] = -1.0
nnz += 1
end
# The right identity block: position i+n
if (i + n) < N
cols[nnz] = i + n
values[nnz] = -1.0
nnz += 1
end
# Set the values for row i
tmp[1] = nnz - 1
tmp[2] = i
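# HYPRE_IJMatrixSetValues expects pointers for the number of columns and the row
# index, so the two scalars are passed as Refs to the Cint values stored in tmp.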
HYPRE_IJMatrixSetValues(A, 1, Ref(tmp[1]), Ref(tmp[2]), cols, values)
end
end
# Assemble after setting the coefficients
HYPRE_IJMatrixAssemble(A)
# Note: for the testing of small problems, one may wish to read
# in a matrix in IJ format (for the format, see the output files
# from the -print_system option).
# In this case, one would use the following routine:
# HYPRE_IJMatrixRead( <filename>, MPI_COMM_WORLD,
# HYPRE_PARCSR, &A );
# <filename> = IJ.A.out to read in what has been printed out
# by -print_system (processor numbers are omitted).
# A call to HYPRE_IJMatrixRead is an *alternative* to the
# following sequence of HYPRE_IJMatrix calls:
# Create, SetObjectType, Initialize, SetValues, and Assemble
# Get the parcsr matrix object to use
parcsr_A_ref = Ref{Ptr{Cvoid}}(C_NULL)
HYPRE_IJMatrixGetObject(A, parcsr_A_ref)
parcsr_A = convert(Ptr{HYPRE_ParCSRMatrix}, parcsr_A_ref[])
# Create the rhs and solution
b_ref = Ref{HYPRE_IJVector}(C_NULL)
HYPRE_IJVectorCreate(MPI_COMM_WORLD, ilower, iupper, b_ref)
b = b_ref[]
HYPRE_IJVectorSetObjectType(b, HYPRE_PARCSR)
HYPRE_IJVectorInitialize(b)
x_ref = Ref{HYPRE_IJVector}(C_NULL)
HYPRE_IJVectorCreate(MPI_COMM_WORLD, ilower, iupper, x_ref)
x = x_ref[]
HYPRE_IJVectorSetObjectType(x, HYPRE_PARCSR)
HYPRE_IJVectorInitialize(x)
# Set the rhs values to h^2 and the solution to zero
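# (The stencil above is the 5-point Laplacian multiplied by h^2, so the right-hand
# side for f = 1 is h^2 on every row.)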
let
rhs_values = Vector{Cdouble}(undef, local_size)
x_values = Vector{Cdouble}(undef, local_size)
rows = Vector{Cint}(undef, local_size)
for i in 1:local_size
rhs_values[i] = h2
x_values[i] = 0.0
rows[i] = ilower + i - 1
end
HYPRE_IJVectorSetValues(b, local_size, rows, rhs_values)
HYPRE_IJVectorSetValues(x, local_size, rows, x_values)
end
# As with the matrix, for testing purposes, one may wish to read in a rhs:
# HYPRE_IJVectorRead( <filename>, MPI_COMM_WORLD,
# HYPRE_PARCSR, &b );
# as an alternative to the
# following sequence of HYPRE_IJVectors calls:
# Create, SetObjectType, Initialize, SetValues, and Assemble
HYPRE_IJVectorAssemble(b)
par_b_ref = Ref{Ptr{Cvoid}}(C_NULL)
HYPRE_IJVectorGetObject(b, par_b_ref)
par_b = convert(Ptr{HYPRE_ParVector}, par_b_ref[])
HYPRE_IJVectorAssemble(x)
par_x_ref = Ref{Ptr{Cvoid}}(C_NULL)
HYPRE_IJVectorGetObject(x, par_x_ref)
par_x = convert(Ptr{HYPRE_ParVector}, par_x_ref[])
# Print out the system - file names will be IJ.out.A.XXXXX
# and IJ.out.b.XXXXX, where XXXXX = processor id
if print_system
HYPRE_IJMatrixPrint(A, "IJ.out.A")
HYPRE_IJVectorPrint(b, "IJ.out.b")
end
# Choose a solver and solve the system
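# The C API returns solver handles and run statistics through pointer arguments,
# so Refs are created here and dereferenced with [] after the calls.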
solver_ref = Ref{HYPRE_Solver}(C_NULL)
precond_ref = Ref{HYPRE_Solver}(C_NULL)
num_iterations = Ref{Cint}()
final_res_norm = Ref{Cdouble}()
if solver_id == 0 # AMG
# Create solver
HYPRE_BoomerAMGCreate(solver_ref)
solver = solver_ref[]
# Set some parameters (See Reference Manual for more parameters)
HYPRE_BoomerAMGSetPrintLevel(solver, 3) # print solve info + parameters
HYPRE_BoomerAMGSetOldDefault(solver) # Falgout coarsening with modified classical interpolation
HYPRE_BoomerAMGSetRelaxType(solver, 3) # G-S/Jacobi hybrid relaxation
HYPRE_BoomerAMGSetRelaxOrder(solver, 1) # uses C/F relaxation
HYPRE_BoomerAMGSetNumSweeps(solver, 1) # Sweeps on each level
HYPRE_BoomerAMGSetMaxLevels(solver, 20) # maximum number of levels
HYPRE_BoomerAMGSetTol(solver, 1.0e-7) # conv. tolerance
# Now setup and solve!
HYPRE_BoomerAMGSetup(solver, parcsr_A, par_b, par_x)
HYPRE_BoomerAMGSolve(solver, parcsr_A, par_b, par_x)
# Run info - requires logging to be turned on
HYPRE_BoomerAMGGetNumIterations(solver, num_iterations)
HYPRE_BoomerAMGGetFinalRelativeResidualNorm(solver, final_res_norm)
if myid == 0
println()
println("Iterations = $(num_iterations[])")
println("Final Relative Residual Norm = $(final_res_norm[])")
println()
end
# Destroy solver
HYPRE_BoomerAMGDestroy(solver)
elseif solver_id == 50 # PCG
# Create solver
HYPRE_ParCSRPCGCreate(MPI_COMM_WORLD, solver_ref)
solver = solver_ref[]
# Set some parameters (See Reference Manual for more parameters)
HYPRE_PCGSetMaxIter(solver, 1000) # max iterations
HYPRE_PCGSetTol(solver, 1.0e-7) # conv. tolerance
HYPRE_PCGSetTwoNorm(solver, 1) # use the two norm as the stopping criterion
HYPRE_PCGSetPrintLevel(solver, 2) # prints out the iteration info
HYPRE_PCGSetLogging(solver, 1) # needed to get run info later
# Now setup and solve!
HYPRE_ParCSRPCGSetup(solver, parcsr_A, par_b, par_x)
HYPRE_ParCSRPCGSolve(solver, parcsr_A, par_b, par_x)
# Run info - requires logging to be turned on
HYPRE_PCGGetNumIterations(solver, num_iterations)
HYPRE_PCGGetFinalRelativeResidualNorm(solver, final_res_norm)
if myid == 0
println()
println("Iterations = $(num_iterations[])")
println("Final Relative Residual Norm = $(final_res_norm[])")
println()
end
# Destroy solver
HYPRE_ParCSRPCGDestroy(solver)
elseif solver_id == 1 # PCG with AMG preconditioner
# Create solver
HYPRE_ParCSRPCGCreate(MPI_COMM_WORLD, solver_ref)
solver = solver_ref[]
# Set some parameters (See Reference Manual for more parameters)
HYPRE_PCGSetMaxIter(solver, 1000) # max iterations
HYPRE_PCGSetTol(solver, 1.0e-7) # conv. tolerance
HYPRE_PCGSetTwoNorm(solver, 1) # use the two norm as the stopping criterion
HYPRE_PCGSetPrintLevel(solver, 2) # print solve info
HYPRE_PCGSetLogging(solver, 1) # needed to get run info later
# Now set up the AMG preconditioner and specify any parameters
HYPRE_BoomerAMGCreate(precond_ref)
precond = precond_ref[]
HYPRE_BoomerAMGSetPrintLevel(precond, 1) # print amg solution info
HYPRE_BoomerAMGSetCoarsenType(precond, 6)
HYPRE_BoomerAMGSetOldDefault(precond)
HYPRE_BoomerAMGSetRelaxType(precond, 6) # Sym G.S./Jacobi hybrid
HYPRE_BoomerAMGSetNumSweeps(precond, 1)
HYPRE_BoomerAMGSetTol(precond, 0.0) # conv. tolerance zero
HYPRE_BoomerAMGSetMaxIter(precond, 1) # do only one iteration!
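# (A tolerance of zero and a single iteration make each preconditioner application
# exactly one AMG V-cycle.)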
# Set the PCG preconditioner
HYPRE_PCGSetPrecond(solver, HYPRE_BoomerAMGSolve, HYPRE_BoomerAMGSetup, precond)
# Now setup and solve!
HYPRE_ParCSRPCGSetup(solver, parcsr_A, par_b, par_x)
HYPRE_ParCSRPCGSolve(solver, parcsr_A, par_b, par_x)
# Run info - requires logging to be turned on
HYPRE_PCGGetNumIterations(solver, num_iterations)
HYPRE_PCGGetFinalRelativeResidualNorm(solver, final_res_norm)
if myid == 0
println()
println("Iterations = $(num_iterations[])")
println("Final Relative Residual Norm = $(final_res_norm[])")
println()
end
# Destroy solver and preconditioner
HYPRE_ParCSRPCGDestroy(solver)
HYPRE_BoomerAMGDestroy(precond)
elseif solver_id == 8 # PCG with ParaSails preconditioner
# Create solver
HYPRE_ParCSRPCGCreate(MPI_COMM_WORLD, solver_ref)
solver = solver_ref[]
# Set some parameters (See Reference Manual for more parameters)
HYPRE_PCGSetMaxIter(solver, 1000) # max iterations
HYPRE_PCGSetTol(solver, 1.0e-7) # conv. tolerance
HYPRE_PCGSetTwoNorm(solver, 1) # use the two norm as the stopping criterion
HYPRE_PCGSetPrintLevel(solver, 2) # print solve info
HYPRE_PCGSetLogging(solver, 1) # needed to get run info later
# Now set up the ParaSails preconditioner and specify any parameters
HYPRE_ParaSailsCreate(MPI_COMM_WORLD, precond_ref)
precond = precond_ref[]
# Set some parameters (See Reference Manual for more parameters)
sai_max_levels = 1
sai_threshold = 0.1
sai_filter = 0.05
sai_sym = 1
HYPRE_ParaSailsSetParams(precond, sai_threshold, sai_max_levels)
HYPRE_ParaSailsSetFilter(precond, sai_filter)
HYPRE_ParaSailsSetSym(precond, sai_sym)
HYPRE_ParaSailsSetLogging(precond, 3)
# Set the PCG preconditioner
HYPRE_PCGSetPrecond(solver, HYPRE_ParaSailsSolve, HYPRE_ParaSailsSetup, precond)
# Now setup and solve!
HYPRE_ParCSRPCGSetup(solver, parcsr_A, par_b, par_x)
HYPRE_ParCSRPCGSolve(solver, parcsr_A, par_b, par_x)
# Run info - requires logging to be turned on
HYPRE_PCGGetNumIterations(solver, num_iterations)
HYPRE_PCGGetFinalRelativeResidualNorm(solver, final_res_norm)
if myid == 0
println()
println("Iterations = $(num_iterations[])")
println("Final Relative Residual Norm = $(final_res_norm[])")
println()
end
# Destroy solver and preconditioner
HYPRE_ParCSRPCGDestroy(solver)
HYPRE_ParaSailsDestroy(precond)
elseif solver_id == 61 # Flexible GMRES with AMG Preconditioner
# Create solver
HYPRE_ParCSRFlexGMRESCreate(MPI_COMM_WORLD, solver_ref)
solver = solver_ref[]
# Set some parameters (See Reference Manual for more parameters)
HYPRE_FlexGMRESSetKDim(solver, 30) # restart
HYPRE_FlexGMRESSetMaxIter(solver, 1000) # max iterations
HYPRE_FlexGMRESSetTol(solver, 1.0e-7) # conv. tolerance
HYPRE_FlexGMRESSetPrintLevel(solver, 2) # print solve info
HYPRE_FlexGMRESSetLogging(solver, 1) # needed to get run info later
# Now set up the AMG preconditioner and specify any parameters
HYPRE_BoomerAMGCreate(precond_ref)
precond = precond_ref[]
HYPRE_BoomerAMGSetPrintLevel(precond, 1) # print amg solution info
HYPRE_BoomerAMGSetCoarsenType(precond, 6)
HYPRE_BoomerAMGSetOldDefault(precond)
HYPRE_BoomerAMGSetRelaxType(precond, 6) # Sym G.S./Jacobi hybrid
HYPRE_BoomerAMGSetNumSweeps(precond, 1)
HYPRE_BoomerAMGSetTol(precond, 0.0) # conv. tolerance zero
HYPRE_BoomerAMGSetMaxIter(precond, 1) # do only one iteration!
# Set the FlexGMRES preconditioner
HYPRE_FlexGMRESSetPrecond(solver, HYPRE_BoomerAMGSolve, HYPRE_BoomerAMGSetup, precond)
# Now setup and solve!
HYPRE_ParCSRFlexGMRESSetup(solver, parcsr_A, par_b, par_x)
HYPRE_ParCSRFlexGMRESSolve(solver, parcsr_A, par_b, par_x)
# Run info - requires logging to be turned on
HYPRE_FlexGMRESGetNumIterations(solver, num_iterations)
HYPRE_FlexGMRESGetFinalRelativeResidualNorm(solver, final_res_norm)
if myid == 0
println()
println("Iterations = $(num_iterations[])")
println("Final Relative Residual Norm = $(final_res_norm[])")
println()
end
# Destroy solver and preconditioner
HYPRE_ParCSRFlexGMRESDestroy(solver)
HYPRE_BoomerAMGDestroy(precond)
else
if myid == 0
println("Invalid solver id specified.")
end
end
# Clean up
HYPRE_IJMatrixDestroy(A)
HYPRE_IJVectorDestroy(b)
HYPRE_IJVectorDestroy(x)
# Finalize HYPRE
HYPRE_Finalize()
# Finalize MPI
MPI.Finalize()
return 0
end
# Run it
if abspath(PROGRAM_FILE) == @__FILE__
main(length(ARGS), ARGS)
end
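# Example invocations (the options are parsed in main above):
#   mpirun -np 4 julia ex5.jl                    # BoomerAMG on the default 33 x 33 grid
#   mpirun -np 4 julia ex5.jl -n 100 -solver 1   # AMG-preconditioned PCG on a 100 x 100 grid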