Warning: This is experimental -- NERSC is looking for applications to kick the tires, and for motivated engineers to continue development (last commit in July 2019): https://github.com/barche/MPIArrays.jl
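The `manager` used with `@mpi_do` below is not created in this excerpt. A minimal sketch of how it is typically set up with MPIClusterManagers.jl (the package choice and the worker count of 2 are assumptions, matching the two workers seen in the output):
using MPIClusterManagers, Distributed
# launch 2 MPI worker processes (assumed count) and attach them to the Julia session
manager = MPIManager(np=2)
addprocs(manager)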
@mpi_do manager begin
    include(joinpath("MPIArrays.jl", "src", "MPIArrays.jl"))
    using .MPIArrays, MPI
    comm = MPI.COMM_WORLD
    rank = MPI.Comm_rank(comm)
    N = 30 # size of the matrix
end
@mpi_do manager begin
    # Create an uninitialized matrix and vector
    x = MPIArray{Float64}(N)
    A = MPIArray{Float64}(N,N)
end
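The constructors above distribute x and A over all ranks of MPI.COMM_WORLD. Since an MPIArray behaves like an AbstractArray, each rank still sees the global dimensions; a hypothetical sanity check (not part of the original walkthrough):
@mpi_do manager begin
    println("$(rank): size(x) = $(size(x)), size(A) = $(size(A))")
end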
@mpi_do manager begin
    using Random
    # Fill x and A with random values by applying `rand!` to the local part owned by each rank
    forlocalpart!(rand!, x)
    forlocalpart!(rand!, A)
end
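forlocalpart! hands the rank-local array to the supplied function, so any in-place initializer works, not just rand!. A hypothetical variation on a throwaway vector z (the name and values are assumptions):
@mpi_do manager begin
    z = MPIArray{Float64}(N)
    # each rank fills its own part with its rank id
    forlocalpart!(v -> fill!(v, Float64(rank)), z)
end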
@mpi_do manager begin
    # Make sure every process finished initializing the coefficients
    sync(A, x)
    b = A*x
end
@mpi_do manager begin
    println("$(rank): b=$(b)")
end
From worker 2: 0: b=[6.58448560732781, 5.397064398912319, 7.2643154419828, 6.659227285202218, 6.601799096937221, 6.087971504440199, 7.288367302111861, 5.965723191984332, 7.788123928904926, 6.906726045686252, 6.784108836906283, 8.248567581785284, 6.141569708627604, 6.066565417913599, 6.764081298937339, 7.171678033141173, 6.050349130440142, 6.551964852832224, 7.46361638548569, 8.23477485992181, 6.303863742596247, 7.572072339570294, 6.266950888366667, 7.009225600102562, 7.790626303820637, 5.4969217868349585, 6.63325855467022, 6.176998715842372, 7.191513001223977, 5.771460933503084]
From worker 3: 1: b=[6.58448560732781, 5.397064398912319, 7.2643154419828, 6.659227285202218, 6.601799096937221, 6.087971504440199, 7.288367302111861, 5.965723191984332, 7.788123928904926, 6.906726045686252, 6.784108836906283, 8.248567581785284, 6.141569708627604, 6.066565417913599, 6.764081298937339, 7.171678033141173, 6.050349130440142, 6.551964852832224, 7.46361638548569, 8.23477485992181, 6.303863742596247, 7.572072339570294, 6.266950888366667, 7.009225600102562, 7.790626303820637, 5.4969217868349585, 6.63325855467022, 6.176998715842372, 7.191513001223977, 5.771460933503084]
@mpi_do manager begin
    y = MPIArray{Float64}(4)
end
@mpi_do manager begin
    # with 2 ranks and 4 entries, each rank takes a Block over its own two global indices
    # (rank 0 gets 1:2, rank 1 gets 3:4)
    index = rank*2 + 1
    yblock = y[index : index + 1]
    println("$(rank): $(yblock) / $([i for i in index : index + 1])")
end
From worker 3: 1: Main.MPIArrays.Block{Float64, 1}([5.0e-324, 2.3815620396e-314, 0.0, 0.0], (3:4,), CartesianIndex{1}[CartesianIndex(2,)]) / [3, 4]
From worker 2: 0: Main.MPIArrays.Block{Float64, 1}([5.0e-324, 2.3815620396e-314, 0.0, 0.0], (1:2,), CartesianIndex{1}[CartesianIndex(1,)]) / [1, 2]
@mpi_do manager begin
    # fetch a local copy of the block's data
    ymat = getblock(yblock)
    # write into the local copy
    ymat[1:2] .= rank
    # synchronize the changes back to the distributed array
    putblock!(ymat, yblock)
    # ensure that all ranks have completed the `putblock!` operation
    MPI.Barrier(comm)
    println("$(rank): $(yblock)")
end
From worker 3: 1: Main.MPIArrays.Block{Float64, 1}([0.0, 0.0, 1.0, 1.0], (3:4,), CartesianIndex{1}[CartesianIndex(2,)])
From worker 2: 0: Main.MPIArrays.Block{Float64, 1}([0.0, 0.0, 1.0, 1.0], (1:2,), CartesianIndex{1}[CartesianIndex(1,)])
@mpi_do manager begin
    println("$(rank): $(y)")
end
From worker 2: 0: [0.0, 0.0, 1.0, 1.0]
From worker 3: 1: [0.0, 0.0, 1.0, 1.0]
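The Block returned by y[range] is not restricted to locally owned indices, so getblock can also pull remote data; a hypothetical example built only from the calls shown above (not verified here):
@mpi_do manager begin
    # fetch a copy of the entire distributed vector on every rank
    yfull = getblock(y[1:4])
    println("$(rank): yfull = $(yfull)")
end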
@mpi_do manager begin
    gb = GlobalBlock(ymat, yblock)
    println("$(rank): gb[$(index)] = $(gb[index])")
end
From worker 2: 0: gb[1] = 0.0
From worker 3: 1: gb[3] = 1.0
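GlobalBlock wraps the local copy so it can be addressed with global rather than local indices; a hypothetical extension of the cell above, using only the gb[i] indexing already shown:
@mpi_do manager begin
    for i in index : index + 1
        println("$(rank): gb[$(i)] = $(gb[i])")
    end
end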