MPI + Distributed Arrays

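The manager object and the per-worker variable rank used below are not defined in this section. A minimal setup sketch, assuming MPIClusterManagers.jl with 4 MPI processes (the package choice and variable names here are assumptions, not part of the original cells):

using MPIClusterManagers, Distributed

# Sketch: launch 4 MPI-backed Julia workers.
manager = MPIManager(np = 4)
addprocs(manager)

# DistributedArrays is needed on every process for the DArray cells below.
@everywhere using DistributedArrays

# Record each process's 0-based MPI rank in a global `rank` on that worker.
@mpi_do manager begin
    using MPI
    rank = MPI.Comm_rank(MPI.COMM_WORLD)
end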

In [53]:
@mpi_do manager begin
    # Define work() on every MPI worker; `rank` is assumed to have been set
    # per process beforehand (e.g. rank = MPI.Comm_rank(MPI.COMM_WORLD)).
    function work(N)
        x0 = rank
        # Fixed-point iteration x -> sqrt(x + 1); converges to the golden ratio φ ≈ 1.618.
        for n = 1:Int(N)
            x0 = sqrt(x0 + 1)
        end
        # Return a 2×2 block whose [1, 1] entry carries the rank-dependent result.
        out = zeros(2, 2)
        out[1, 1] = x0 + rank
        return out
    end
end
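A quick sanity check that work is now defined on the workers (a sketch; @fetchfrom comes from Distributed, and a small iteration count already reaches the fixed point):

@fetchfrom workers()[1] work(10)   # 2×2 matrix with entry [1, 1] ≈ 1.618 (φ plus rank 0)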
In [57]:
# Launch work(1e10) on each of the four workers; each call returns a Future
# that will hold that worker's 2×2 block.
r1 = DistributedArrays.remotecall(() -> work(1e10), workers()[1])
r2 = DistributedArrays.remotecall(() -> work(1e10), workers()[2])
r3 = DistributedArrays.remotecall(() -> work(1e10), workers()[3])
r4 = DistributedArrays.remotecall(() -> work(1e10), workers()[4])
# Assemble the futures into a 4×4 DArray on a 2×2 process grid
# (reshape is column-major, so r1 and r2 form the first block column).
D  = DArray(reshape([r1 r2 r3 r4], (2,2)))
Out[57]:
4×4 DArray{Float64, 2, Matrix{Float64}}:
 1.61803  0.0  3.61803  0.0
 0.0      0.0  0.0      0.0
 2.61803  0.0  4.61803  0.0
 0.0      0.0  0.0      0.0
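Each future supplies one 2×2 block, which DArray lays out on a 2×2 process grid to form the 4×4 array; since work only sets out[1, 1], the remaining entries stay zero. To copy the whole distributed array back to the master process (a sketch; collect works on any AbstractArray, DArrays included):

A = collect(D)                        # gather all blocks into an ordinary 4×4 Matrix
A[1, 1], A[3, 1], A[1, 3], A[3, 3]    # the four x0 + rank values, one per worker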
In [14]:
# Which index ranges of D live on each worker?
[@fetchfrom p localindices(D) for p in workers()]
Out[14]:
4-element Vector{Tuple{UnitRange{Int64}, UnitRange{Int64}}}:
 (1:2, 1:2)
 (3:4, 1:2)
 (1:2, 3:4)
 (3:4, 3:4)
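The ranges mirror the column-major block layout chosen by reshape above. To inspect the actual data each worker owns, localindices can be paired with localpart (a sketch):

# (index ranges, local 2×2 block) for every worker, fetched to the master process
[@fetchfrom p (localindices(D), localpart(D)) for p in workers()]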