Warning: this example works only if your MPI implementation supports one-sided communication with shared-memory windows (`MPI.Win_allocate_shared`, introduced in MPI-3).
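
The `manager` referenced in the blocks below is an MPI cluster manager that must already have MPI worker processes attached. A minimal sketch of that setup, assuming MPIClusterManagers.jl and Distributed (the manager type and worker count here are assumptions, not prescribed by this example):

```julia
# assumed setup, not part of the example itself
using MPIClusterManagers    # provides MPIWorkerManager and @mpi_do
using Distributed           # provides addprocs

# launch MPI worker processes and register them as Julia workers
manager = MPIWorkerManager(2)
addprocs(manager)
```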
```julia
@mpi_do manager begin
    using MPI: MPI, Comm, Win, free
    comm = MPI.COMM_WORLD
    rank = MPI.Comm_rank(comm)
    size = MPI.Comm_size(comm)
    println("Hello world, I am $(rank) of $(size)")
end
```
```
From worker 2:  Hello world, I am 0 of 2
From worker 3:  Hello world, I am 1 of 2
```
```julia
@mpi_do manager begin
    # Allocate a shared array whose memory is owned by the process with rank
    # `owner_rank` on `node_comm`. All ranks on `node_comm` must be able to map
    # the same shared-memory window (i.e. live on the same node).
    function mpi_shared_array(node_comm::Comm, ::Type{T}, sz::Tuple{Vararg{Int}}; owner_rank=0) where T
        node_rank = MPI.Comm_rank(node_comm)
        # only the owner allocates the buffer; all other ranks request length 0
        len_to_alloc = node_rank == owner_rank ? prod(sz) : 0
        win, bufptr = MPI.Win_allocate_shared(T, len_to_alloc, node_comm)
        if node_rank != owner_rank
            # non-owners query the owner's buffer and point into it
            len, sizeofT, bufvoidptr = MPI.Win_shared_query(win, owner_rank)
            bufptr = convert(Ptr{T}, bufvoidptr)
        end
        win, unsafe_wrap(Array, bufptr, sz)
    end

    comm = MPI.COMM_WORLD
    rank = MPI.Comm_rank(comm)
    owner_rank = 1
    win, shared_arr =
        mpi_shared_array(comm, Float32, (100, 2); owner_rank=owner_rank)
    # both ranks write into the same shared array, one column each
    if rank == 0
        shared_arr[:, 1] .= 1:100
    elseif rank == 1
        shared_arr[:, 2] .= 901:1000
    end
end
```
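
There is no explicit synchronization between the block above, where ranks 0 and 1 fill their columns, and the block below, where rank 0 reads the column written by rank 1; the separate `@mpi_do` invocations are relied on here to provide that ordering. If the write and the read were combined into a single block, an explicit synchronization point would be needed. A minimal sketch of that variant (an assumption, not part of the original example; depending on the MPI implementation, stricter window synchronization such as `MPI.Win_sync` may also be required for shared-memory windows):

```julia
# hypothetical single-block variant: write and read in one @mpi_do block
@mpi_do manager begin
    if rank == 1
        shared_arr[:, 2] .= 901:1000
    end
    MPI.Barrier(comm)    # wait until all ranks have finished writing
    if rank == 0
        println(shared_arr[:, 2])
    end
end
```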
```julia
@mpi_do manager begin
    # rank 0 can read the column written by rank 1, since the array is shared
    if rank == 0
        println(shared_arr[:, 2])
    end
end
```
```
From worker 2:  Float32[901.0, 902.0, 903.0, 904.0, 905.0, 906.0, 907.0, 908.0, 909.0, 910.0, 911.0, 912.0, 913.0, 914.0, 915.0, 916.0, 917.0, 918.0, 919.0, 920.0, 921.0, 922.0, 923.0, 924.0, 925.0, 926.0, 927.0, 928.0, 929.0, 930.0, 931.0, 932.0, 933.0, 934.0, 935.0, 936.0, 937.0, 938.0, 939.0, 940.0, 941.0, 942.0, 943.0, 944.0, 945.0, 946.0, 947.0, 948.0, 949.0, 950.0, 951.0, 952.0, 953.0, 954.0, 955.0, 956.0, 957.0, 958.0, 959.0, 960.0, 961.0, 962.0, 963.0, 964.0, 965.0, 966.0, 967.0, 968.0, 969.0, 970.0, 971.0, 972.0, 973.0, 974.0, 975.0, 976.0, 977.0, 978.0, 979.0, 980.0, 981.0, 982.0, 983.0, 984.0, 985.0, 986.0, 987.0, 988.0, 989.0, 990.0, 991.0, 992.0, 993.0, 994.0, 995.0, 996.0, 997.0, 998.0, 999.0, 1000.0]
```
```julia
@mpi_do manager begin
    # release the shared-memory window when done
    free(win)
end
```