.\" Man page generated from reStructuredText. . .TH "MPI_SCATTERV_INIT" "3" "Jul 22, 2024" "" "Open MPI" . .nr rst2man-indent-level 0 . .de1 rstReportMargin \\$1 \\n[an-margin] level \\n[rst2man-indent-level] level margin: \\n[rst2man-indent\\n[rst2man-indent-level]] - \\n[rst2man-indent0] \\n[rst2man-indent1] \\n[rst2man-indent2] .. .de1 INDENT .\" .rstReportMargin pre: . RS \\$1 . nr rst2man-indent\\n[rst2man-indent-level] \\n[an-margin] . nr rst2man-indent-level +1 .\" .rstReportMargin post: .. .de UNINDENT . RE .\" indent \\n[an-margin] .\" old: \\n[rst2man-indent\\n[rst2man-indent-level]] .nr rst2man-indent-level -1 .\" new: \\n[rst2man-indent\\n[rst2man-indent-level]] .in \\n[rst2man-indent\\n[rst2man-indent-level]]u .. .INDENT 0.0 .INDENT 3.5 .UNINDENT .UNINDENT .sp \fI\%MPI_Scatterv\fP, \fI\%MPI_Iscatterv\fP, \fI\%MPI_Scatterv_init\fP \- Scatters a buffer in parts to all tasks in a group. .SH SYNTAX .SS C Syntax .INDENT 0.0 .INDENT 3.5 .sp .nf .ft C #include int MPI_Scatterv(const void *sendbuf, const int sendcounts[], const int displs[], MPI_Datatype sendtype, void *recvbuf, int recvcount, MPI_Datatype recvtype, int root, MPI_Comm comm) int MPI_Iscatterv(const void *sendbuf, const int sendcounts[], const int displs[], MPI_Datatype sendtype, void *recvbuf, int recvcount, MPI_Datatype recvtype, int root, MPI_Comm comm, MPI_Request *request) int MPI_Scatterv_init(const void *sendbuf, const int sendcounts[], const int displs[], MPI_Datatype sendtype, void *recvbuf, int recvcount, MPI_Datatype recvtype, int root, MPI_Comm comm, MPI_Info info, MPI_Request *request) .ft P .fi .UNINDENT .UNINDENT .SS Fortran Syntax .INDENT 0.0 .INDENT 3.5 .sp .nf .ft C USE MPI ! or the older form: INCLUDE \(aqmpif.h\(aq MPI_SCATTERV(SENDBUF, SENDCOUNTS, DISPLS, SENDTYPE, RECVBUF, RECVCOUNT, RECVTYPE, ROOT, COMM, IERROR) SENDBUF(*), RECVBUF(*) INTEGER SENDCOUNTS(*), DISPLS(*), SENDTYPE INTEGER RECVCOUNT, RECVTYPE, ROOT, COMM, IERROR MPI_ISCATTERV(SENDBUF, SENDCOUNTS, DISPLS, SENDTYPE, RECVBUF, RECVCOUNT, RECVTYPE, ROOT, COMM, REQUEST, IERROR) SENDBUF(*), RECVBUF(*) INTEGER SENDCOUNTS(*), DISPLS(*), SENDTYPE INTEGER RECVCOUNT, RECVTYPE, ROOT, COMM, REQUEST, IERROR MPI_SCATTERV_INIT(SENDBUF, SENDCOUNTS, DISPLS, SENDTYPE, RECVBUF, RECVCOUNT, RECVTYPE, ROOT, COMM, INFO, REQUEST, IERROR) SENDBUF(*), RECVBUF(*) INTEGER SENDCOUNTS(*), DISPLS(*), SENDTYPE INTEGER RECVCOUNT, RECVTYPE, ROOT, COMM, INFO, REQUEST, IERROR .ft P .fi .UNINDENT .UNINDENT .SS Fortran 2008 Syntax .INDENT 0.0 .INDENT 3.5 .sp .nf .ft C USE mpi_f08 MPI_Scatterv(sendbuf, sendcounts, displs, sendtype, recvbuf, recvcount, recvtype, root, comm, ierror) TYPE(*), DIMENSION(..), INTENT(IN) :: sendbuf TYPE(*), DIMENSION(..) 
    INTEGER, INTENT(IN) :: sendcounts(*), displs(*), recvcount, root
    TYPE(MPI_Datatype), INTENT(IN) :: sendtype, recvtype
    TYPE(MPI_Comm), INTENT(IN) :: comm
    INTEGER, OPTIONAL, INTENT(OUT) :: ierror

MPI_Iscatterv(sendbuf, sendcounts, displs, sendtype, recvbuf, recvcount,
        recvtype, root, comm, request, ierror)
    TYPE(*), DIMENSION(..), INTENT(IN), ASYNCHRONOUS :: sendbuf
    TYPE(*), DIMENSION(..), ASYNCHRONOUS :: recvbuf
    INTEGER, INTENT(IN), ASYNCHRONOUS :: sendcounts(*), displs(*)
    INTEGER, INTENT(IN) :: recvcount, root
    TYPE(MPI_Datatype), INTENT(IN) :: sendtype, recvtype
    TYPE(MPI_Comm), INTENT(IN) :: comm
    TYPE(MPI_Request), INTENT(OUT) :: request
    INTEGER, OPTIONAL, INTENT(OUT) :: ierror

MPI_Scatterv_init(sendbuf, sendcounts, displs, sendtype, recvbuf, recvcount,
        recvtype, root, comm, info, request, ierror)
    TYPE(*), DIMENSION(..), INTENT(IN), ASYNCHRONOUS :: sendbuf
    TYPE(*), DIMENSION(..), ASYNCHRONOUS :: recvbuf
    INTEGER, INTENT(IN), ASYNCHRONOUS :: sendcounts(*), displs(*)
    INTEGER, INTENT(IN) :: recvcount, root
    TYPE(MPI_Datatype), INTENT(IN) :: sendtype, recvtype
    TYPE(MPI_Comm), INTENT(IN) :: comm
    TYPE(MPI_Info), INTENT(IN) :: info
    TYPE(MPI_Request), INTENT(OUT) :: request
    INTEGER, OPTIONAL, INTENT(OUT) :: ierror
.ft P
.fi
.UNINDENT
.UNINDENT
.SH INPUT PARAMETERS
.INDENT 0.0
.IP \(bu 2
\fBsendbuf\fP: Address of send buffer (choice, significant only at root).
.IP \(bu 2
\fBsendcounts\fP: Integer array (of length group size) specifying the number of elements to send to each process.
.IP \(bu 2
\fBdispls\fP: Integer array (of length group size). Entry \fIi\fP specifies the displacement (relative to \fIsendbuf\fP) from which to take the outgoing data to process \fIi\fP.
.IP \(bu 2
\fBsendtype\fP: Datatype of send buffer elements (handle).
.IP \(bu 2
\fBrecvcount\fP: Number of elements in receive buffer (integer).
.IP \(bu 2
\fBrecvtype\fP: Datatype of receive buffer elements (handle).
.IP \(bu 2
\fBroot\fP: Rank of sending process (integer).
.IP \(bu 2
\fBcomm\fP: Communicator (handle).
.IP \(bu 2
\fBinfo\fP: Info (handle, persistent only).
.UNINDENT
.SH OUTPUT PARAMETERS
.INDENT 0.0
.IP \(bu 2
\fBrecvbuf\fP: Address of receive buffer (choice).
.IP \(bu 2
\fBrequest\fP: Request (handle, non\-blocking and persistent only).
.IP \(bu 2
\fBierror\fP: Fortran only: Error status (integer).
.UNINDENT
.SH DESCRIPTION
.sp
\fI\%MPI_Scatterv\fP is the inverse operation to \fI\%MPI_Gatherv\fP\&.
.sp
\fI\%MPI_Scatterv\fP extends the functionality of \fI\%MPI_Scatter\fP by allowing a varying count of data to be sent to each process, since \fIsendcounts\fP is now an array. It also allows more flexibility as to where the data is taken from on the root, by providing the new argument \fIdispls\fP\&.
.sp
The outcome is as if the root executed \fIn\fP send operations,
.INDENT 0.0
.INDENT 3.5
.sp
.nf
.ft C
MPI_Send(sendbuf + displs[i] * extent(sendtype), sendcounts[i],
         sendtype, i, ...)
.ft P
.fi
.UNINDENT
.UNINDENT
.sp
and each process executed a receive,
.INDENT 0.0
.INDENT 3.5
.sp
.nf
.ft C
MPI_Recv(recvbuf, recvcount, recvtype, root, ...)
.ft P
.fi
.UNINDENT
.UNINDENT
.sp
The send buffer is ignored for all nonroot processes.
.sp
The type signature implied by \fIsendcounts\fP[\fIi\fP], \fIsendtype\fP at the root must be equal to the type signature implied by \fIrecvcount\fP, \fIrecvtype\fP at process \fIi\fP (however, the type maps may be different). This implies that the amount of data sent must be equal to the amount of data received, pairwise between each process and the root. Distinct type maps between sender and receiver are still allowed.
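.sp
The same pairwise matching rule applies to the nonblocking variant \fI\%MPI_Iscatterv\fP, which completes only at the corresponding wait or test. The following fragment is an illustrative sketch only (the counts, buffer sizes, and variable names are arbitrary and are not taken from the examples below): the root sends 2*\fIi\fP integers to each process \fIi\fP, and the scatter may be overlapped with local computation before the wait.
.INDENT 0.0
.INDENT 3.5
.sp
.nf
.ft C
MPI_Comm comm;
int root, gsize, myrank, i, offset = 0;
int *scounts, *displs, *sendbuf = NULL, *recvbuf;
MPI_Request req;

\&...   /* comm and root are set elsewhere */

MPI_Comm_size(comm, &gsize);
MPI_Comm_rank(comm, &myrank);
scounts = (int *)malloc(gsize*sizeof(int));
displs  = (int *)malloc(gsize*sizeof(int));
for (i=0; i<gsize; ++i) {
    scounts[i] = 2*i;        /* 2*i ints go to rank i           */
    displs[i]  = offset;     /* packed back to back in sendbuf  */
    offset    += scounts[i];
}
if (myrank == root) {
    sendbuf = (int *)malloc(offset*sizeof(int));
    /* fill sendbuf here */
}
recvbuf = (int *)malloc(2*myrank*sizeof(int));

/* recvcount = 2*myrank matches sendcounts[myrank] at the root */
MPI_Iscatterv(sendbuf, scounts, displs, MPI_INT,
              recvbuf, 2*myrank, MPI_INT, root, comm, &req);
/* unrelated computation may be overlapped here */
MPI_Wait(&req, MPI_STATUS_IGNORE);
.ft P
.fi
.UNINDENT
.UNINDENT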
.sp
All arguments to the function are significant on process \fIroot\fP, while on other processes only the arguments \fIrecvbuf\fP, \fIrecvcount\fP, \fIrecvtype\fP, \fIroot\fP, and \fIcomm\fP are significant. The arguments \fIroot\fP and \fIcomm\fP must have identical values on all processes.
.sp
The specification of counts, types, and displacements should not cause any location on the root to be read more than once.
.sp
\fBExample 1:\fP The reverse of Example 5 in the \fI\%MPI_Gatherv\fP manpage. We have a varying stride between blocks on the sending (root) side; on the receiving side, process \fIi\fP receives 100 \- \fIi\fP elements into the \fIi\fPth column of a 100 x 150 C array.
.INDENT 0.0
.INDENT 3.5
.sp
.nf
.ft C
MPI_Comm comm;
int gsize,recvarray[100][150],*rptr;
int root, *sendbuf, myrank, bufsize, *stride;
MPI_Datatype rtype;
int i, *displs, *scounts, offset;

\&...

MPI_Comm_size( comm, &gsize);
MPI_Comm_rank( comm, &myrank );
stride = (int *)malloc(gsize*sizeof(int));

\&...

/* stride[i] for i = 0 to gsize\-1 is set somehow
 * sendbuf comes from elsewhere
 */

\&...

displs = (int *)malloc(gsize*sizeof(int));
scounts = (int *)malloc(gsize*sizeof(int));
offset = 0;
for (i=0; i<gsize; ++i) {
    displs[i] = offset;
    offset += stride[i];
    scounts[i] = 100 \- i;
}
/* Create datatype for the column we are receiving */
MPI_Type_vector(100\-myrank, 1, 150, MPI_INT, &rtype);
MPI_Type_commit(&rtype);
rptr = &recvarray[0][myrank];
MPI_Scatterv(sendbuf, scounts, displs, MPI_INT, rptr, 1, rtype, root, comm);
.ft P
.fi
.UNINDENT
.UNINDENT
.sp
\fBExample 2:\fP The root process scatters sets of 100 ints to the other processes, but the sets of 100 are \fIstride\fP ints apart in the sending buffer, which requires use of \fI\%MPI_Scatterv\fP\&. Assume \fIstride\fP >= 100.
.INDENT 0.0
.INDENT 3.5
.sp
.nf
.ft C
MPI_Comm comm;
int gsize,*sendbuf;
int root, rbuf[100], i, *displs, *scounts;

\&...

MPI_Comm_size(comm, &gsize);
sendbuf = (int *)malloc(gsize*stride*sizeof(int));

\&...

displs = (int *)malloc(gsize*sizeof(int));
scounts = (int *)malloc(gsize*sizeof(int));
for (i=0; i<gsize; ++i) {
    displs[i] = i*stride;
    scounts[i] = 100;
}
MPI_Scatterv(sendbuf, scounts, displs, MPI_INT, rbuf, 100, MPI_INT, root, comm);
.ft P
.fi
.UNINDENT
.UNINDENT
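.sp
The persistent variant \fI\%MPI_Scatterv_init\fP creates the operation once and then starts and completes it repeatedly with \fI\%MPI_Start\fP and \fI\%MPI_Wait\fP\&. The following fragment is an illustrative sketch only (the iteration count, counts, and variable names are arbitrary); it reuses the data layout of Example 2 and performs the same scatter once per iteration.
.INDENT 0.0
.INDENT 3.5
.sp
.nf
.ft C
MPI_Comm comm;
int root, gsize, i, iter;
int *sendbuf, *scounts, *displs, rbuf[100];
MPI_Request req;

\&...   /* comm, root, and stride (>= 100) are set elsewhere */

MPI_Comm_size(comm, &gsize);
sendbuf = (int *)malloc(gsize*stride*sizeof(int));
displs  = (int *)malloc(gsize*sizeof(int));
scounts = (int *)malloc(gsize*sizeof(int));
for (i=0; i<gsize; ++i) {
    displs[i]  = i*stride;
    scounts[i] = 100;
}

/* Set the operation up once ... */
MPI_Scatterv_init(sendbuf, scounts, displs, MPI_INT, rbuf, 100, MPI_INT,
                  root, comm, MPI_INFO_NULL, &req);

/* ... then start and complete it as often as needed. */
for (iter = 0; iter < 10; ++iter) {
    /* refill sendbuf at the root here */
    MPI_Start(&req);
    MPI_Wait(&req, MPI_STATUS_IGNORE);
}
MPI_Request_free(&req);
.ft P
.fi
.UNINDENT
.UNINDENT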